Re: [PATCH 2/8] Initial removal of plugins

ACK

On 1/14/2012 10:13 PM, Steven Dake wrote:
> Quorum is broken in this patch.
> service.h needs to be cleaned up significantly.
> 
> Signed-off-by: Steven Dake <sdake@xxxxxxxxxx>
> ---
>  Makefile.am           |    4 +-
>  configure.ac          |    1 -
>  exec/Makefile.am      |   29 +-
>  exec/apidef.c         |    4 -
>  exec/cfg.c            | 1063 +++++++++++++++++++++++++
>  exec/cmap.c           |  603 ++++++++++++++
>  exec/coroparse.c      |   43 +-
>  exec/cpg.c            | 2064 ++++++++++++++++++++++++++++++++++++++++++++++++
>  exec/evs.c            |  489 ++++++++++++
>  exec/main.c           |   47 +-
>  exec/main.h           |    2 +
>  exec/mon.c            |  506 ++++++++++++
>  exec/pload.c          |  337 ++++++++
>  exec/quorum.c         |    1 -
>  exec/service.c        |  181 ++---
>  exec/service.h        |   16 +-
>  exec/votequorum.c     | 1580 +++++++++++++++++++++++++++++++++++++
>  exec/vsf_quorum.c     |   36 +-
>  exec/vsf_ykd.c        |   37 -
>  exec/wd.c             |  707 +++++++++++++++++
>  services/Makefile.am  |  103 ---
>  services/cfg.c        | 1104 --------------------------
>  services/cmap.c       |  645 ---------------
>  services/cpg.c        | 2106 -------------------------------------------------
>  services/evs.c        |  531 -------------
>  services/mon.c        |  548 -------------
>  services/pload.c      |  379 ---------
>  services/testquorum.c |  156 ----
>  services/votequorum.c | 1639 --------------------------------------
>  services/wd.c         |  749 ------------------
>  30 files changed, 7435 insertions(+), 8275 deletions(-)
>  create mode 100644 exec/cfg.c
>  create mode 100644 exec/cmap.c
>  create mode 100644 exec/cpg.c
>  create mode 100644 exec/evs.c
>  create mode 100644 exec/mon.c
>  create mode 100644 exec/pload.c
>  create mode 100644 exec/testquorum.c
>  create mode 100644 exec/votequorum.c
>  create mode 100644 exec/wd.c
>  delete mode 100644 services/Makefile.am
>  delete mode 100644 services/cfg.c
>  delete mode 100644 services/cmap.c
>  delete mode 100644 services/cpg.c
>  delete mode 100644 services/evs.c
>  delete mode 100644 services/mon.c
>  delete mode 100644 services/pload.c
>  delete mode 100644 services/testquorum.c
>  delete mode 100644 services/votequorum.c
>  delete mode 100644 services/wd.c
> 
> diff --git a/Makefile.am b/Makefile.am
> index eb8b875..fae829f 100644
> --- a/Makefile.am
> +++ b/Makefile.am
> @@ -71,7 +71,7 @@ corosysxmlxsltdir	= ${datadir}/corosync
>  corosysxmlxslt_DATA	= conf/xml2conf.xsl
>  endif
>  
> -SUBDIRS			= include lcr lib exec services tools test cts pkgconfig \
> +SUBDIRS			= include lcr lib exec tools test cts pkgconfig \
>  			  man init conf
>  
>  coverity:
> @@ -104,7 +104,7 @@ test_lense.sh:
>  endif
>  
>  lint:
> -	for dir in lcr lib exec services tools test; do make -C $$dir lint; done
> +	for dir in lcr lib exec tools test; do make -C $$dir lint; done
>  
>  .PHONY: doxygen
>  doxygen:
> diff --git a/configure.ac b/configure.ac
> index e7ca328..6226f2b 100644
> --- a/configure.ac
> +++ b/configure.ac
> @@ -134,7 +134,6 @@ AC_CONFIG_FILES([Makefile
>  		 lib/Makefile
>  		 man/Makefile
>  		 pkgconfig/Makefile
> -		 services/Makefile
>  		 test/Makefile
>  		 cts/Makefile
>  		 cts/agents/Makefile
> diff --git a/exec/Makefile.am b/exec/Makefile.am
> index a0ca646..0bb3a64 100644
> --- a/exec/Makefile.am
> +++ b/exec/Makefile.am
> @@ -42,25 +42,22 @@ if BUILD_RDMA
>  TOTEM_SRC		+= totemiba.c
>  endif
>  
> -LCRSO_SRC		= vsf_ykd.c coroparse.c vsf_quorum.c
> -LCRSO_OBJS		= $(LCRSO_SRC:%.c=%.o)
> -LCRSO			= $(LCRSO_SRC:%.c=%.lcrso)
> -
>  lib_LIBRARIES		= libtotem_pg.a
>  sbin_PROGRAMS		= corosync
>  
>  libtotem_pg_a_SOURCES	= $(TOTEM_SRC)
>  
> -corosync_SOURCES 	= main.c ipc_glue.c util.c sync.c apidef.c service.c \
> -			  timer.c totemconfig.c mainconfig.c quorum.c schedwrk.c \
> -			  ../lcr/lcr_ifact.c evil.c syncv2.c logsys.c icmap.c
> +corosync_SOURCES	= evil.c vsf_ykd.c coroparse.c vsf_quorum.c syncv2.c \
> +			  logsys.c cfg.c cmap.c cpg.c evs.c mon.c pload.c \
> +			  votequorum.c wd.c util.c schedwrk.c main.c \
> +			  apidef.c quorum.c sync.c icmap.c timer.c \
> +			  ipc_glue.c service.c mainconfig.c totemconfig.c
>  corosync_LDADD	  	= -ltotem_pg $(LIBQB_LIBS) $(statgrab_LIBS)
>  corosync_DEPENDENCIES	= libtotem_pg.so.$(SONAME)
>  corosync_LDFLAGS	= $(OS_DYFLAGS) -L./
>  
>  TOTEM_OBJS		= $(TOTEM_SRC:%.c=%.o)
>  LOGSYS_OBJS		= $(LOGSYS_SRC:%.c=%.o)
> -ICMAP_OBJS		= $(ICMAP_SRC:%.c=%.o)
>  
>  SHARED_LIBS		= $(lib_LIBRARIES:%.a=%.so.$(SONAME))
>  SHARED_LIBS_SO		= $(SHARED_LIBS:%.so.$(SONAME)=%.so)
> @@ -72,12 +69,7 @@ noinst_HEADERS		= apidef.h crypto.h mainconfig.h main.h \
>  			  totemudpu.h totemsrp.h util.h vsf.h schedwrk.h \
>  			  evil.h syncv2.h fsm.h
>  
> -EXTRA_DIST		= $(LCRSO_SRC)
> -
>  if BUILD_DARWIN
> -%.lcrso: %.o
> -	$(CC) $(LDFLAGS) $(CFLAGS) -L$(top_builddir)/exec -bundle -bind_at_load -bundle_loader ./corosync $^ -o $@
> -
>  libtotem_pg.so.$(SONAME): $(TOTEM_OBJS)
>  	$(CC) $(LDFLAGS) $(DARWIN_OPTS) $(TOTEM_OBJS) -o $@ -lpthread
>  	ln -sf libtotem_pg.so.$(SONAME) libtotem_pg.so
> @@ -86,19 +78,12 @@ libtotem_pg.so.$(SONAME): $(TOTEM_OBJS)
>  else
>  
>  if BUILD_SOLARIS
> -%.lcrso: %.o
> -	$(LD) $(LDFLAGS) -G $^ -o $@
> -
>  libtotem_pg.so.$(SONAME): $(TOTEM_OBJS)
>  	$(LD) $(LDFLAGS) -G $(TOTEM_OBJS) -o $@ -lpthread
>  	ln -sf libtotem_pg.so.$(SONAME) libtotem_pg.so
>  	ln -sf libtotem_pg.so.$(SONAME) libtotem_pg.so.$(SOMAJOR)
>  
>  else
> -
> -%.lcrso: %.o
> -	$(CC) $(LDFLAGS) $(CFLAGS) $(COVERAGE_LCRSO_EXTRA_LDFLAGS) -shared -Wl,-soname=$@ $^ -o $@
> -
>  libtotem_pg.so.$(SONAME): $(TOTEM_OBJS)
>  	$(CC) -shared -o $@ \
>  		-Wl,-soname=libtotem_pg.so.$(SOMAJOR) \
> @@ -120,8 +105,6 @@ install-exec-local:
>  	$(INSTALL) -d $(DESTDIR)/$(libdir)
>  	$(INSTALL) -m 755 $(SHARED_LIBS) $(DESTDIR)/$(libdir)
>  	$(CP) -a $(SHARED_LIBS_SO) $(SHARED_LIBS_SO_TWO) $(DESTDIR)/$(libdir)
> -	$(INSTALL) -d $(DESTDIR)/$(LCRSODIR)
> -	$(INSTALL) -m 755 $(LCRSO) $(DESTDIR)/$(LCRSODIR)
>  
>  uninstall-local:
>  	cd $(DESTDIR)/$(libdir) && \
> @@ -130,4 +113,4 @@ uninstall-local:
>  		rm -f $(LCRSO)
>  
>  clean-local:
> -	rm -f corosync *.o *.lcrso gmon.out *.da *.bb *.bbg *.so*
> +	rm -f corosync *.o gmon.out *.da *.bb *.bbg *.so*
> diff --git a/exec/apidef.c b/exec/apidef.c
> index 6e14e55..24fa5b9 100644
> --- a/exec/apidef.c
> +++ b/exec/apidef.c
> @@ -137,10 +137,6 @@ static struct corosync_api_v1 apidef_corosync_api_v1 = {
>  	.quorum_register_callback = corosync_quorum_register_callback,
>  	.quorum_unregister_callback = corosync_quorum_unregister_callback,
>  	.quorum_initialize = corosync_quorum_initialize,
> -	.service_link_and_init = corosync_service_link_and_init,
> -	.service_unlink_and_exit = corosync_service_unlink_and_exit,
> -	.plugin_interface_reference = lcr_ifact_reference,
> -	.plugin_interface_release = lcr_ifact_release,
>  	.error_memory_failure = _corosync_out_of_memory_error,
>  	.fatal_error = _corosync_public_exit_error,
>  	.shutdown_request = corosync_shutdown_request,
> diff --git a/exec/cfg.c b/exec/cfg.c
> new file mode 100644
> index 0000000..14bb431
> --- /dev/null
> +++ b/exec/cfg.c
> @@ -0,0 +1,1063 @@
> +/*
> + * Copyright (c) 2005-2006 MontaVista Software, Inc.
> + * Copyright (c) 2006-2009 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Author: Steven Dake (sdake@xxxxxxxxxx)
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the MontaVista Software, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#include <sys/types.h>
> +#include <sys/uio.h>
> +#include <sys/socket.h>
> +#include <sys/un.h>
> +#include <netinet/in.h>
> +#include <arpa/inet.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <limits.h>
> +#include <errno.h>
> +#include <string.h>
> +#include <assert.h>
> +
> +#include <corosync/corotypes.h>
> +#include <qb/qbipc_common.h>
> +#include <corosync/cfg.h>
> +#include <corosync/list.h>
> +#include <corosync/mar_gen.h>
> +#include <corosync/totem/totemip.h>
> +#include <corosync/totem/totem.h>
> +#include <corosync/ipc_cfg.h>
> +#include <corosync/logsys.h>
> +#include <corosync/coroapi.h>
> +#include <corosync/icmap.h>
> +#include <corosync/corodefs.h>
> +
> +LOGSYS_DECLARE_SUBSYS ("CFG");
> +
> +enum cfg_message_req_types {
> +        MESSAGE_REQ_EXEC_CFG_RINGREENABLE = 0,
> +	MESSAGE_REQ_EXEC_CFG_KILLNODE = 1,
> +	MESSAGE_REQ_EXEC_CFG_SHUTDOWN = 2,
> +	MESSAGE_REQ_EXEC_CFG_CRYPTO_SET = 3
> +};
> +
> +#define DEFAULT_SHUTDOWN_TIMEOUT 5
> +
> +static struct list_head trackers_list;
> +
> +/*
> + * Variables controlling a requested shutdown
> + */
> +static corosync_timer_handle_t shutdown_timer;
> +static struct cfg_info *shutdown_con;
> +static uint32_t shutdown_flags;
> +static int shutdown_yes;
> +static int shutdown_no;
> +static int shutdown_expected;
> +
> +struct cfg_info
> +{
> +	struct list_head list;
> +	void *conn;
> +	void *tracker_conn;
> +	enum {SHUTDOWN_REPLY_UNKNOWN, SHUTDOWN_REPLY_YES, SHUTDOWN_REPLY_NO} shutdown_reply;
> +};
> +
> +static void cfg_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id);
> +
> +static int cfg_exec_init_fn (struct corosync_api_v1 *corosync_api_v1);
> +
> +static struct corosync_api_v1 *api;
> +
> +static int cfg_lib_init_fn (void *conn);
> +
> +static int cfg_lib_exit_fn (void *conn);
> +
> +static void message_handler_req_exec_cfg_ringreenable (
> +        const void *message,
> +        unsigned int nodeid);
> +
> +static void message_handler_req_exec_cfg_killnode (
> +        const void *message,
> +        unsigned int nodeid);
> +
> +static void message_handler_req_exec_cfg_shutdown (
> +        const void *message,
> +        unsigned int nodeid);
> +
> +static void message_handler_req_exec_cfg_crypto_set (
> +        const void *message,
> +        unsigned int nodeid);
> +
> +static void exec_cfg_killnode_endian_convert (void *msg);
> +
> +static void message_handler_req_lib_cfg_ringstatusget (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_ringreenable (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_statetrack (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_statetrackstop (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_administrativestateset (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_administrativestateget (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_serviceload (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_serviceunload (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_killnode (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_tryshutdown (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_replytoshutdown (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_get_node_addrs (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_local_get (
> +	void *conn,
> +	const void *msg);
> +
> +static void message_handler_req_lib_cfg_crypto_set (
> +	void *conn,
> +	const void *msg);
> +
> +/*
> + * Service Handler Definition
> + */
> +static struct corosync_lib_handler cfg_lib_engine[] =
> +{
> +	{ /* 0 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_ringstatusget,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 1 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_ringreenable,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 2 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_statetrack,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 3 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_statetrackstop,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 4 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_administrativestateset,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 5 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_administrativestateget,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 6 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_serviceload,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 7 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_serviceunload,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 8 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_killnode,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 9 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_tryshutdown,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 10 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_replytoshutdown,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 11 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_get_node_addrs,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 12 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_local_get,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 13 */
> +		.lib_handler_fn		= message_handler_req_lib_cfg_crypto_set,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	}
> +};
> +
> +static struct corosync_exec_handler cfg_exec_engine[] =
> +{
> +	{ /* 0 */
> +		.exec_handler_fn = message_handler_req_exec_cfg_ringreenable,
> +	},
> +	{ /* 1 */
> +		.exec_handler_fn = message_handler_req_exec_cfg_killnode,
> +		.exec_endian_convert_fn	= exec_cfg_killnode_endian_convert
> +	},
> +	{ /* 2 */
> +		.exec_handler_fn = message_handler_req_exec_cfg_shutdown,
> +	},
> +	{ /* 3 */
> +		.exec_handler_fn = message_handler_req_exec_cfg_crypto_set,
> +	}
> +};
> +
> +/*
> + * Exports the interface for the service
> + */
> +struct corosync_service_engine cfg_service_engine = {
> +	.name					= "corosync configuration service",
> +	.id					= CFG_SERVICE,
> +	.priority				= 1,
> +	.private_data_size			= sizeof(struct cfg_info),
> +	.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> +	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> +	.lib_init_fn				= cfg_lib_init_fn,
> +	.lib_exit_fn				= cfg_lib_exit_fn,
> +	.lib_engine				= cfg_lib_engine,
> +	.lib_engine_count			= sizeof (cfg_lib_engine) / sizeof (struct corosync_lib_handler),
> +	.exec_init_fn				= cfg_exec_init_fn,
> +	.exec_engine				= cfg_exec_engine,
> +	.exec_engine_count			= sizeof (cfg_exec_engine) / sizeof (struct corosync_exec_handler),
> +	.confchg_fn				= cfg_confchg_fn,
> +	.sync_mode				= CS_SYNC_V1
> +};
> +
> +struct corosync_service_engine *cfg_get_service_engine_ver0 (void)
> +{
> +	return (&cfg_service_engine);
> +}
> +
> +struct req_exec_cfg_ringreenable {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +        mar_message_source_t source __attribute__((aligned(8)));
> +};
> +
> +struct req_exec_cfg_killnode {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +        mar_uint32_t nodeid __attribute__((aligned(8)));
> +	mar_name_t reason __attribute__((aligned(8)));
> +};
> +
> +struct req_exec_cfg_crypto_set {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +	mar_uint32_t type __attribute__((aligned(8)));
> +};
> +
> +struct req_exec_cfg_shutdown {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +};
> +
> +/* IMPL */
> +
> +static int cfg_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api_v1)
> +{
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +
> +	api = corosync_api_v1;
> +
> +	list_init(&trackers_list);
> +	return (0);
> +}
> +
> +static void cfg_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id)
> +{
> +}
> +
> +/*
> + * Tell other nodes we are shutting down
> + */
> +static int send_shutdown(void)
> +{
> +	struct req_exec_cfg_shutdown req_exec_cfg_shutdown;
> +	struct iovec iovec;
> +
> +	ENTER();
> +	req_exec_cfg_shutdown.header.size =
> +		sizeof (struct req_exec_cfg_shutdown);
> +	req_exec_cfg_shutdown.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> +		MESSAGE_REQ_EXEC_CFG_SHUTDOWN);
> +
> +	iovec.iov_base = (char *)&req_exec_cfg_shutdown;
> +	iovec.iov_len = sizeof (struct req_exec_cfg_shutdown);
> +
> +	assert (api->totem_mcast (&iovec, 1, TOTEM_SAFE) == 0);
> +
> +	LEAVE();
> +	return 0;
> +}
> +
> +static void send_test_shutdown(void *only_conn, void *exclude_conn, int status)
> +{
> +	struct res_lib_cfg_testshutdown res_lib_cfg_testshutdown;
> +	struct list_head *iter;
> +
> +	ENTER();
> +	res_lib_cfg_testshutdown.header.size = sizeof(struct res_lib_cfg_testshutdown);
> +	res_lib_cfg_testshutdown.header.id = MESSAGE_RES_CFG_TESTSHUTDOWN;
> +	res_lib_cfg_testshutdown.header.error = status;
> +	res_lib_cfg_testshutdown.flags = shutdown_flags;
> +
> +	if (only_conn) {
> +		TRACE1("sending testshutdown to only %p", only_conn);
> +		api->ipc_dispatch_send(only_conn, &res_lib_cfg_testshutdown,
> +				       sizeof(res_lib_cfg_testshutdown));
> +	} else {
> +		for (iter = trackers_list.next; iter != &trackers_list; iter = iter->next) {
> +			struct cfg_info *ci = list_entry(iter, struct cfg_info, list);
> +
> +			if (ci->conn != exclude_conn) {
> +				TRACE1("sending testshutdown to %p", ci->tracker_conn);
> +				api->ipc_dispatch_send(ci->tracker_conn, &res_lib_cfg_testshutdown,
> +						       sizeof(res_lib_cfg_testshutdown));
> +			}
> +		}
> +	}
> +	LEAVE();
> +}
> +
> +static void check_shutdown_status(void)
> +{
> +	ENTER();
> +
> +	/*
> +	 * Shutdown client might have gone away
> +	 */
> +	if (!shutdown_con) {
> +		LEAVE();
> +		return;
> +	}
> +
> +	/*
> +	 * All replies safely gathered in?
> +	 */
> +	if (shutdown_yes + shutdown_no >= shutdown_expected) {
> +		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> +
> +		api->timer_delete(shutdown_timer);
> +
> +		if (shutdown_yes >= shutdown_expected ||
> +		    shutdown_flags == CFG_SHUTDOWN_FLAG_REGARDLESS) {
> +			TRACE1("shutdown confirmed");
> +
> +			res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> +			res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> +			res_lib_cfg_tryshutdown.header.error = CS_OK;
> +
> +			/*
> +			 * Tell originator that shutdown was confirmed
> +			 */
> +			api->ipc_response_send(shutdown_con->conn, &res_lib_cfg_tryshutdown,
> +						    sizeof(res_lib_cfg_tryshutdown));
> +			shutdown_con = NULL;
> +
> +			/*
> +			 * Tell other nodes we are going down
> +			 */
> +			send_shutdown();
> +
> +		}
> +		else {
> +
> +			TRACE1("shutdown cancelled");
> +			res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> +			res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> +			res_lib_cfg_tryshutdown.header.error = CS_ERR_BUSY;
> +
> +			/*
> +			 * Tell originator that shutdown was cancelled
> +			 */
> +			api->ipc_response_send(shutdown_con->conn, &res_lib_cfg_tryshutdown,
> +						    sizeof(res_lib_cfg_tryshutdown));
> +			shutdown_con = NULL;
> +		}
> +
> +		log_printf(LOGSYS_LEVEL_DEBUG, "shutdown decision is: (yes count: %d, no count: %d) flags=%x\n", shutdown_yes, shutdown_no, shutdown_flags);
> +	}
> +	LEAVE();
> +}
> +
> +
> +/*
> + * Not all nodes responded to the shutdown (in time)
> + */
> +static void shutdown_timer_fn(void *arg)
> +{
> +	ENTER();
> +
> +	/*
> +	 * Mark undecideds as "NO"
> +	 */
> +	shutdown_no = shutdown_expected;
> +	check_shutdown_status();
> +
> +	send_test_shutdown(NULL, NULL, CS_ERR_TIMEOUT);
> +	LEAVE();
> +}
> +
> +static void remove_ci_from_shutdown(struct cfg_info *ci)
> +{
> +	ENTER();
> +
> +	/*
> +	 * If the controlling shutdown process has quit, then cancel the
> +	 * shutdown session
> +	 */
> +	if (ci == shutdown_con) {
> +		shutdown_con = NULL;
> +		api->timer_delete(shutdown_timer);
> +	}
> +
> +	if (!list_empty(&ci->list)) {
> +		list_del(&ci->list);
> +		list_init(&ci->list);
> +
> +		/*
> +		 * Remove our opinion
> +		 */
> +		if (shutdown_con) {
> +			if (ci->shutdown_reply == SHUTDOWN_REPLY_YES)
> +				shutdown_yes--;
> +			if (ci->shutdown_reply == SHUTDOWN_REPLY_NO)
> +				shutdown_no--;
> +		}
> +
> +		/*
> +		 * If we are leaving, then that's an implicit YES to shutdown
> +		 */
> +		ci->shutdown_reply = SHUTDOWN_REPLY_YES;
> +		shutdown_yes++;
> +
> +		check_shutdown_status();
> +	}
> +	LEAVE();
> +}
> +
> +
> +int cfg_lib_exit_fn (void *conn)
> +{
> +	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> +
> +	ENTER();
> +	remove_ci_from_shutdown(ci);
> +	LEAVE();
> +	return (0);
> +}
> +
> +static int cfg_lib_init_fn (void *conn)
> +{
> +	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> +
> +	ENTER();
> +	list_init(&ci->list);
> +	LEAVE();
> +
> +        return (0);
> +}
> +
> +/*
> + * Executive message handlers
> + */
> +static void message_handler_req_exec_cfg_ringreenable (
> +        const void *message,
> +        unsigned int nodeid)
> +{
> +	const struct req_exec_cfg_ringreenable *req_exec_cfg_ringreenable
> +	  = message;
> +	struct res_lib_cfg_ringreenable res_lib_cfg_ringreenable;
> +
> +	ENTER();
> +	api->totem_ring_reenable ();
> +        if (api->ipc_source_is_local(&req_exec_cfg_ringreenable->source)) {
> +		res_lib_cfg_ringreenable.header.id = MESSAGE_RES_CFG_RINGREENABLE;
> +		res_lib_cfg_ringreenable.header.size = sizeof (struct res_lib_cfg_ringreenable);
> +		res_lib_cfg_ringreenable.header.error = CS_OK;
> +		api->ipc_response_send (
> +			req_exec_cfg_ringreenable->source.conn,
> +			&res_lib_cfg_ringreenable,
> +			sizeof (struct res_lib_cfg_ringreenable));
> +
> +		api->ipc_refcnt_dec(req_exec_cfg_ringreenable->source.conn);
> +	}
> +	LEAVE();
> +}
> +
> +static void exec_cfg_killnode_endian_convert (void *msg)
> +{
> +	struct req_exec_cfg_killnode *req_exec_cfg_killnode =
> +		(struct req_exec_cfg_killnode *)msg;
> +	ENTER();
> +
> +	swab_mar_name_t(&req_exec_cfg_killnode->reason);
> +	LEAVE();
> +}
> +
> +
> +static void message_handler_req_exec_cfg_killnode (
> +        const void *message,
> +        unsigned int nodeid)
> +{
> +	const struct req_exec_cfg_killnode *req_exec_cfg_killnode = message;
> +	cs_name_t reason;
> +
> +	ENTER();
> +	marshall_from_mar_name_t(&reason, &req_exec_cfg_killnode->reason);
> +	log_printf(LOGSYS_LEVEL_DEBUG, "request to kill node %d(us=%d): %s\n",  req_exec_cfg_killnode->nodeid, api->totem_nodeid_get(), reason.value);
> +        if (req_exec_cfg_killnode->nodeid == api->totem_nodeid_get()) {
> +		log_printf(LOGSYS_LEVEL_NOTICE, "Killed by node %d: %s\n",
> +			   nodeid, reason.value);
> +		corosync_fatal_error(COROSYNC_FATAL_ERROR_EXIT);
> +	}
> +	LEAVE();
> +}
> +
> +/*
> + * Self shutdown
> + */
> +static void message_handler_req_exec_cfg_shutdown (
> +        const void *message,
> +        unsigned int nodeid)
> +{
> +	ENTER();
> +
> +	log_printf(LOGSYS_LEVEL_NOTICE, "Node %d was shut down by sysadmin\n", nodeid);
> +	if (nodeid == api->totem_nodeid_get()) {
> +		api->shutdown_request();
> +	}
> +	LEAVE();
> +}
> +
> +static void message_handler_req_exec_cfg_crypto_set (
> +        const void *message,
> +        unsigned int nodeid)
> +{
> +	const struct req_exec_cfg_crypto_set *req_exec_cfg_crypto_set = message;
> +	ENTER();
> +
> +	log_printf(LOGSYS_LEVEL_NOTICE, "Node %d requested set crypto to %d\n", nodeid, req_exec_cfg_crypto_set->type);
> +
> +	api->totem_crypto_set(req_exec_cfg_crypto_set->type);
> +	LEAVE();
> +}
> +
> +
> +/*
> + * Library Interface Implementation
> + */
> +static void message_handler_req_lib_cfg_ringstatusget (
> +	void *conn,
> +	const void *msg)
> +{
> +	struct res_lib_cfg_ringstatusget res_lib_cfg_ringstatusget;
> +	struct totem_ip_address interfaces[INTERFACE_MAX];
> +	unsigned int iface_count;
> +	char **status;
> +	const char *totem_ip_string;
> +	unsigned int i;
> +
> +	ENTER();
> +
> +	res_lib_cfg_ringstatusget.header.id = MESSAGE_RES_CFG_RINGSTATUSGET;
> +	res_lib_cfg_ringstatusget.header.size = sizeof (struct res_lib_cfg_ringstatusget);
> +	res_lib_cfg_ringstatusget.header.error = CS_OK;
> +
> +	api->totem_ifaces_get (
> +		api->totem_nodeid_get(),
> +		interfaces,
> +		&status,
> +		&iface_count);
> +
> +	res_lib_cfg_ringstatusget.interface_count = iface_count;
> +
> +	for (i = 0; i < iface_count; i++) {
> +		totem_ip_string
> +		  = (const char *)api->totem_ip_print (&interfaces[i]);
> +		strcpy ((char *)&res_lib_cfg_ringstatusget.interface_status[i],
> +			status[i]);
> +		strcpy ((char *)&res_lib_cfg_ringstatusget.interface_name[i],
> +			totem_ip_string);
> +	}
> +	api->ipc_response_send (
> +		conn,
> +		&res_lib_cfg_ringstatusget,
> +		sizeof (struct res_lib_cfg_ringstatusget));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_ringreenable (
> +	void *conn,
> +	const void *msg)
> +{
> +	struct req_exec_cfg_ringreenable req_exec_cfg_ringreenable;
> +	struct iovec iovec;
> +
> +	ENTER();
> +	req_exec_cfg_ringreenable.header.size =
> +		sizeof (struct req_exec_cfg_ringreenable);
> +	req_exec_cfg_ringreenable.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> +		MESSAGE_REQ_EXEC_CFG_RINGREENABLE);
> +	api->ipc_source_set (&req_exec_cfg_ringreenable.source, conn);
> +	api->ipc_refcnt_inc(conn);
> +
> +	iovec.iov_base = (char *)&req_exec_cfg_ringreenable;
> +	iovec.iov_len = sizeof (struct req_exec_cfg_ringreenable);
> +
> +	assert (api->totem_mcast (&iovec, 1, TOTEM_SAFE) == 0);
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_statetrack (
> +	void *conn,
> +	const void *msg)
> +{
> +	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> +	struct res_lib_cfg_statetrack res_lib_cfg_statetrack;
> +
> +	ENTER();
> +
> +	/*
> +	 * We only do shutdown tracking at the moment
> +	 */
> +	if (list_empty(&ci->list)) {
> +		list_add(&ci->list, &trackers_list);
> +		ci->tracker_conn = conn;
> +
> +		if (shutdown_con) {
> +			/*
> +			 * Shutdown already in progress, ask the newcomer's opinion
> +			 */
> +			ci->shutdown_reply = SHUTDOWN_REPLY_UNKNOWN;
> +			shutdown_expected++;
> +			send_test_shutdown(conn, NULL, CS_OK);
> +		}
> +	}
> +
> +	res_lib_cfg_statetrack.header.size = sizeof(struct res_lib_cfg_statetrack);
> +	res_lib_cfg_statetrack.header.id = MESSAGE_RES_CFG_STATETRACKSTART;
> +	res_lib_cfg_statetrack.header.error = CS_OK;
> +
> +	api->ipc_response_send(conn, &res_lib_cfg_statetrack,
> +				    sizeof(res_lib_cfg_statetrack));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_statetrackstop (
> +	void *conn,
> +	const void *msg)
> +{
> +	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> +//	struct req_lib_cfg_statetrackstop *req_lib_cfg_statetrackstop = (struct req_lib_cfg_statetrackstop *)message;
> +
> +	ENTER();
> +	remove_ci_from_shutdown(ci);
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_administrativestateset (
> +	void *conn,
> +	const void *msg)
> +{
> +//	struct req_lib_cfg_administrativestateset *req_lib_cfg_administrativestateset = (struct req_lib_cfg_administrativestateset *)message;
> +
> +	ENTER();
> +	LEAVE();
> +}
> +static void message_handler_req_lib_cfg_administrativestateget (
> +	void *conn,
> +	const void *msg)
> +{
> +//	struct req_lib_cfg_administrativestateget *req_lib_cfg_administrativestateget = (struct req_lib_cfg_administrativestateget *)message;
> +	ENTER();
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_serviceload (
> +	void *conn,
> +	const void *msg)
> +{
> +	const struct req_lib_cfg_serviceload *req_lib_cfg_serviceload = msg;
> +	struct res_lib_cfg_serviceload res_lib_cfg_serviceload;
> +
> +	ENTER();
> +	api->service_link_and_init (
> +		api,
> +		(const char *)req_lib_cfg_serviceload->service_name,
> +		req_lib_cfg_serviceload->service_ver);
> +
> +	res_lib_cfg_serviceload.header.id = MESSAGE_RES_CFG_SERVICEUNLOAD;
> +	res_lib_cfg_serviceload.header.size = sizeof (struct res_lib_cfg_serviceload);
> +	res_lib_cfg_serviceload.header.error = CS_OK;
> +	api->ipc_response_send (
> +		conn,
> +		&res_lib_cfg_serviceload,
> +		sizeof (struct res_lib_cfg_serviceload));
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_serviceunload (
> +	void *conn,
> +	const void *msg)
> +{
> +	const struct req_lib_cfg_serviceunload *req_lib_cfg_serviceunload = msg;
> +	struct res_lib_cfg_serviceunload res_lib_cfg_serviceunload;
> +
> +	ENTER();
> +	api->service_unlink_and_exit (
> +		api,
> +		(const char *)req_lib_cfg_serviceunload->service_name,
> +		req_lib_cfg_serviceunload->service_ver);
> +	res_lib_cfg_serviceunload.header.id = MESSAGE_RES_CFG_SERVICEUNLOAD;
> +	res_lib_cfg_serviceunload.header.size = sizeof (struct res_lib_cfg_serviceunload);
> +	res_lib_cfg_serviceunload.header.error = CS_OK;
> +	api->ipc_response_send (
> +		conn,
> +		&res_lib_cfg_serviceunload,
> +		sizeof (struct res_lib_cfg_serviceunload));
> +	LEAVE();
> +}
> +
> +
> +static void message_handler_req_lib_cfg_killnode (
> +	void *conn,
> +	const void *msg)
> +{
> +	const struct req_lib_cfg_killnode *req_lib_cfg_killnode = msg;
> +	struct res_lib_cfg_killnode res_lib_cfg_killnode;
> +	struct req_exec_cfg_killnode req_exec_cfg_killnode;
> +	struct iovec iovec;
> +
> +	ENTER();
> +	req_exec_cfg_killnode.header.size =
> +		sizeof (struct req_exec_cfg_killnode);
> +	req_exec_cfg_killnode.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> +		MESSAGE_REQ_EXEC_CFG_KILLNODE);
> +	req_exec_cfg_killnode.nodeid = req_lib_cfg_killnode->nodeid;
> +	marshall_to_mar_name_t(&req_exec_cfg_killnode.reason, &req_lib_cfg_killnode->reason);
> +
> +	iovec.iov_base = (char *)&req_exec_cfg_killnode;
> +	iovec.iov_len = sizeof (struct req_exec_cfg_killnode);
> +
> +	(void)api->totem_mcast (&iovec, 1, TOTEM_SAFE);
> +
> +	res_lib_cfg_killnode.header.size = sizeof(struct res_lib_cfg_killnode);
> +	res_lib_cfg_killnode.header.id = MESSAGE_RES_CFG_KILLNODE;
> +	res_lib_cfg_killnode.header.error = CS_OK;
> +
> +	api->ipc_response_send(conn, &res_lib_cfg_killnode,
> +				    sizeof(res_lib_cfg_killnode));
> +
> +	LEAVE();
> +}
> +
> +
> +static void message_handler_req_lib_cfg_tryshutdown (
> +	void *conn,
> +	const void *msg)
> +{
> +	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> +	const struct req_lib_cfg_tryshutdown *req_lib_cfg_tryshutdown = msg;
> +	struct list_head *iter;
> +
> +	ENTER();
> +
> +	if (req_lib_cfg_tryshutdown->flags == CFG_SHUTDOWN_FLAG_IMMEDIATE) {
> +		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> +
> +		/*
> +		 * Tell other nodes
> +		 */
> +		send_shutdown();
> +
> +		res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> +		res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> +		res_lib_cfg_tryshutdown.header.error = CS_OK;
> +		api->ipc_response_send(conn, &res_lib_cfg_tryshutdown,
> +					    sizeof(res_lib_cfg_tryshutdown));
> +
> +		LEAVE();
> +		return;
> +	}
> +
> +	/*
> +	 * Shutdown in progress, return an error
> +	 */
> +	if (shutdown_con) {
> +		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> +
> +		res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> +		res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> +		res_lib_cfg_tryshutdown.header.error = CS_ERR_EXIST;
> +
> +		api->ipc_response_send(conn, &res_lib_cfg_tryshutdown,
> +					    sizeof(res_lib_cfg_tryshutdown));
> +
> +
> +		LEAVE();
> +
> +		return;
> +	}
> +
> +	ci->conn = conn;
> +	shutdown_con = (struct cfg_info *)api->ipc_private_data_get (conn);
> +	shutdown_flags = req_lib_cfg_tryshutdown->flags;
> +	shutdown_yes = 0;
> +	shutdown_no = 0;
> +
> +	/*
> +	 * Count the number of listeners
> +	 */
> +	shutdown_expected = 0;
> +
> +	for (iter = trackers_list.next; iter != &trackers_list; iter = iter->next) {
> +		struct cfg_info *testci = list_entry(iter, struct cfg_info, list);
> +		/*
> +		 * It is assumed that we will allow shutdown
> +		 */
> +		if (testci != ci) {
> +			testci->shutdown_reply = SHUTDOWN_REPLY_UNKNOWN;
> +			shutdown_expected++;
> +		}
> +	}
> +
> +	/*
> +	 * If no-one is listening for events then we can just go down now
> +	 */
> +	if (shutdown_expected == 0) {
> +		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> +
> +		res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> +		res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> +		res_lib_cfg_tryshutdown.header.error = CS_OK;
> +
> +		/*
> +		 * Tell originator that shutdown was confirmed
> +		 */
> +		api->ipc_response_send(conn, &res_lib_cfg_tryshutdown,
> +				       sizeof(res_lib_cfg_tryshutdown));
> +
> +		send_shutdown();
> +		LEAVE();
> +		return;
> +	}
> +	else {
> +		unsigned int shutdown_timeout = DEFAULT_SHUTDOWN_TIMEOUT;
> +
> +		/*
> +		 * Look for a shutdown timeout in configuration map
> +		 */
> +		icmap_get_uint32("cfg.shutdown_timeout", &shutdown_timeout);
> +
> +		/*
> +		 * Start the timer. If we don't get a full set of replies before this goes
> +		 * off we'll cancel the shutdown
> +		 */
> +		api->timer_add_duration((unsigned long long)shutdown_timeout*1000000000, NULL,
> +					shutdown_timer_fn, &shutdown_timer);
> +
> +		/*
> +		 * Tell the users we would like to shut down
> +		 */
> +		send_test_shutdown(NULL, conn, CS_OK);
> +	}
> +
> +	/*
> +	 * We don't send a reply to the caller here.
> +	 * We send it when we know if we can shut down or not
> +	 */
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_replytoshutdown (
> +	void *conn,
> +	const void *msg)
> +{
> +	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> +	const struct req_lib_cfg_replytoshutdown *req_lib_cfg_replytoshutdown = msg;
> +	struct res_lib_cfg_replytoshutdown res_lib_cfg_replytoshutdown;
> +	int status = CS_OK;
> +
> +	ENTER();
> +	if (!shutdown_con) {
> +		status = CS_ERR_ACCESS;
> +		goto exit_fn;
> +	}
> +
> +	if (req_lib_cfg_replytoshutdown->response) {
> +		shutdown_yes++;
> +		ci->shutdown_reply = SHUTDOWN_REPLY_YES;
> +	}
> +	else {
> +		shutdown_no++;
> +		ci->shutdown_reply = SHUTDOWN_REPLY_NO;
> +	}
> +	check_shutdown_status();
> +
> +exit_fn:
> +	res_lib_cfg_replytoshutdown.header.error = status;
> +	res_lib_cfg_replytoshutdown.header.id = MESSAGE_RES_CFG_REPLYTOSHUTDOWN;
> +	res_lib_cfg_replytoshutdown.header.size = sizeof(res_lib_cfg_replytoshutdown);
> +
> +	api->ipc_response_send(conn, &res_lib_cfg_replytoshutdown,
> +			       sizeof(res_lib_cfg_replytoshutdown));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_cfg_get_node_addrs (void *conn,
> +							const void *msg)
> +{
> +	struct totem_ip_address node_ifs[INTERFACE_MAX];
> +	char buf[PIPE_BUF];
> +	char **status;
> +	unsigned int num_interfaces = 0;
> +	int ret = CS_OK;
> +	int i;
> +	const struct req_lib_cfg_get_node_addrs *req_lib_cfg_get_node_addrs = msg;
> +	struct res_lib_cfg_get_node_addrs *res_lib_cfg_get_node_addrs = (struct res_lib_cfg_get_node_addrs *)buf;
> +	unsigned int nodeid = req_lib_cfg_get_node_addrs->nodeid;
> +	char *addr_buf;
> +
> +	if (nodeid == 0)
> +		nodeid = api->totem_nodeid_get();
> +
> +	api->totem_ifaces_get(nodeid, node_ifs, &status, &num_interfaces);
> +
> +	res_lib_cfg_get_node_addrs->header.size = sizeof(struct res_lib_cfg_get_node_addrs) + (num_interfaces * TOTEMIP_ADDRLEN);
> +	res_lib_cfg_get_node_addrs->header.id = MESSAGE_RES_CFG_GET_NODE_ADDRS;
> +	res_lib_cfg_get_node_addrs->header.error = ret;
> +	res_lib_cfg_get_node_addrs->num_addrs = num_interfaces;
> +	if (num_interfaces) {
> +		res_lib_cfg_get_node_addrs->family = node_ifs[0].family;
> +		for (i = 0, addr_buf = (char *)res_lib_cfg_get_node_addrs->addrs;
> +		    i < num_interfaces; i++, addr_buf += TOTEMIP_ADDRLEN) {
> +			memcpy(addr_buf, node_ifs[i].addr, TOTEMIP_ADDRLEN);
> +		}
> +	}
> +	else {
> +		res_lib_cfg_get_node_addrs->header.error = CS_ERR_NOT_EXIST;
> +	}
> +	api->ipc_response_send(conn, res_lib_cfg_get_node_addrs, res_lib_cfg_get_node_addrs->header.size);
> +}
> +
> +static void message_handler_req_lib_cfg_local_get (void *conn, const void *msg)
> +{
> +	struct res_lib_cfg_local_get res_lib_cfg_local_get;
> +
> +	res_lib_cfg_local_get.header.size = sizeof(res_lib_cfg_local_get);
> +	res_lib_cfg_local_get.header.id = MESSAGE_RES_CFG_LOCAL_GET;
> +	res_lib_cfg_local_get.header.error = CS_OK;
> +	res_lib_cfg_local_get.local_nodeid = api->totem_nodeid_get ();
> +
> +	api->ipc_response_send(conn, &res_lib_cfg_local_get,
> +		sizeof(res_lib_cfg_local_get));
> +}
> +
> +
> +static void message_handler_req_lib_cfg_crypto_set (
> +	void *conn,
> +	const void *msg)
> +{
> +	const struct req_lib_cfg_crypto_set *req_lib_cfg_crypto_set = msg;
> +	struct res_lib_cfg_crypto_set res_lib_cfg_crypto_set;
> +	struct req_exec_cfg_crypto_set req_exec_cfg_crypto_set;
> +	struct iovec iovec;
> +	int ret = CS_ERR_INVALID_PARAM;
> +
> +	req_exec_cfg_crypto_set.header.size =
> +		sizeof (struct req_exec_cfg_crypto_set);
> +	req_exec_cfg_crypto_set.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> +		MESSAGE_REQ_EXEC_CFG_CRYPTO_SET);
> +
> +	/*
> +	 * Set it locally first so we can tell if it is allowed
> +	 */
> +	if (api->totem_crypto_set(req_lib_cfg_crypto_set->type) == 0) {
> +
> +		req_exec_cfg_crypto_set.type = req_lib_cfg_crypto_set->type;
> +
> +		iovec.iov_base = (char *)&req_exec_cfg_crypto_set;
> +		iovec.iov_len = sizeof (struct req_exec_cfg_crypto_set);
> +		assert (api->totem_mcast (&iovec, 1, TOTEM_SAFE) == 0);
> +		ret = CS_OK;
> +	}
> +
> +	res_lib_cfg_crypto_set.header.size = sizeof(res_lib_cfg_crypto_set);
> +	res_lib_cfg_crypto_set.header.id = MESSAGE_RES_CFG_CRYPTO_SET;
> +	res_lib_cfg_crypto_set.header.error = ret;
> +
> +	api->ipc_response_send(conn, &res_lib_cfg_crypto_set,
> +		sizeof(res_lib_cfg_crypto_set));
> +}
> diff --git a/exec/cmap.c b/exec/cmap.c
> new file mode 100644
> index 0000000..447a964
> --- /dev/null
> +++ b/exec/cmap.c
> @@ -0,0 +1,603 @@
> +/*
> + * Copyright (c) 2011 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Author: Jan Friesse (jfriesse@xxxxxxxxxx)
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the Red Hat, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#include <sys/types.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <stdlib.h>
> +#include <errno.h>
> +#include <unistd.h>
> +#include <poll.h>
> +#include <assert.h>
> +
> +#include <qb/qbloop.h>
> +#include <qb/qbipc_common.h>
> +
> +#include <corosync/corotypes.h>
> +#include <corosync/corodefs.h>
> +#include <corosync/list.h>
> +#include <corosync/mar_gen.h>
> +#include <corosync/ipc_cmap.h>
> +#include <corosync/logsys.h>
> +#include <corosync/coroapi.h>
> +#include <corosync/icmap.h>
> +
> +#define hdb_error_to_cs(_result_) qb_to_cs_error(_result_)
> +
> +LOGSYS_DECLARE_SUBSYS ("CMAP");
> +
> +struct cmap_conn_info {
> +	struct hdb_handle_database iter_db;
> +	struct hdb_handle_database track_db;
> +};
> +
> +typedef uint64_t cmap_iter_handle_t;
> +typedef uint64_t cmap_track_handle_t;
> +
> +struct cmap_track_user_data {
> +	void *conn;
> +	cmap_track_handle_t track_handle;
> +	uint64_t track_inst_handle;
> +};
> +
> +static struct corosync_api_v1 *api;
> +
> +static int cmap_exec_init_fn (struct corosync_api_v1 *corosync_api);
> +static int cmap_exec_exit_fn(void);
> +
> +static int cmap_lib_init_fn (void *conn);
> +static int cmap_lib_exit_fn (void *conn);
> +
> +static void message_handler_req_lib_cmap_set(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_delete(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_get(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_adjust_int(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_iter_init(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_iter_next(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_iter_finalize(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_track_add(void *conn, const void *message);
> +static void message_handler_req_lib_cmap_track_delete(void *conn, const void *message);
> +
> +static void cmap_notify_fn(int32_t event,
> +		const char *key_name,
> +		struct icmap_notify_value new_val,
> +		struct icmap_notify_value old_val,
> +		void *user_data);
> +
> +/*
> + * Library Handler Definition
> + */
> +static struct corosync_lib_handler cmap_lib_engine[] =
> +{
> +	{ /* 0 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_set,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 1 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_delete,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 2 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_get,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 3 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_adjust_int,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 4 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_iter_init,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 5 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_iter_next,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 6 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_iter_finalize,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 7 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_track_add,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 8 */
> +		.lib_handler_fn				= message_handler_req_lib_cmap_track_delete,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +};
> +
> +
> +struct corosync_service_engine cmap_service_engine = {
> +	.name				        = "corosync configuration map access",
> +	.id					= CMAP_SERVICE,
> +	.priority				= 1,
> +	.private_data_size			= sizeof(struct cmap_conn_info),
> +	.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> +	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> +	.lib_init_fn				= cmap_lib_init_fn,
> +	.lib_exit_fn				= cmap_lib_exit_fn,
> +	.lib_engine				= cmap_lib_engine,
> +	.lib_engine_count			= sizeof (cmap_lib_engine) / sizeof (struct corosync_lib_handler),
> +	.exec_init_fn				= cmap_exec_init_fn,
> +	.exec_exit_fn				= cmap_exec_exit_fn,
> +};
> +
> +struct corosync_service_engine *cmap_get_service_engine_ver0 (void)
> +{
> +	return (&cmap_service_engine);
> +}
> +
> +static int cmap_exec_exit_fn(void)
> +{
> +	return 0;
> +}
> +
> +static int cmap_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api)
> +{
> +
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +	api = corosync_api;
> +
> +	return (0);
> +}
> +
> +static int cmap_lib_init_fn (void *conn)
> +{
> +	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "lib_init_fn: conn=%p\n", conn);
> +
> +	api->ipc_refcnt_inc(conn);
> +
> +	memset(conn_info, 0, sizeof(*conn_info));
> +	hdb_create(&conn_info->iter_db);
> +	hdb_create(&conn_info->track_db);
> +
> +	return (0);
> +}
> +
> +static int cmap_lib_exit_fn (void *conn)
> +{
> +	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> +	hdb_handle_t iter_handle = 0;
> +	icmap_iter_t *iter;
> +	hdb_handle_t track_handle = 0;
> +	icmap_track_t *track;
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "exit_fn for conn=%p\n", conn);
> +
> +	hdb_iterator_reset(&conn_info->iter_db);
> +        while (hdb_iterator_next(&conn_info->iter_db,
> +                (void*)&iter, &iter_handle) == 0) {
> +
> +		icmap_iter_finalize(*iter);
> +
> +		(void)hdb_handle_put (&conn_info->iter_db, iter_handle);
> +        }
> +
> +	hdb_destroy(&conn_info->iter_db);
> +
> +	hdb_iterator_reset(&conn_info->track_db);
> +        while (hdb_iterator_next(&conn_info->track_db,
> +                (void*)&track, &track_handle) == 0) {
> +
> +		free(icmap_track_get_user_data(*track));
> +
> +		icmap_track_delete(*track);
> +
> +		(void)hdb_handle_put (&conn_info->track_db, track_handle);
> +        }
> +	hdb_destroy(&conn_info->track_db);
> +
> +	api->ipc_refcnt_dec(conn);
> +
> +	return (0);
> +}
> +
> +static void message_handler_req_lib_cmap_set(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_set *req_lib_cmap_set = message;
> +	struct res_lib_cmap_set res_lib_cmap_set;
> +	cs_error_t ret;
> +
> +	if (icmap_is_key_ro((char *)req_lib_cmap_set->key_name.value)) {
> +		ret = CS_ERR_ACCESS;
> +	} else {
> +		ret = icmap_set((char *)req_lib_cmap_set->key_name.value, &req_lib_cmap_set->value,
> +				req_lib_cmap_set->value_len, req_lib_cmap_set->type);
> +	}
> +
> +	memset(&res_lib_cmap_set, 0, sizeof(res_lib_cmap_set));
> +	res_lib_cmap_set.header.size = sizeof(res_lib_cmap_set);
> +	res_lib_cmap_set.header.id = MESSAGE_RES_CMAP_SET;
> +	res_lib_cmap_set.header.error = ret;
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_set, sizeof(res_lib_cmap_set));
> +}
> +
> +static void message_handler_req_lib_cmap_delete(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_set *req_lib_cmap_set = message;
> +	struct res_lib_cmap_delete res_lib_cmap_delete;
> +	cs_error_t ret;
> +
> +	if (icmap_is_key_ro((char *)req_lib_cmap_set->key_name.value)) {
> +		ret = CS_ERR_ACCESS;
> +	} else {
> +		ret = icmap_delete((char *)req_lib_cmap_set->key_name.value);
> +	}
> +
> +	memset(&res_lib_cmap_delete, 0, sizeof(res_lib_cmap_delete));
> +	res_lib_cmap_delete.header.size = sizeof(res_lib_cmap_delete);
> +	res_lib_cmap_delete.header.id = MESSAGE_RES_CMAP_DELETE;
> +	res_lib_cmap_delete.header.error = ret;
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_delete, sizeof(res_lib_cmap_delete));
> +}
> +
> +static void message_handler_req_lib_cmap_get(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_get *req_lib_cmap_get = message;
> +	struct res_lib_cmap_get *res_lib_cmap_get;
> +	struct res_lib_cmap_get error_res_lib_cmap_get;
> +	cs_error_t ret;
> +	size_t value_len;
> +	size_t res_lib_cmap_get_size;
> +	icmap_value_types_t type;
> +	void *value;
> +
> +	value_len = req_lib_cmap_get->value_len;
> +
> +	res_lib_cmap_get_size = sizeof(*res_lib_cmap_get) + value_len;
> +	res_lib_cmap_get = malloc(res_lib_cmap_get_size);
> +	if (res_lib_cmap_get == NULL) {
> +		ret = CS_ERR_NO_MEMORY;
> +		goto error_exit;
> +	}
> +
> +	memset(res_lib_cmap_get, 0, res_lib_cmap_get_size);
> +
> +	if (value_len > 0) {
> +		value = res_lib_cmap_get->value;
> +	} else {
> +		value = NULL;
> +	}
> +
> +	ret = icmap_get((char *)req_lib_cmap_get->key_name.value,
> +			value,
> +			&value_len,
> +			&type);
> +
> +	if (ret != CS_OK) {
> +		free(res_lib_cmap_get);
> +		goto error_exit;
> +	}
> +
> +	res_lib_cmap_get->header.size = res_lib_cmap_get_size;
> +	res_lib_cmap_get->header.id = MESSAGE_RES_CMAP_GET;
> +	res_lib_cmap_get->header.error = ret;
> +	res_lib_cmap_get->type = type;
> +	res_lib_cmap_get->value_len = value_len;
> +
> +	api->ipc_response_send(conn, res_lib_cmap_get, res_lib_cmap_get_size);
> +	free(res_lib_cmap_get);
> +
> +	return ;
> +
> +error_exit:
> +	memset(&error_res_lib_cmap_get, 0, sizeof(error_res_lib_cmap_get));
> +	error_res_lib_cmap_get.header.size = sizeof(error_res_lib_cmap_get);
> +	error_res_lib_cmap_get.header.id = MESSAGE_RES_CMAP_GET;
> +	error_res_lib_cmap_get.header.error = ret;
> +
> +	api->ipc_response_send(conn, &error_res_lib_cmap_get, sizeof(error_res_lib_cmap_get));
> +}
> +
> +static void message_handler_req_lib_cmap_adjust_int(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_adjust_int *req_lib_cmap_adjust_int = message;
> +	struct res_lib_cmap_adjust_int res_lib_cmap_adjust_int;
> +	cs_error_t ret;
> +
> +	ret = icmap_adjust_int((char *)req_lib_cmap_adjust_int->key_name.value, req_lib_cmap_adjust_int->step);
> +
> +	memset(&res_lib_cmap_adjust_int, 0, sizeof(res_lib_cmap_adjust_int));
> +	res_lib_cmap_adjust_int.header.size = sizeof(res_lib_cmap_adjust_int);
> +	res_lib_cmap_adjust_int.header.id = MESSAGE_RES_CMAP_ADJUST_INT;
> +	res_lib_cmap_adjust_int.header.error = ret;
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_adjust_int, sizeof(res_lib_cmap_adjust_int));
> +}
> +
> +static void message_handler_req_lib_cmap_iter_init(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_iter_init *req_lib_cmap_iter_init = message;
> +	struct res_lib_cmap_iter_init res_lib_cmap_iter_init;
> +	cs_error_t ret;
> +	icmap_iter_t iter;
> +	icmap_iter_t *hdb_iter;
> +	cmap_iter_handle_t handle = 0;
> +	const char *prefix;
> +	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> +
> +	if (req_lib_cmap_iter_init->prefix.length > 0) {
> +		prefix = (char *)req_lib_cmap_iter_init->prefix.value;
> +	} else {
> +		prefix = NULL;
> +	}
> +
> +	iter = icmap_iter_init(prefix);
> +	if (iter == NULL) {
> +		ret = CS_ERR_NO_SECTIONS;
> +		goto reply_send;
> +	}
> +
> +	ret = hdb_error_to_cs(hdb_handle_create(&conn_info->iter_db, sizeof(iter), &handle));
> +	if (ret != CS_OK) {
> +		goto reply_send;
> +	}
> +
> +	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->iter_db, handle, (void *)&hdb_iter));
> +	if (ret != CS_OK) {
> +		goto reply_send;
> +	}
> +
> +	*hdb_iter = iter;
> +
> +	(void)hdb_handle_put (&conn_info->iter_db, handle);
> +
> +reply_send:
> +	memset(&res_lib_cmap_iter_init, 0, sizeof(res_lib_cmap_iter_init));
> +	res_lib_cmap_iter_init.header.size = sizeof(res_lib_cmap_iter_init);
> +	res_lib_cmap_iter_init.header.id = MESSAGE_RES_CMAP_ITER_INIT;
> +	res_lib_cmap_iter_init.header.error = ret;
> +	res_lib_cmap_iter_init.iter_handle = handle;
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_iter_init, sizeof(res_lib_cmap_iter_init));
> +}
> +
> +static void message_handler_req_lib_cmap_iter_next(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_iter_next *req_lib_cmap_iter_next = message;
> +	struct res_lib_cmap_iter_next res_lib_cmap_iter_next;
> +	cs_error_t ret;
> +	icmap_iter_t *iter;
> +	size_t value_len;
> +	icmap_value_types_t type;
> +	const char *res = NULL;
> +	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> +
> +	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->iter_db,
> +				req_lib_cmap_iter_next->iter_handle, (void *)&iter));
> +	if (ret != CS_OK) {
> +		goto reply_send;
> +	}
> +
> +	res = icmap_iter_next(*iter, &value_len, &type);
> +	if (res == NULL) {
> +		ret = CS_ERR_NO_SECTIONS;
> +	}
> +
> +	(void)hdb_handle_put (&conn_info->iter_db, req_lib_cmap_iter_next->iter_handle);
> +
> +reply_send:
> +	memset(&res_lib_cmap_iter_next, 0, sizeof(res_lib_cmap_iter_next));
> +	res_lib_cmap_iter_next.header.size = sizeof(res_lib_cmap_iter_next);
> +	res_lib_cmap_iter_next.header.id = MESSAGE_RES_CMAP_ITER_NEXT;
> +	res_lib_cmap_iter_next.header.error = ret;
> +
> +	if (res != NULL) {
> +		res_lib_cmap_iter_next.value_len = value_len;
> +		res_lib_cmap_iter_next.type = type;
> +
> +		memcpy(res_lib_cmap_iter_next.key_name.value, res, strlen(res));
> +	        res_lib_cmap_iter_next.key_name.length = strlen(res);
> +	}
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_iter_next, sizeof(res_lib_cmap_iter_next));
> +}
> +
> +static void message_handler_req_lib_cmap_iter_finalize(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_iter_finalize *req_lib_cmap_iter_finalize = message;
> +	struct res_lib_cmap_iter_finalize res_lib_cmap_iter_finalize;
> +	cs_error_t ret;
> +	icmap_iter_t *iter;
> +	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> +
> +	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->iter_db,
> +				req_lib_cmap_iter_finalize->iter_handle, (void *)&iter));
> +	if (ret != CS_OK) {
> +		goto reply_send;
> +	}
> +
> +	icmap_iter_finalize(*iter);
> +
> +	(void)hdb_handle_destroy(&conn_info->iter_db, req_lib_cmap_iter_finalize->iter_handle);
> +
> +	(void)hdb_handle_put (&conn_info->iter_db, req_lib_cmap_iter_finalize->iter_handle);
> +
> +reply_send:
> +	memset(&res_lib_cmap_iter_finalize, 0, sizeof(res_lib_cmap_iter_finalize));
> +	res_lib_cmap_iter_finalize.header.size = sizeof(res_lib_cmap_iter_finalize);
> +	res_lib_cmap_iter_finalize.header.id = MESSAGE_RES_CMAP_ITER_FINALIZE;
> +	res_lib_cmap_iter_finalize.header.error = ret;
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_iter_finalize, sizeof(res_lib_cmap_iter_finalize));
> +}
> +
> +static void cmap_notify_fn(int32_t event,
> +		const char *key_name,
> +		struct icmap_notify_value new_val,
> +		struct icmap_notify_value old_val,
> +		void *user_data)
> +{
> +	struct cmap_track_user_data *cmap_track_user_data = (struct cmap_track_user_data *)user_data;
> +	struct res_lib_cmap_notify_callback res_lib_cmap_notify_callback;
> +	struct iovec iov[3];
> +
> +	memset(&res_lib_cmap_notify_callback, 0, sizeof(res_lib_cmap_notify_callback));
> +
> +	res_lib_cmap_notify_callback.header.size = sizeof(res_lib_cmap_notify_callback) + new_val.len + old_val.len;
> +	res_lib_cmap_notify_callback.header.id = MESSAGE_RES_CMAP_NOTIFY_CALLBACK;
> +	res_lib_cmap_notify_callback.header.error = CS_OK;
> +
> +	res_lib_cmap_notify_callback.new_value_type = new_val.type;
> +	res_lib_cmap_notify_callback.old_value_type = old_val.type;
> +	res_lib_cmap_notify_callback.new_value_len = new_val.len;
> +	res_lib_cmap_notify_callback.old_value_len = old_val.len;
> +	res_lib_cmap_notify_callback.event = event;
> +	res_lib_cmap_notify_callback.key_name.length = strlen(key_name);
> +	res_lib_cmap_notify_callback.track_inst_handle = cmap_track_user_data->track_inst_handle;
> +
> +	memcpy(res_lib_cmap_notify_callback.key_name.value, key_name, strlen(key_name));
> +
> +	iov[0].iov_base = (char *)&res_lib_cmap_notify_callback;
> +	iov[0].iov_len = sizeof(res_lib_cmap_notify_callback);
> +	iov[1].iov_base = (char *)new_val.data;
> +	iov[1].iov_len = new_val.len;
> +	iov[2].iov_base = (char *)old_val.data;
> +	iov[2].iov_len = old_val.len;
> +
> +	api->ipc_dispatch_iov_send(cmap_track_user_data->conn, iov, 3);
> +}
> +
> +static void message_handler_req_lib_cmap_track_add(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_track_add *req_lib_cmap_track_add = message;
> +	struct res_lib_cmap_track_add res_lib_cmap_track_add;
> +	cs_error_t ret;
> +	cmap_track_handle_t handle;
> +	icmap_track_t track;
> +	icmap_track_t *hdb_track;
> +	struct cmap_track_user_data *cmap_track_user_data;
> +	const char *key_name;
> +
> +	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> +
> +	cmap_track_user_data = malloc(sizeof(*cmap_track_user_data));
> +	if (cmap_track_user_data == NULL) {
> +		ret = CS_ERR_NO_MEMORY;
> +
> +		goto reply_send;
> +	}
> +	memset(cmap_track_user_data, 0, sizeof(*cmap_track_user_data));
> +
> +	if (req_lib_cmap_track_add->key_name.length > 0) {
> +		key_name = (char *)req_lib_cmap_track_add->key_name.value;
> +	} else {
> +		key_name = NULL;
> +	}
> +
> +	ret = icmap_track_add(key_name,
> +			req_lib_cmap_track_add->track_type,
> +			cmap_notify_fn,
> +			cmap_track_user_data,
> +			&track);
> +	if (ret != CS_OK) {
> +		free(cmap_track_user_data);
> +
> +		goto reply_send;
> +	}
> +
> +	ret = hdb_error_to_cs(hdb_handle_create(&conn_info->track_db, sizeof(track), &handle));
> +	if (ret != CS_OK) {
> +		free(cmap_track_user_data);
> +
> +		goto reply_send;
> +	}
> +
> +	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->track_db, handle, (void *)&hdb_track));
> +	if (ret != CS_OK) {
> +		free(cmap_track_user_data);
> +
> +		goto reply_send;
> +	}
> +
> +	*hdb_track = track;
> +	cmap_track_user_data->conn = conn;
> +	cmap_track_user_data->track_handle = handle;
> +	cmap_track_user_data->track_inst_handle = req_lib_cmap_track_add->track_inst_handle;
> +
> +	(void)hdb_handle_put (&conn_info->track_db, handle);
> +
> +reply_send:
> +	memset(&res_lib_cmap_track_add, 0, sizeof(res_lib_cmap_track_add));
> +	res_lib_cmap_track_add.header.size = sizeof(res_lib_cmap_track_add);
> +	res_lib_cmap_track_add.header.id = MESSAGE_RES_CMAP_TRACK_ADD;
> +	res_lib_cmap_track_add.header.error = ret;
> +	res_lib_cmap_track_add.track_handle = handle;
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_track_add, sizeof(res_lib_cmap_track_add));
> +}
> +
> +static void message_handler_req_lib_cmap_track_delete(void *conn, const void *message)
> +{
> +	const struct req_lib_cmap_track_delete *req_lib_cmap_track_delete = message;
> +	struct res_lib_cmap_track_delete res_lib_cmap_track_delete;
> +	cs_error_t ret;
> +	icmap_track_t *track;
> +	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> +	uint64_t track_inst_handle = 0;
> +
> +	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->track_db,
> +				req_lib_cmap_track_delete->track_handle, (void *)&track));
> +	if (ret != CS_OK) {
> +		goto reply_send;
> +	}
> +
> +	track_inst_handle = ((struct cmap_track_user_data *)icmap_track_get_user_data(*track))->track_inst_handle;
> +
> +	free(icmap_track_get_user_data(*track));
> +
> +	ret = icmap_track_delete(*track);
> +
> +	(void)hdb_handle_put (&conn_info->track_db, req_lib_cmap_track_delete->track_handle);
> +	(void)hdb_handle_destroy(&conn_info->track_db, req_lib_cmap_track_delete->track_handle);
> +
> +reply_send:
> +	memset(&res_lib_cmap_track_delete, 0, sizeof(res_lib_cmap_track_delete));
> +	res_lib_cmap_track_delete.header.size = sizeof(res_lib_cmap_track_delete);
> +	res_lib_cmap_track_delete.header.id = MESSAGE_RES_CMAP_TRACK_DELETE;
> +	res_lib_cmap_track_delete.header.error = ret;
> +	res_lib_cmap_track_delete.track_inst_handle = track_inst_handle;
> +
> +	api->ipc_response_send(conn, &res_lib_cmap_track_delete, sizeof(res_lib_cmap_track_delete));
> +}
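
A remark on the cmap iterator handlers above: they are thin IPC wrappers around the icmap iterator, and the per-connection handle databases only exist so iterators and tracks can be cleaned up when the connection goes away. For reference, a sketch of the underlying loop the three handlers expose (icmap_iter_init() is not part of this hunk, so its prototype and the header location are assumptions on my side; the icmap_iter_next()/icmap_iter_finalize() calls mirror the handlers verbatim):

	#include <stdio.h>
	#include <corosync/icmap.h>	/* header location is an assumption */

	/*
	 * Sketch of the loop the three iter handlers expose over IPC.
	 * icmap_iter_init() is assumed to take a key prefix and return an
	 * icmap_iter_t; icmap_iter_next() returns the next key name or NULL.
	 */
	static void dump_keys_with_prefix (const char *prefix)
	{
		icmap_iter_t iter;
		const char *key;
		size_t value_len;
		icmap_value_types_t type;

		iter = icmap_iter_init (prefix);
		if (iter == NULL) {
			return;
		}

		while ((key = icmap_iter_next (iter, &value_len, &type)) != NULL) {
			printf ("%s (type %d, %zu bytes)\n", key, (int) type, value_len);
		}

		icmap_iter_finalize (iter);
	}
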
> diff --git a/exec/coroparse.c b/exec/coroparse.c
> index 417f584..d1c8ae4 100644
> --- a/exec/coroparse.c
> +++ b/exec/coroparse.c
> @@ -186,7 +186,7 @@ static char *strchr_rs (const char *haystack, int byte)
>  	return ((char *) end_address);
>  }
>  
> -static int aisparser_readconfig (const char **error_string)
> +int coroparse_configparse (const char **error_string)
>  {
>  	if (read_config_file_into_icmap(error_string)) {
>  		return -1;
> @@ -195,7 +195,6 @@ static int aisparser_readconfig (const char **error_string)
>  	return 0;
>  }
>  
> -
>  static char *remove_whitespace(char *string)
>  {
>  	char *start;
> @@ -1010,43 +1009,3 @@ static int read_config_file_into_icmap(
>  
>  	return res;
>  }
> -
> -/*
> - * Dynamic Loader definition
> - */
> -
> -struct config_iface_ver0 aisparser_iface_ver0 = {
> -	.config_readconfig        = aisparser_readconfig
> -};
> -
> -struct lcr_iface corosync_aisparser_ver0[1] = {
> -	{
> -		.name				= "corosync_parser",
> -		.version			= 0,
> -		.versions_replace		= 0,
> -		.versions_replace_count		= 0,
> -		.dependencies			= 0,
> -		.dependency_count		= 0,
> -		.constructor			= NULL,
> -		.destructor			= NULL,
> -		.interfaces			= NULL,
> -	}
> -};
> -
> -struct corosync_service_handler *aisparser_get_handler_ver0 (void);
> -
> -struct lcr_comp aisparser_comp_ver0 = {
> -	.iface_count				= 1,
> -	.ifaces					= corosync_aisparser_ver0
> -};
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -        lcr_interfaces_set (&corosync_aisparser_ver0[0], &aisparser_iface_ver0);
> -	lcr_component_register (&aisparser_comp_ver0);
> -}
> diff --git a/exec/cpg.c b/exec/cpg.c
> new file mode 100644
> index 0000000..b8044a2
> --- /dev/null
> +++ b/exec/cpg.c
> @@ -0,0 +1,2064 @@
> +/*
> + * Copyright (c) 2006-2009 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Author: Christine Caulfield (ccaulfie@xxxxxxxxxx)
> + * Author: Jan Friesse (jfriesse@xxxxxxxxxx)
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the MontaVista Software, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#ifdef HAVE_ALLOCA_H
> +#include <alloca.h>
> +#endif
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <sys/un.h>
> +#include <sys/ioctl.h>
> +#include <netinet/in.h>
> +#include <sys/uio.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <time.h>
> +#include <assert.h>
> +#include <unistd.h>
> +#include <netinet/in.h>
> +#include <arpa/inet.h>
> +#include <sys/mman.h>
> +#include <qb/qbmap.h>
> +
> +#include <corosync/corotypes.h>
> +#include <qb/qbipc_common.h>
> +#include <corosync/corodefs.h>
> +#include <corosync/list.h>
> +#include <corosync/jhash.h>
> +#include <corosync/logsys.h>
> +#include <corosync/coroapi.h>
> +
> +#include <corosync/cpg.h>
> +#include <corosync/ipc_cpg.h>
> +
> +LOGSYS_DECLARE_SUBSYS ("CPG");
> +
> +#define GROUP_HASH_SIZE 32
> +
> +enum cpg_message_req_types {
> +	MESSAGE_REQ_EXEC_CPG_PROCJOIN = 0,
> +	MESSAGE_REQ_EXEC_CPG_PROCLEAVE = 1,
> +	MESSAGE_REQ_EXEC_CPG_JOINLIST = 2,
> +	MESSAGE_REQ_EXEC_CPG_MCAST = 3,
> +	MESSAGE_REQ_EXEC_CPG_DOWNLIST_OLD = 4,
> +	MESSAGE_REQ_EXEC_CPG_DOWNLIST = 5
> +};
> +
> +struct zcb_mapped {
> +	struct list_head list;
> +	void *addr;
> +	size_t size;
> +};
> +/*
> + * state		exec deliver
> + * match group name, pid -> if matched deliver for YES:
> + * XXX indicates impossible state
> + *
> + *			join			leave			mcast
> + * UNJOINED		XXX			XXX			NO
> + * LEAVE_STARTED	XXX			YES(unjoined_enter)	YES
> + * JOIN_STARTED		YES(join_started_enter)	XXX			NO
> + * JOIN_COMPLETED	XXX			NO			YES
> + *
> + * join_started_enter
> + * 	set JOIN_COMPLETED
> + *	add entry to process_info list
> + * unjoined_enter
> + *	set UNJOINED
> + *	delete entry from process_info list
> + *
> + *
> + *			library accept join error codes
> + * UNJOINED		YES(CS_OK) 			set JOIN_STARTED
> + * LEAVE_STARTED	NO(CS_ERR_BUSY)
> + * JOIN_STARTED		NO(CS_ERR_EXIST)
> + * JOIN_COMPLETED	NO(CS_ERR_EXIST)
> + *
> + *			library accept leave error codes
> + * UNJOINED		NO(CS_ERR_NOT_EXIST)
> + * LEAVE_STARTED	NO(CS_ERR_NOT_EXIST)
> + * JOIN_STARTED		NO(CS_ERR_BUSY)
> + * JOIN_COMPLETED	YES(CS_OK)			set LEAVE_STARTED
> + *
> + *			library accept mcast
> + * UNJOINED		NO(CS_ERR_NOT_EXIST)
> + * LEAVE_STARTED	NO(CS_ERR_NOT_EXIST)
> + * JOIN_STARTED		YES(CS_OK)
> + * JOIN_COMPLETED	YES(CS_OK)
> + */
> +enum cpd_state {
> +	CPD_STATE_UNJOINED,
> +	CPD_STATE_LEAVE_STARTED,
> +	CPD_STATE_JOIN_STARTED,
> +	CPD_STATE_JOIN_COMPLETED
> +};
> +
> +enum cpg_sync_state {
> +	CPGSYNC_DOWNLIST,
> +	CPGSYNC_JOINLIST
> +};
> +
> +enum cpg_downlist_state_e {
> +       CPG_DOWNLIST_NONE,
> +       CPG_DOWNLIST_WAITING_FOR_MESSAGES,
> +       CPG_DOWNLIST_APPLYING,
> +};
> +static enum cpg_downlist_state_e downlist_state;
> +static struct list_head downlist_messages_head;
> +
> +struct cpg_pd {
> +	void *conn;
> + 	mar_cpg_name_t group_name;
> +	uint32_t pid;
> +	enum cpd_state cpd_state;
> +	unsigned int flags;
> +	int initial_totem_conf_sent;
> +	struct list_head list;
> +	struct list_head iteration_instance_list_head;
> +	struct list_head zcb_mapped_list_head;
> +};
> +
> +struct cpg_iteration_instance {
> +	hdb_handle_t handle;
> +	struct list_head list;
> +	struct list_head items_list_head; /* List of process_info */
> +	struct list_head *current_pointer;
> +};
> +
> +DECLARE_HDB_DATABASE(cpg_iteration_handle_t_db,NULL);
> +
> +DECLARE_LIST_INIT(cpg_pd_list_head);
> +
> +static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
> +
> +static unsigned int my_member_list_entries;
> +
> +static unsigned int my_old_member_list[PROCESSOR_COUNT_MAX];
> +
> +static unsigned int my_old_member_list_entries = 0;
> +
> +static struct corosync_api_v1 *api = NULL;
> +
> +static enum cpg_sync_state my_sync_state = CPGSYNC_DOWNLIST;
> +
> +static mar_cpg_ring_id_t last_sync_ring_id;
> +
> +struct process_info {
> +	unsigned int nodeid;
> +	uint32_t pid;
> +	mar_cpg_name_t group;
> +	struct list_head list; /* on the group_info members list */
> +};
> +DECLARE_LIST_INIT(process_info_list_head);
> +
> +struct join_list_entry {
> +	uint32_t pid;
> +	mar_cpg_name_t group_name;
> +};
> +
> +/*
> + * Service Interfaces required by service_message_handler struct
> + */
> +static int cpg_exec_init_fn (struct corosync_api_v1 *);
> +
> +static int cpg_lib_init_fn (void *conn);
> +
> +static int cpg_lib_exit_fn (void *conn);
> +
> +static void message_handler_req_exec_cpg_procjoin (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void message_handler_req_exec_cpg_procleave (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void message_handler_req_exec_cpg_joinlist (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void message_handler_req_exec_cpg_mcast (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void message_handler_req_exec_cpg_downlist_old (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void message_handler_req_exec_cpg_downlist (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void exec_cpg_procjoin_endian_convert (void *msg);
> +
> +static void exec_cpg_joinlist_endian_convert (void *msg);
> +
> +static void exec_cpg_mcast_endian_convert (void *msg);
> +
> +static void exec_cpg_downlist_endian_convert_old (void *msg);
> +
> +static void exec_cpg_downlist_endian_convert (void *msg);
> +
> +static void message_handler_req_lib_cpg_join (void *conn, const void *message);
> +
> +static void message_handler_req_lib_cpg_leave (void *conn, const void *message);
> +
> +static void message_handler_req_lib_cpg_finalize (void *conn, const void *message);
> +
> +static void message_handler_req_lib_cpg_mcast (void *conn, const void *message);
> +
> +static void message_handler_req_lib_cpg_membership (void *conn,
> +						    const void *message);
> +
> +static void message_handler_req_lib_cpg_local_get (void *conn,
> +						   const void *message);
> +
> +static void message_handler_req_lib_cpg_iteration_initialize (
> +	void *conn,
> +	const void *message);
> +
> +static void message_handler_req_lib_cpg_iteration_next (
> +	void *conn,
> +	const void *message);
> +
> +static void message_handler_req_lib_cpg_iteration_finalize (
> +	void *conn,
> +	const void *message);
> +
> +static void message_handler_req_lib_cpg_zc_alloc (
> +	void *conn,
> +	const void *message);
> +
> +static void message_handler_req_lib_cpg_zc_free (
> +	void *conn,
> +	const void *message);
> +
> +static void message_handler_req_lib_cpg_zc_execute (
> +	void *conn,
> +	const void *message);
> +
> +static int cpg_node_joinleave_send (unsigned int pid, const mar_cpg_name_t *group_name, int fn, int reason);
> +
> +static int cpg_exec_send_downlist(void);
> +
> +static int cpg_exec_send_joinlist(void);
> +
> +static void downlist_messages_delete (void);
> +
> +static void downlist_master_choose_and_send (void);
> +
> +static void cpg_sync_init_v2 (
> +	const unsigned int *trans_list,
> +	size_t trans_list_entries,
> +	const unsigned int *member_list,
> +	size_t member_list_entries,
> +	const struct memb_ring_id *ring_id);
> +
> +static int  cpg_sync_process (void);
> +
> +static void cpg_sync_activate (void);
> +
> +static void cpg_sync_abort (void);
> +
> +static int notify_lib_totem_membership (
> +	void *conn,
> +	int member_list_entries,
> +	const unsigned int *member_list);
> +
> +static inline int zcb_all_free (
> +	struct cpg_pd *cpd);
> +
> +/*
> + * Library Handler Definition
> + */
> +static struct corosync_lib_handler cpg_lib_engine[] =
> +{
> +	{ /* 0 - MESSAGE_REQ_CPG_JOIN */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_join,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 1 - MESSAGE_REQ_CPG_LEAVE */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_leave,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 2 - MESSAGE_REQ_CPG_MCAST */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_mcast,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 3 - MESSAGE_REQ_CPG_MEMBERSHIP */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_membership,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 4 - MESSAGE_REQ_CPG_LOCAL_GET */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_local_get,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 5 - MESSAGE_REQ_CPG_ITERATIONINITIALIZE */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_iteration_initialize,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 6 - MESSAGE_REQ_CPG_ITERATIONNEXT */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_iteration_next,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 7 - MESSAGE_REQ_CPG_ITERATIONFINALIZE */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_iteration_finalize,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 8 - MESSAGE_REQ_CPG_FINALIZE */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_finalize,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 9 */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_zc_alloc,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 10 */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_zc_free,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 11 */
> +		.lib_handler_fn				= message_handler_req_lib_cpg_zc_execute,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +
> +
> +};
> +
> +static struct corosync_exec_handler cpg_exec_engine[] =
> +{
> +	{ /* 0 - MESSAGE_REQ_EXEC_CPG_PROCJOIN */
> +		.exec_handler_fn	= message_handler_req_exec_cpg_procjoin,
> +		.exec_endian_convert_fn	= exec_cpg_procjoin_endian_convert
> +	},
> +	{ /* 1 - MESSAGE_REQ_EXEC_CPG_PROCLEAVE */
> +		.exec_handler_fn	= message_handler_req_exec_cpg_procleave,
> +		.exec_endian_convert_fn	= exec_cpg_procjoin_endian_convert
> +	},
> +	{ /* 2 - MESSAGE_REQ_EXEC_CPG_JOINLIST */
> +		.exec_handler_fn	= message_handler_req_exec_cpg_joinlist,
> +		.exec_endian_convert_fn	= exec_cpg_joinlist_endian_convert
> +	},
> +	{ /* 3 - MESSAGE_REQ_EXEC_CPG_MCAST */
> +		.exec_handler_fn	= message_handler_req_exec_cpg_mcast,
> +		.exec_endian_convert_fn	= exec_cpg_mcast_endian_convert
> +	},
> +	{ /* 4 - MESSAGE_REQ_EXEC_CPG_DOWNLIST_OLD */
> +		.exec_handler_fn	= message_handler_req_exec_cpg_downlist_old,
> +		.exec_endian_convert_fn	= exec_cpg_downlist_endian_convert_old
> +	},
> +	{ /* 5 - MESSAGE_REQ_EXEC_CPG_DOWNLIST */
> +		.exec_handler_fn	= message_handler_req_exec_cpg_downlist,
> +		.exec_endian_convert_fn	= exec_cpg_downlist_endian_convert
> +	},
> +};
> +
> +struct corosync_service_engine cpg_service_engine = {
> +	.name				        = "corosync cluster closed process group service v1.01",
> +	.id					= CPG_SERVICE,
> +	.priority				= 1,
> +	.private_data_size			= sizeof (struct cpg_pd),
> +	.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED,
> +	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> +	.lib_init_fn				= cpg_lib_init_fn,
> +	.lib_exit_fn				= cpg_lib_exit_fn,
> +	.lib_engine				= cpg_lib_engine,
> +	.lib_engine_count			= sizeof (cpg_lib_engine) / sizeof (struct corosync_lib_handler),
> +	.exec_init_fn				= cpg_exec_init_fn,
> +	.exec_dump_fn				= NULL,
> +	.exec_engine				= cpg_exec_engine,
> +	.exec_engine_count		        = sizeof (cpg_exec_engine) / sizeof (struct corosync_exec_handler),
> +	.sync_mode				= CS_SYNC_V1_APIV2,
> +	.sync_init                              = (sync_init_v1_fn_t)cpg_sync_init_v2,
> +	.sync_process                           = cpg_sync_process,
> +	.sync_activate                          = cpg_sync_activate,
> +	.sync_abort                             = cpg_sync_abort
> +};
> +
> +struct corosync_service_engine *cpg_get_service_engine_ver0 (void)
> +{
> +	return (&cpg_service_engine);
> +}
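
Since the LCR component registration is gone from cpg as well, the engine is now exported through this plain getter. Presumably service.c ends up holding a static table of such getters; a purely hypothetical sketch of what that could look like (the actual table is not in this hunk, so treat names and layout as assumptions):

	/* hypothetical sketch of a built-in service table in service.c */
	struct corosync_service_engine;

	struct corosync_service_engine *cpg_get_service_engine_ver0 (void);

	static struct corosync_service_engine *(*service_ver0_getters[]) (void) = {
		cpg_get_service_engine_ver0,
		/* one entry per built-in service */
	};
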
> +
> +struct req_exec_cpg_procjoin {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +	mar_cpg_name_t group_name __attribute__((aligned(8)));
> +	mar_uint32_t pid __attribute__((aligned(8)));
> +	mar_uint32_t reason __attribute__((aligned(8)));
> +};
> +
> +struct req_exec_cpg_mcast {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +	mar_cpg_name_t group_name __attribute__((aligned(8)));
> +	mar_uint32_t msglen __attribute__((aligned(8)));
> +	mar_uint32_t pid __attribute__((aligned(8)));
> +	mar_message_source_t source __attribute__((aligned(8)));
> +	mar_uint8_t message[] __attribute__((aligned(8)));
> +};
> +
> +struct req_exec_cpg_downlist_old {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +	mar_uint32_t left_nodes __attribute__((aligned(8)));
> +	mar_uint32_t nodeids[PROCESSOR_COUNT_MAX]  __attribute__((aligned(8)));
> +};
> +
> +struct req_exec_cpg_downlist {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +	/* merge decisions */
> +	mar_uint32_t old_members __attribute__((aligned(8)));
> +	/* downlist below */
> +	mar_uint32_t left_nodes __attribute__((aligned(8)));
> +	mar_uint32_t nodeids[PROCESSOR_COUNT_MAX]  __attribute__((aligned(8)));
> +};
> +
> +struct downlist_msg {
> +	mar_uint32_t sender_nodeid;
> +	mar_uint32_t old_members __attribute__((aligned(8)));
> +	mar_uint32_t left_nodes __attribute__((aligned(8)));
> +	mar_uint32_t nodeids[PROCESSOR_COUNT_MAX]  __attribute__((aligned(8)));
> +	struct list_head list;
> +};
> +
> +static struct req_exec_cpg_downlist g_req_exec_cpg_downlist;
> +
> +static void cpg_sync_init_v2 (
> +	const unsigned int *trans_list,
> +	size_t trans_list_entries,
> +	const unsigned int *member_list,
> +	size_t member_list_entries,
> +	const struct memb_ring_id *ring_id)
> +{
> +	int entries;
> +	int i, j;
> +	int found;
> +
> +	my_sync_state = CPGSYNC_DOWNLIST;
> +
> +	memcpy (my_member_list, member_list, member_list_entries *
> +		sizeof (unsigned int));
> +	my_member_list_entries = member_list_entries;
> +
> +	last_sync_ring_id.nodeid = ring_id->rep.nodeid;
> +	last_sync_ring_id.seq = ring_id->seq;
> +
> +	downlist_state = CPG_DOWNLIST_WAITING_FOR_MESSAGES;
> +
> +	entries = 0;
> +	/*
> +	 * Determine list of nodeids for downlist message
> +	 */
> +	for (i = 0; i < my_old_member_list_entries; i++) {
> +		found = 0;
> +		for (j = 0; j < trans_list_entries; j++) {
> +			if (my_old_member_list[i] == trans_list[j]) {
> +				found = 1;
> +				break;
> +			}
> +		}
> +		if (found == 0) {
> +			g_req_exec_cpg_downlist.nodeids[entries++] =
> +				my_old_member_list[i];
> +		}
> +	}
> +	g_req_exec_cpg_downlist.left_nodes = entries;
> +}
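
For readers following the sync rework: the downlist built here is simply the set of nodes from the previous membership that are absent from the transitional list. A small standalone illustration with made-up node ids:

	#include <stdio.h>
	#include <stddef.h>

	/*
	 * Standalone illustration (sample data only) of the downlist
	 * computation above: every node from the previous membership that is
	 * missing from the transitional membership is reported as left.
	 */
	static size_t compute_downlist (const unsigned int *old_members, size_t old_entries,
		const unsigned int *trans_list, size_t trans_entries,
		unsigned int *downlist)
	{
		size_t i, j, entries = 0;

		for (i = 0; i < old_entries; i++) {
			int found = 0;

			for (j = 0; j < trans_entries; j++) {
				if (old_members[i] == trans_list[j]) {
					found = 1;
					break;
				}
			}
			if (!found) {
				downlist[entries++] = old_members[i];
			}
		}
		return entries;
	}

	int main (void)
	{
		unsigned int old_members[] = { 1, 2, 3, 4 };
		unsigned int trans_list[] = { 1, 3 };
		unsigned int downlist[4];
		size_t n, i;

		n = compute_downlist (old_members, 4, trans_list, 2, downlist);
		for (i = 0; i < n; i++) {
			printf ("node %u left\n", downlist[i]);	/* prints 2 and 4 */
		}
		return 0;
	}
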
> +
> +static int cpg_sync_process (void)
> +{
> +	int res = -1;
> +
> +	if (my_sync_state == CPGSYNC_DOWNLIST) {
> +		res = cpg_exec_send_downlist();
> +		if (res == -1) {
> +			return (-1);
> +		}
> +		my_sync_state = CPGSYNC_JOINLIST;
> +	}
> +	if (my_sync_state == CPGSYNC_JOINLIST) {
> +		res = cpg_exec_send_joinlist();
> +	}
> +	return (res);
> +}
> +
> +static void cpg_sync_activate (void)
> +{
> +	memcpy (my_old_member_list, my_member_list,
> +		my_member_list_entries * sizeof (unsigned int));
> +	my_old_member_list_entries = my_member_list_entries;
> +
> +	if (downlist_state == CPG_DOWNLIST_WAITING_FOR_MESSAGES) {
> +		downlist_master_choose_and_send ();
> +	}
> +
> +	downlist_messages_delete ();
> +	downlist_state = CPG_DOWNLIST_NONE;
> +
> +	notify_lib_totem_membership (NULL, my_member_list_entries, my_member_list);
> +}
> +
> +static void cpg_sync_abort (void)
> +{
> +	downlist_state = CPG_DOWNLIST_NONE;
> +	downlist_messages_delete ();
> +}
> +
> +static int notify_lib_totem_membership (
> +	void *conn,
> +	int member_list_entries,
> +	const unsigned int *member_list)
> +{
> +	struct list_head *iter;
> +	char *buf;
> +	int size;
> +	struct res_lib_cpg_totem_confchg_callback *res;
> +
> +	size = sizeof(struct res_lib_cpg_totem_confchg_callback) +
> +		sizeof(mar_uint32_t) * (member_list_entries);
> +	buf = alloca(size);
> +	if (!buf)
> +		return CS_ERR_LIBRARY;
> +
> +	res = (struct res_lib_cpg_totem_confchg_callback *)buf;
> +	res->member_list_entries = member_list_entries;
> +	res->header.size = size;
> +	res->header.id = MESSAGE_RES_CPG_TOTEM_CONFCHG_CALLBACK;
> +	res->header.error = CS_OK;
> +
> +	memcpy (&res->ring_id, &last_sync_ring_id, sizeof (mar_cpg_ring_id_t));
> +	memcpy (res->member_list, member_list, res->member_list_entries * sizeof (mar_uint32_t));
> +
> +	if (conn == NULL) {
> +		for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> +			struct cpg_pd *cpg_pd = list_entry (iter, struct cpg_pd, list);
> +			api->ipc_dispatch_send (cpg_pd->conn, buf, size);
> +		}
> +	} else {
> +		api->ipc_dispatch_send (conn, buf, size);
> +	}
> +
> +	return CS_OK;
> +}
> +
> +static int notify_lib_joinlist(
> +	const mar_cpg_name_t *group_name,
> +	void *conn,
> +	int joined_list_entries,
> +	mar_cpg_address_t *joined_list,
> +	int left_list_entries,
> +	mar_cpg_address_t *left_list,
> +	int id)
> +{
> +	int size;
> +	char *buf;
> +	struct list_head *iter;
> +	int count;
> +	struct res_lib_cpg_confchg_callback *res;
> +	mar_cpg_address_t *retgi;
> +
> +	count = 0;
> +
> +	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> +		struct process_info *pi = list_entry (iter, struct process_info, list);
> +		if (mar_name_compare (&pi->group, group_name) == 0) {
> +			int i;
> +			int founded = 0;
> +
> +			for (i = 0; i < left_list_entries; i++) {
> +				if (left_list[i].nodeid == pi->nodeid && left_list[i].pid == pi->pid) {
> +					founded++;
> +				}
> +			}
> +
> +			if (!founded)
> +				count++;
> +		}
> +	}
> +
> +	size = sizeof(struct res_lib_cpg_confchg_callback) +
> +		sizeof(mar_cpg_address_t) * (count + left_list_entries + joined_list_entries);
> +	buf = alloca(size);
> +	if (!buf)
> +		return CS_ERR_LIBRARY;
> +
> +	res = (struct res_lib_cpg_confchg_callback *)buf;
> +	res->joined_list_entries = joined_list_entries;
> +	res->left_list_entries = left_list_entries;
> +	res->member_list_entries = count;
> +	retgi = res->member_list;
> +	res->header.size = size;
> +	res->header.id = id;
> +	res->header.error = CS_OK;
> +	memcpy(&res->group_name, group_name, sizeof(mar_cpg_name_t));
> +
> +	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> +		struct process_info *pi=list_entry (iter, struct process_info, list);
> +
> +		if (mar_name_compare (&pi->group, group_name) == 0) {
> +			int i;
> +			int founded = 0;
> +
> +			for (i = 0;i < left_list_entries; i++) {
> +				if (left_list[i].nodeid == pi->nodeid && left_list[i].pid == pi->pid) {
> +					founded++;
> +				}
> +			}
> +
> +			if (!founded) {
> +				retgi->nodeid = pi->nodeid;
> +				retgi->pid = pi->pid;
> +				retgi++;
> +			}
> +		}
> +	}
> +
> +	if (left_list_entries) {
> +		memcpy (retgi, left_list, left_list_entries * sizeof(mar_cpg_address_t));
> +		retgi += left_list_entries;
> +	}
> +
> +	if (joined_list_entries) {
> +		memcpy (retgi, joined_list, joined_list_entries * sizeof(mar_cpg_address_t));
> +		retgi += joined_list_entries;
> +	}
> +
> +	if (conn) {
> +		api->ipc_dispatch_send (conn, buf, size);
> +	} else {
> +		for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> +			struct cpg_pd *cpd = list_entry (iter, struct cpg_pd, list);
> +			if (mar_name_compare (&cpd->group_name, group_name) == 0) {
> +				assert (joined_list_entries <= 1);
> +				if (joined_list_entries) {
> +					if (joined_list[0].pid == cpd->pid &&
> +						joined_list[0].nodeid == api->totem_nodeid_get()) {
> +						cpd->cpd_state = CPD_STATE_JOIN_COMPLETED;
> +					}
> +				}
> +				if (cpd->cpd_state == CPD_STATE_JOIN_COMPLETED ||
> +					cpd->cpd_state == CPD_STATE_LEAVE_STARTED) {
> +
> +					api->ipc_dispatch_send (cpd->conn, buf, size);
> +				}
> +				if (left_list_entries) {
> +					if (left_list[0].pid == cpd->pid &&
> +						left_list[0].nodeid == api->totem_nodeid_get() &&
> +						left_list[0].reason == CONFCHG_CPG_REASON_LEAVE) {
> +
> +						cpd->pid = 0;
> +						memset (&cpd->group_name, 0, sizeof(cpd->group_name));
> +						cpd->cpd_state = CPD_STATE_UNJOINED;
> +					}
> +				}
> +			}
> +		}
> +	}
> +
> +
> +	/*
> +	 * Traverse through the cpds and send the totem membership to each cpd that has not received it yet
> +	 */
> +	for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> +		struct cpg_pd *cpd = list_entry (iter, struct cpg_pd, list);
> +
> +		if ((cpd->flags & CPG_MODEL_V1_DELIVER_INITIAL_TOTEM_CONF) && (cpd->initial_totem_conf_sent == 0)) {
> +			cpd->initial_totem_conf_sent = 1;
> +
> +			notify_lib_totem_membership (cpd->conn, my_old_member_list_entries, my_old_member_list);
> +		}
> +	}
> +
> +	return CS_OK;
> +}
> +
> +static void downlist_log(const char *msg, struct downlist_msg* dl)
> +{
> +	log_printf (LOG_DEBUG,
> +		    "%s: sender %s; members(old:%d left:%d)",
> +		    msg,
> +		    api->totem_ifaces_print(dl->sender_nodeid),
> +		    dl->old_members,
> +		    dl->left_nodes);
> +}
> +
> +static struct downlist_msg* downlist_master_choose (void)
> +{
> +	struct downlist_msg *cmp;
> +	struct downlist_msg *best = NULL;
> +	struct list_head *iter;
> +	uint32_t cmp_members;
> +	uint32_t best_members;
> +
> +	for (iter = downlist_messages_head.next;
> +		iter != &downlist_messages_head;
> +		iter = iter->next) {
> +
> +		cmp = list_entry(iter, struct downlist_msg, list);
> +		downlist_log("comparing", cmp);
> +		if (best == NULL) {
> +			best = cmp;
> +			continue;
> +		}
> +		best_members = best->old_members - best->left_nodes;
> +		cmp_members = cmp->old_members - cmp->left_nodes;
> +
> +		if (cmp_members < best_members) {
> +			continue;
> +		}
> +		else if (cmp_members > best_members) {
> +			best = cmp;
> +		}
> +		else if (cmp->sender_nodeid < best->sender_nodeid) {
> +			best = cmp;
> +		}
> +
> +	}
> +	return best;
> +}
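
The master selection above picks the sender that saw the largest surviving membership (old_members - left_nodes), breaking ties with the lowest sender nodeid. A standalone example with made-up messages that exercises both rules:

	#include <stdio.h>
	#include <stdint.h>

	/* sample data only; mirrors the ordering used by downlist_master_choose() */
	struct dl { uint32_t sender_nodeid; uint32_t old_members; uint32_t left_nodes; };

	int main (void)
	{
		struct dl msgs[] = {
			{ .sender_nodeid = 5, .old_members = 4, .left_nodes = 2 },	/* survivors: 2 */
			{ .sender_nodeid = 1, .old_members = 4, .left_nodes = 1 },	/* survivors: 3 */
			{ .sender_nodeid = 3, .old_members = 4, .left_nodes = 1 },	/* survivors: 3, higher nodeid */
		};
		const struct dl *best = NULL;
		size_t i;

		for (i = 0; i < sizeof (msgs) / sizeof (msgs[0]); i++) {
			uint32_t cmp = msgs[i].old_members - msgs[i].left_nodes;

			if (best == NULL ||
			    cmp > best->old_members - best->left_nodes ||
			    (cmp == best->old_members - best->left_nodes &&
			     msgs[i].sender_nodeid < best->sender_nodeid)) {
				best = &msgs[i];
			}
		}
		printf ("chosen sender: %u\n", best->sender_nodeid);	/* prints 1 */
		return 0;
	}
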
> +
> +static void downlist_master_choose_and_send (void)
> +{
> +	struct downlist_msg *stored_msg;
> +	struct list_head *iter;
> +	struct process_info *left_pi;
> +	qb_map_t *group_map;
> +	struct cpg_name cpg_group;
> +	mar_cpg_name_t group;
> +	struct confchg_data{
> +		struct cpg_name cpg_group;
> +		mar_cpg_address_t left_list[CPG_MEMBERS_MAX];
> +		int left_list_entries;
> +		struct list_head  list;
> +	} *pcd;
> +	qb_map_iter_t *miter;
> +	int i, size;
> +
> +	downlist_state = CPG_DOWNLIST_APPLYING;
> +
> +	stored_msg = downlist_master_choose ();
> +	if (!stored_msg) {
> +		log_printf (LOGSYS_LEVEL_DEBUG, "NO chosen downlist");
> +		return;
> +	}
> +	downlist_log("chosen downlist", stored_msg);
> +
> +	group_map = qb_skiplist_create();
> +
> +	/*
> +	 * Only the cpg groups that contain left nodes should receive a
> +	 * confchg event, so collect those cpg groups and their
> +	 * respective left_lists here.
> +	 */
> +	for (iter = process_info_list_head.next; iter != &process_info_list_head; ) {
> +		struct process_info *pi = list_entry(iter, struct process_info, list);
> +		iter = iter->next;
> +
> +		left_pi = NULL;
> +		for (i = 0; i < stored_msg->left_nodes; i++) {
> +
> +			if (pi->nodeid == stored_msg->nodeids[i]) {
> +				left_pi = pi;
> +				break;
> +			}
> +		}
> +
> +		if (left_pi) {
> +			marshall_from_mar_cpg_name_t(&cpg_group, &left_pi->group);
> +			cpg_group.value[cpg_group.length] = 0;
> +
> +			pcd = (struct confchg_data *)qb_map_get(group_map, cpg_group.value);
> +			if (pcd == NULL) {
> +				pcd = (struct confchg_data *)calloc(1, sizeof(struct confchg_data));
> +				memcpy(&pcd->cpg_group, &cpg_group, sizeof(struct cpg_name));
> +				qb_map_put(group_map, pcd->cpg_group.value, pcd);
> +			}
> +			size = pcd->left_list_entries;
> +			pcd->left_list[size].nodeid = left_pi->nodeid;
> +			pcd->left_list[size].pid = left_pi->pid;
> +			pcd->left_list[size].reason = CONFCHG_CPG_REASON_NODEDOWN;
> +			pcd->left_list_entries++;
> +			list_del (&left_pi->list);
> +			free (left_pi);
> +		}
> +	}
> +
> +	/* send only one confchg event per cpg group */
> +	miter = qb_map_iter_create(group_map);
> +	while (qb_map_iter_next(miter, (void **)&pcd)) {
> +		marshall_to_mar_cpg_name_t(&group, &pcd->cpg_group);
> +
> +		log_printf (LOG_DEBUG, "left_list_entries:%d", pcd->left_list_entries);
> +		for (i=0; i<pcd->left_list_entries; i++) {
> +			log_printf (LOG_DEBUG, "left_list[%d] group:%s, ip:%s, pid:%d",
> +				i, pcd->cpg_group.value,
> +				(char*)api->totem_ifaces_print(pcd->left_list[i].nodeid),
> +				pcd->left_list[i].pid);
> +		}
> +
> +		/* send confchg event */
> +		notify_lib_joinlist(&group, NULL,
> +			0, NULL,
> +			pcd->left_list_entries,
> +			pcd->left_list,
> +			MESSAGE_RES_CPG_CONFCHG_CALLBACK);
> +
> +		free(pcd);
> +	}
> +	qb_map_iter_free(miter);
> +	qb_map_destroy(group_map);
> +}
> +
> +static void downlist_messages_delete (void)
> +{
> +	struct downlist_msg *stored_msg;
> +	struct list_head *iter, *iter_next;
> +
> +	for (iter = downlist_messages_head.next;
> +		iter != &downlist_messages_head;
> +		iter = iter_next) {
> +
> +		iter_next = iter->next;
> +
> +		stored_msg = list_entry(iter, struct downlist_msg, list);
> +		list_del (&stored_msg->list);
> +		free (stored_msg);
> +	}
> +}
> +
> +
> +static int cpg_exec_init_fn (struct corosync_api_v1 *corosync_api)
> +{
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +	list_init (&downlist_messages_head);
> +	api = corosync_api;
> +	return (0);
> +}
> +
> +static void cpg_iteration_instance_finalize (struct cpg_iteration_instance *cpg_iteration_instance)
> +{
> +	struct list_head *iter, *iter_next;
> +	struct process_info *pi;
> +
> +	for (iter = cpg_iteration_instance->items_list_head.next;
> +		iter != &cpg_iteration_instance->items_list_head;
> +		iter = iter_next) {
> +
> +		iter_next = iter->next;
> +
> +		pi = list_entry (iter, struct process_info, list);
> +		list_del (&pi->list);
> +		free (pi);
> +	}
> +
> +	list_del (&cpg_iteration_instance->list);
> +	hdb_handle_destroy (&cpg_iteration_handle_t_db, cpg_iteration_instance->handle);
> +}
> +
> +static void cpg_pd_finalize (struct cpg_pd *cpd)
> +{
> +	struct list_head *iter, *iter_next;
> +	struct cpg_iteration_instance *cpii;
> +
> +	zcb_all_free(cpd);
> +	for (iter = cpd->iteration_instance_list_head.next;
> +		iter != &cpd->iteration_instance_list_head;
> +		iter = iter_next) {
> +
> +		iter_next = iter->next;
> +
> +		cpii = list_entry (iter, struct cpg_iteration_instance, list);
> +
> +		cpg_iteration_instance_finalize (cpii);
> +	}
> +
> +	list_del (&cpd->list);
> +}
> +
> +static int cpg_lib_exit_fn (void *conn)
> +{
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "exit_fn for conn=%p\n", conn);
> +
> +	if (cpd->group_name.length > 0) {
> +		cpg_node_joinleave_send (cpd->pid, &cpd->group_name,
> +				MESSAGE_REQ_EXEC_CPG_PROCLEAVE, CONFCHG_CPG_REASON_PROCDOWN);
> +	}
> +
> +	cpg_pd_finalize (cpd);
> +
> +	api->ipc_refcnt_dec (conn);
> +	return (0);
> +}
> +
> +static int cpg_node_joinleave_send (unsigned int pid, const mar_cpg_name_t *group_name, int fn, int reason)
> +{
> +	struct req_exec_cpg_procjoin req_exec_cpg_procjoin;
> +	struct iovec req_exec_cpg_iovec;
> +	int result;
> +
> +	memcpy(&req_exec_cpg_procjoin.group_name, group_name, sizeof(mar_cpg_name_t));
> +	req_exec_cpg_procjoin.pid = pid;
> +	req_exec_cpg_procjoin.reason = reason;
> +
> +	req_exec_cpg_procjoin.header.size = sizeof(req_exec_cpg_procjoin);
> +	req_exec_cpg_procjoin.header.id = SERVICE_ID_MAKE(CPG_SERVICE, fn);
> +
> +	req_exec_cpg_iovec.iov_base = (char *)&req_exec_cpg_procjoin;
> +	req_exec_cpg_iovec.iov_len = sizeof(req_exec_cpg_procjoin);
> +
> +	result = api->totem_mcast (&req_exec_cpg_iovec, 1, TOTEM_AGREED);
> +
> +	return (result);
> +}
> +
> +/* Can byteswap join & leave messages */
> +static void exec_cpg_procjoin_endian_convert (void *msg)
> +{
> +	struct req_exec_cpg_procjoin *req_exec_cpg_procjoin = msg;
> +
> +	req_exec_cpg_procjoin->pid = swab32(req_exec_cpg_procjoin->pid);
> +	swab_mar_cpg_name_t (&req_exec_cpg_procjoin->group_name);
> +	req_exec_cpg_procjoin->reason = swab32(req_exec_cpg_procjoin->reason);
> +}
> +
> +static void exec_cpg_joinlist_endian_convert (void *msg_v)
> +{
> +	char *msg = msg_v;
> +	struct qb_ipc_response_header *res = (struct qb_ipc_response_header *)msg;
> +	struct join_list_entry *jle = (struct join_list_entry *)(msg + sizeof(struct qb_ipc_response_header));
> +
> +	swab_mar_int32_t (&res->size);
> +
> +	while ((const char*)jle < msg + res->size) {
> +		jle->pid = swab32(jle->pid);
> +		swab_mar_cpg_name_t (&jle->group_name);
> +		jle++;
> +	}
> +}
> +
> +static void exec_cpg_downlist_endian_convert_old (void *msg)
> +{
> +}
> +
> +static void exec_cpg_downlist_endian_convert (void *msg)
> +{
> +	struct req_exec_cpg_downlist *req_exec_cpg_downlist = msg;
> +	unsigned int i;
> +
> +	req_exec_cpg_downlist->left_nodes = swab32(req_exec_cpg_downlist->left_nodes);
> +	req_exec_cpg_downlist->old_members = swab32(req_exec_cpg_downlist->old_members);
> +
> +	for (i = 0; i < req_exec_cpg_downlist->left_nodes; i++) {
> +		req_exec_cpg_downlist->nodeids[i] = swab32(req_exec_cpg_downlist->nodeids[i]);
> +	}
> +}
> +
> +
> +static void exec_cpg_mcast_endian_convert (void *msg)
> +{
> +	struct req_exec_cpg_mcast *req_exec_cpg_mcast = msg;
> +
> +	swab_coroipc_request_header_t (&req_exec_cpg_mcast->header);
> +	swab_mar_cpg_name_t (&req_exec_cpg_mcast->group_name);
> +	req_exec_cpg_mcast->pid = swab32(req_exec_cpg_mcast->pid);
> +	req_exec_cpg_mcast->msglen = swab32(req_exec_cpg_mcast->msglen);
> +	swab_mar_message_source_t (&req_exec_cpg_mcast->source);
> +}
> +
> +static struct process_info *process_info_find(const mar_cpg_name_t *group_name, uint32_t pid, unsigned int nodeid) {
> +	struct list_head *iter;
> +
> +	for (iter = process_info_list_head.next; iter != &process_info_list_head; ) {
> +		struct process_info *pi = list_entry (iter, struct process_info, list);
> +		iter = iter->next;
> +
> +		if (pi->pid == pid && pi->nodeid == nodeid &&
> +			mar_name_compare (&pi->group, group_name) == 0) {
> +				return pi;
> +		}
> +	}
> +
> +	return NULL;
> +}
> +
> +static void do_proc_join(
> +	const mar_cpg_name_t *name,
> +	uint32_t pid,
> +	unsigned int nodeid,
> +	int reason)
> +{
> +	struct process_info *pi;
> +	struct process_info *pi_entry;
> +	mar_cpg_address_t notify_info;
> +	struct list_head *list;
> +	struct list_head *list_to_add = NULL;
> +
> +	if (process_info_find (name, pid, nodeid) != NULL) {
> +		return ;
> + 	}
> +	pi = malloc (sizeof (struct process_info));
> +	if (!pi) {
> +		log_printf(LOGSYS_LEVEL_WARNING, "Unable to allocate process_info struct");
> +		return;
> +	}
> +	pi->nodeid = nodeid;
> +	pi->pid = pid;
> +	memcpy(&pi->group, name, sizeof(*name));
> +	list_init(&pi->list);
> +
> +	/*
> +	 * Insert new process in sorted order so synchronization works properly
> +	 */
> +	list_to_add = &process_info_list_head;
> +	for (list = process_info_list_head.next; list != &process_info_list_head; list = list->next) {
> +
> +		pi_entry = list_entry(list, struct process_info, list);
> +		if (pi_entry->nodeid > pi->nodeid ||
> +			(pi_entry->nodeid == pi->nodeid && pi_entry->pid > pi->pid)) {
> +
> +			break;
> +		}
> +		list_to_add = list;
> +	}
> +	list_add (&pi->list, list_to_add);
> +
> +	notify_info.pid = pi->pid;
> +	notify_info.nodeid = nodeid;
> +	notify_info.reason = reason;
> +
> +	notify_lib_joinlist(&pi->group, NULL,
> +			    1, &notify_info,
> +			    0, NULL,
> +			    MESSAGE_RES_CPG_CONFCHG_CALLBACK);
> +}
> +
> +static void message_handler_req_exec_cpg_downlist_old (
> +	const void *message,
> +	unsigned int nodeid)
> +{
> +	log_printf (LOGSYS_LEVEL_WARNING, "downlist OLD from node %d",
> +		nodeid);
> +}
> +
> +static void message_handler_req_exec_cpg_downlist(
> +	const void *message,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_cpg_downlist *req_exec_cpg_downlist = message;
> +	int i;
> +	struct list_head *iter;
> +	struct downlist_msg *stored_msg;
> +	int found;
> +
> +	if (downlist_state != CPG_DOWNLIST_WAITING_FOR_MESSAGES) {
> +		log_printf (LOGSYS_LEVEL_WARNING, "downlist left_list: %d received in state %d",
> +			req_exec_cpg_downlist->left_nodes, downlist_state);
> +		return;
> +	}
> +
> +	stored_msg = malloc (sizeof (struct downlist_msg));
> +	stored_msg->sender_nodeid = nodeid;
> +	stored_msg->old_members = req_exec_cpg_downlist->old_members;
> +	stored_msg->left_nodes = req_exec_cpg_downlist->left_nodes;
> +	memcpy (stored_msg->nodeids, req_exec_cpg_downlist->nodeids,
> +		req_exec_cpg_downlist->left_nodes * sizeof (mar_uint32_t));
> +	list_init (&stored_msg->list);
> +	list_add (&stored_msg->list, &downlist_messages_head);
> +
> +	for (i = 0; i < my_member_list_entries; i++) {
> +		found = 0;
> +		for (iter = downlist_messages_head.next;
> +			iter != &downlist_messages_head;
> +			iter = iter->next) {
> +
> +			stored_msg = list_entry(iter, struct downlist_msg, list);
> +			if (my_member_list[i] == stored_msg->sender_nodeid) {
> +				found = 1;
> +			}
> +		}
> +		if (!found) {
> +			return;
> +		}
> +	}
> +
> +	downlist_master_choose_and_send ();
> +}
> +
> +
> +static void message_handler_req_exec_cpg_procjoin (
> +	const void *message,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_cpg_procjoin *req_exec_cpg_procjoin = message;
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got procjoin message from cluster node %d\n", nodeid);
> +
> +	do_proc_join (&req_exec_cpg_procjoin->group_name,
> +		req_exec_cpg_procjoin->pid, nodeid,
> +		CONFCHG_CPG_REASON_JOIN);
> +}
> +
> +static void message_handler_req_exec_cpg_procleave (
> +	const void *message,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_cpg_procjoin *req_exec_cpg_procjoin = message;
> +	struct process_info *pi;
> +	struct list_head *iter;
> +	mar_cpg_address_t notify_info;
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got procleave message from cluster node %d\n", nodeid);
> +
> +	notify_info.pid = req_exec_cpg_procjoin->pid;
> +	notify_info.nodeid = nodeid;
> +	notify_info.reason = req_exec_cpg_procjoin->reason;
> +
> +	notify_lib_joinlist(&req_exec_cpg_procjoin->group_name, NULL,
> +		0, NULL,
> +		1, &notify_info,
> +		MESSAGE_RES_CPG_CONFCHG_CALLBACK);
> +
> +	for (iter = process_info_list_head.next; iter != &process_info_list_head; ) {
> +		pi = list_entry(iter, struct process_info, list);
> +		iter = iter->next;
> +
> +		if (pi->pid == req_exec_cpg_procjoin->pid && pi->nodeid == nodeid &&
> +			mar_name_compare (&pi->group, &req_exec_cpg_procjoin->group_name)==0) {
> +			list_del (&pi->list);
> +			free (pi);
> +		}
> +	}
> +}
> +
> +
> +/* Got a proclist from another node */
> +static void message_handler_req_exec_cpg_joinlist (
> +	const void *message_v,
> +	unsigned int nodeid)
> +{
> +	const char *message = message_v;
> +	const struct qb_ipc_response_header *res = (const struct qb_ipc_response_header *)message;
> +	const struct join_list_entry *jle = (const struct join_list_entry *)(message + sizeof(struct qb_ipc_response_header));
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got joinlist message from node %x\n",
> +		nodeid);
> +
> +	/* Ignore our own messages */
> +	if (nodeid == api->totem_nodeid_get()) {
> +		return;
> +	}
> +
> +	while ((const char*)jle < message + res->size) {
> +		do_proc_join (&jle->group_name, jle->pid, nodeid,
> +			CONFCHG_CPG_REASON_NODEUP);
> +		jle++;
> +	}
> +}
> +
> +static void message_handler_req_exec_cpg_mcast (
> +	const void *message,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_cpg_mcast *req_exec_cpg_mcast = message;
> +	struct res_lib_cpg_deliver_callback res_lib_cpg_mcast;
> +	int msglen = req_exec_cpg_mcast->msglen;
> +	struct list_head *iter, *pi_iter;
> +	struct cpg_pd *cpd;
> +	struct iovec iovec[2];
> +	int known_node = 0;
> +
> +	res_lib_cpg_mcast.header.id = MESSAGE_RES_CPG_DELIVER_CALLBACK;
> +	res_lib_cpg_mcast.header.size = sizeof(res_lib_cpg_mcast) + msglen;
> +	res_lib_cpg_mcast.msglen = msglen;
> +	res_lib_cpg_mcast.pid = req_exec_cpg_mcast->pid;
> +	res_lib_cpg_mcast.nodeid = nodeid;
> +
> +	memcpy(&res_lib_cpg_mcast.group_name, &req_exec_cpg_mcast->group_name,
> +		sizeof(mar_cpg_name_t));
> +	iovec[0].iov_base = (void *)&res_lib_cpg_mcast;
> +	iovec[0].iov_len = sizeof (res_lib_cpg_mcast);
> +
> +	iovec[1].iov_base = (char*)message+sizeof(*req_exec_cpg_mcast);
> +	iovec[1].iov_len = msglen;
> +
> +	for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; ) {
> +		cpd = list_entry(iter, struct cpg_pd, list);
> +		iter = iter->next;
> +
> +		if ((cpd->cpd_state == CPD_STATE_LEAVE_STARTED || cpd->cpd_state == CPD_STATE_JOIN_COMPLETED)
> +			&& (mar_name_compare (&cpd->group_name, &req_exec_cpg_mcast->group_name) == 0)) {
> +
> +			if (!known_node) {
> +				/* Check whether we already know the node */
> +				for (pi_iter = process_info_list_head.next;
> +					pi_iter != &process_info_list_head; pi_iter = pi_iter->next) {
> +
> +					struct process_info *pi = list_entry (pi_iter, struct process_info, list);
> +
> +					if (pi->nodeid == nodeid &&
> +						mar_name_compare (&pi->group, &req_exec_cpg_mcast->group_name) == 0) {
> +						known_node = 1;
> +						break;
> +					}
> +				}
> +			}
> +
> +			if (!known_node) {
> +				log_printf(LOGSYS_LEVEL_WARNING, "Unknown node -> we will not deliver message");
> +				return ;
> +			}
> +
> +			api->ipc_dispatch_iov_send (cpd->conn, iovec, 2);
> +		}
> +	}
> +}
> +
> +
> +static int cpg_exec_send_downlist(void)
> +{
> +	struct iovec iov;
> +
> +	g_req_exec_cpg_downlist.header.id = SERVICE_ID_MAKE(CPG_SERVICE, MESSAGE_REQ_EXEC_CPG_DOWNLIST);
> +	g_req_exec_cpg_downlist.header.size = sizeof(struct req_exec_cpg_downlist);
> +
> +	g_req_exec_cpg_downlist.old_members = my_old_member_list_entries;
> +
> +	iov.iov_base = (void *)&g_req_exec_cpg_downlist;
> +	iov.iov_len = g_req_exec_cpg_downlist.header.size;
> +
> +	return (api->totem_mcast (&iov, 1, TOTEM_AGREED));
> +}
> +
> +static int cpg_exec_send_joinlist(void)
> +{
> +	int count = 0;
> +	struct list_head *iter;
> +	struct qb_ipc_response_header *res;
> + 	char *buf;
> +	struct join_list_entry *jle;
> +	struct iovec req_exec_cpg_iovec;
> +
> + 	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> + 		struct process_info *pi = list_entry (iter, struct process_info, list);
> +
> + 		if (pi->nodeid == api->totem_nodeid_get ()) {
> + 			count++;
> +		}
> +	}
> +
> +	/* Nothing to send */
> +	if (!count)
> +		return 0;
> +
> +	buf = alloca(sizeof(struct qb_ipc_response_header) + sizeof(struct join_list_entry) * count);
> +	if (!buf) {
> +		log_printf(LOGSYS_LEVEL_WARNING, "Unable to allocate joinlist buffer");
> +		return -1;
> +	}
> +
> +	jle = (struct join_list_entry *)(buf + sizeof(struct qb_ipc_response_header));
> +	res = (struct qb_ipc_response_header *)buf;
> +
> + 	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> + 		struct process_info *pi = list_entry (iter, struct process_info, list);
> +
> +		if (pi->nodeid == api->totem_nodeid_get ()) {
> +			memcpy (&jle->group_name, &pi->group, sizeof (mar_cpg_name_t));
> +			jle->pid = pi->pid;
> +			jle++;
> +		}
> +	}
> +
> +	res->id = SERVICE_ID_MAKE(CPG_SERVICE, MESSAGE_REQ_EXEC_CPG_JOINLIST);
> +	res->size = sizeof(struct qb_ipc_response_header)+sizeof(struct join_list_entry) * count;
> +
> +	req_exec_cpg_iovec.iov_base = buf;
> +	req_exec_cpg_iovec.iov_len = res->size;
> +
> +	return (api->totem_mcast (&req_exec_cpg_iovec, 1, TOTEM_AGREED));
> +}
> +
> +static int cpg_lib_init_fn (void *conn)
> +{
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +	memset (cpd, 0, sizeof(struct cpg_pd));
> +	cpd->conn = conn;
> +	list_add (&cpd->list, &cpg_pd_list_head);
> +
> +	list_init (&cpd->iteration_instance_list_head);
> +	list_init (&cpd->zcb_mapped_list_head);
> +
> +	api->ipc_refcnt_inc (conn);
> +	log_printf(LOGSYS_LEVEL_DEBUG, "lib_init_fn: conn=%p, cpd=%p\n", conn, cpd);
> +	return (0);
> +}
> +
> +/* Join message from the library */
> +static void message_handler_req_lib_cpg_join (void *conn, const void *message)
> +{
> +	const struct req_lib_cpg_join *req_lib_cpg_join = message;
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +	struct res_lib_cpg_join res_lib_cpg_join;
> +	cs_error_t error = CS_OK;
> +	struct list_head *iter;
> +
> +	/* Make sure the same pid and group name are not already joined */
> +	for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> +		struct cpg_pd *cpd_item = list_entry (iter, struct cpg_pd, list);
> +
> +		if (cpd_item->pid == req_lib_cpg_join->pid &&
> +			mar_name_compare(&req_lib_cpg_join->group_name, &cpd_item->group_name) == 0) {
> +
> +			/* We have same pid and group name joined -> return error */
> +			error = CS_ERR_EXIST;
> +			goto response_send;
> +		}
> +	}
> +
> +	/*
> +	 * The same check must be done against the process info list, because a client's
> +	 * leave may not have been delivered yet.
> +	 */
> +	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> +		struct process_info *pi = list_entry (iter, struct process_info, list);
> +
> +		if (pi->nodeid == api->totem_nodeid_get () && pi->pid == req_lib_cpg_join->pid &&
> +		    mar_name_compare(&req_lib_cpg_join->group_name, &pi->group) == 0) {
> +			/* We have same pid and group name joined -> return error */
> +			error = CS_ERR_TRY_AGAIN;
> +			goto response_send;
> +		}
> +	}
> +
> +	switch (cpd->cpd_state) {
> +	case CPD_STATE_UNJOINED:
> +		error = CS_OK;
> +		cpd->cpd_state = CPD_STATE_JOIN_STARTED;
> +		cpd->pid = req_lib_cpg_join->pid;
> +		cpd->flags = req_lib_cpg_join->flags;
> +		memcpy (&cpd->group_name, &req_lib_cpg_join->group_name,
> +			sizeof (cpd->group_name));
> +
> +		cpg_node_joinleave_send (req_lib_cpg_join->pid,
> +			&req_lib_cpg_join->group_name,
> +			MESSAGE_REQ_EXEC_CPG_PROCJOIN, CONFCHG_CPG_REASON_JOIN);
> +		break;
> +	case CPD_STATE_LEAVE_STARTED:
> +		error = CS_ERR_BUSY;
> +		break;
> +	case CPD_STATE_JOIN_STARTED:
> +		error = CS_ERR_EXIST;
> +		break;
> +	case CPD_STATE_JOIN_COMPLETED:
> +		error = CS_ERR_EXIST;
> +		break;
> +	}
> +
> +response_send:
> +	res_lib_cpg_join.header.size = sizeof(res_lib_cpg_join);
> +	res_lib_cpg_join.header.id = MESSAGE_RES_CPG_JOIN;
> +	res_lib_cpg_join.header.error = error;
> +	api->ipc_response_send (conn, &res_lib_cpg_join, sizeof(res_lib_cpg_join));
> +}
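
From the library side this enforces the error codes in the state table at the top of the file: a second join on the same handle fails with CS_ERR_EXIST. A hedged client sketch using the public cpg API (signatures as I remember them from <corosync/cpg.h>, so double-check before copying):

	#include <stdio.h>
	#include <string.h>
	#include <corosync/cpg.h>

	/* only the error path is shown, so no callbacks are registered */
	int main (void)
	{
		cpg_handle_t handle;
		struct cpg_name group;
		cs_error_t err;

		if (cpg_initialize (&handle, NULL) != CS_OK) {
			fprintf (stderr, "cpg_initialize failed\n");
			return 1;
		}

		strcpy (group.value, "demo_group");
		group.length = strlen (group.value);

		err = cpg_join (handle, &group);	/* expected: CS_OK        */
		err = cpg_join (handle, &group);	/* expected: CS_ERR_EXIST */
		printf ("second join returned %d\n", err);

		cpg_leave (handle, &group);
		cpg_finalize (handle);
		return 0;
	}
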
> +
> +/* Leave message from the library */
> +static void message_handler_req_lib_cpg_leave (void *conn, const void *message)
> +{
> +	struct res_lib_cpg_leave res_lib_cpg_leave;
> +	cs_error_t error = CS_OK;
> +	struct req_lib_cpg_leave  *req_lib_cpg_leave = (struct req_lib_cpg_leave *)message;
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got leave request on %p\n", conn);
> +
> +	switch (cpd->cpd_state) {
> +	case CPD_STATE_UNJOINED:
> +		error = CS_ERR_NOT_EXIST;
> +		break;
> +	case CPD_STATE_LEAVE_STARTED:
> +		error = CS_ERR_NOT_EXIST;
> +		break;
> +	case CPD_STATE_JOIN_STARTED:
> +		error = CS_ERR_BUSY;
> +		break;
> +	case CPD_STATE_JOIN_COMPLETED:
> +		error = CS_OK;
> +		cpd->cpd_state = CPD_STATE_LEAVE_STARTED;
> +		cpg_node_joinleave_send (req_lib_cpg_leave->pid,
> +			&req_lib_cpg_leave->group_name,
> +			MESSAGE_REQ_EXEC_CPG_PROCLEAVE,
> +			CONFCHG_CPG_REASON_LEAVE);
> +		break;
> +	}
> +
> +	/* send return */
> +	res_lib_cpg_leave.header.size = sizeof(res_lib_cpg_leave);
> +	res_lib_cpg_leave.header.id = MESSAGE_RES_CPG_LEAVE;
> +	res_lib_cpg_leave.header.error = error;
> +	api->ipc_response_send(conn, &res_lib_cpg_leave, sizeof(res_lib_cpg_leave));
> +}
> +
> +/* Finalize message from library */
> +static void message_handler_req_lib_cpg_finalize (
> +	void *conn,
> +	const void *message)
> +{
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +	struct res_lib_cpg_finalize res_lib_cpg_finalize;
> +	cs_error_t error = CS_OK;
> +
> +	log_printf (LOGSYS_LEVEL_DEBUG, "cpg finalize for conn=%p\n", conn);
> +
> +	/*
> +	 * Just remove the cpd from the list. After this call the connection will be
> +	 * closed on the library side and cpg_lib_exit_fn will be called.
> +	 */
> +	list_del (&cpd->list);
> +	list_init (&cpd->list);
> +
> +	res_lib_cpg_finalize.header.size = sizeof (res_lib_cpg_finalize);
> +	res_lib_cpg_finalize.header.id = MESSAGE_RES_CPG_FINALIZE;
> +	res_lib_cpg_finalize.header.error = error;
> +
> +	api->ipc_response_send (conn, &res_lib_cpg_finalize,
> +		sizeof (res_lib_cpg_finalize));
> +}
> +
> +static int
> +memory_map (
> +	const char *path,
> +	size_t bytes,
> +	void **buf)
> +{
> +	int32_t fd;
> +	void *addr_orig;
> +	void *addr;
> +	int32_t res;
> +
> +	fd = open (path, O_RDWR, 0600);
> +
> +	unlink (path);
> +
> +	if (fd == -1) {
> +		return (-1);
> +	}
> +
> +	res = ftruncate (fd, bytes);
> +	if (res == -1) {
> +		goto error_close_unlink;
> +	}
> +
> +	addr_orig = mmap (NULL, bytes, PROT_NONE,
> +		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
> +
> +	if (addr_orig == MAP_FAILED) {
> +		goto error_close_unlink;
> +	}
> +
> +	addr = mmap (addr_orig, bytes, PROT_READ | PROT_WRITE,
> +		MAP_FIXED | MAP_SHARED, fd, 0);
> +
> +	if (addr != addr_orig) {
> +		munmap(addr_orig, bytes);
> +		goto error_close_unlink;
> +	}
> +#ifdef COROSYNC_BSD
> +	madvise(addr, bytes, MADV_NOSYNC);
> +#endif
> +
> +	res = close (fd);
> +	if (res) {
> +		return (-1);
> +	}
> +	*buf = addr_orig;
> +	return (0);
> +
> +error_close_unlink:
> +	close (fd);
> +	unlink(path);
> +	return -1;
> +}
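
memory_map() uses the usual reserve-then-overlay trick: an anonymous PROT_NONE mapping pins a contiguous address range, and the file is then mapped over it with MAP_FIXED | MAP_SHARED. A minimal standalone demo of the same pattern (the temp file name is made up for the demo):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main (void)
	{
		const size_t bytes = 4096;
		char path[] = "/tmp/zcb-demo-XXXXXX";
		int fd = mkstemp (path);
		void *reserved, *mapped;

		if (fd == -1 || ftruncate (fd, bytes) == -1) {
			perror ("setup");
			return EXIT_FAILURE;
		}
		unlink (path);

		/* reserve the address range without backing it yet */
		reserved = mmap (NULL, bytes, PROT_NONE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (reserved == MAP_FAILED) {
			perror ("reserve");
			return EXIT_FAILURE;
		}

		/* overlay the file onto the reserved range */
		mapped = mmap (reserved, bytes, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_SHARED, fd, 0);
		if (mapped != reserved) {
			perror ("overlay");
			return EXIT_FAILURE;
		}

		strcpy (mapped, "hello zero copy");
		printf ("%s\n", (char *) mapped);

		munmap (mapped, bytes);
		close (fd);
		return EXIT_SUCCESS;
	}
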
> +
> +static inline int zcb_alloc (
> +	struct cpg_pd *cpd,
> +	const char *path_to_file,
> +	size_t size,
> +	void **addr)
> +{
> +	struct zcb_mapped *zcb_mapped;
> +	unsigned int res;
> +
> +	zcb_mapped = malloc (sizeof (struct zcb_mapped));
> +	if (zcb_mapped == NULL) {
> +		return (-1);
> +	}
> +
> +	res = memory_map (
> +		path_to_file,
> +		size,
> +		addr);
> +	if (res == -1) {
> +		free (zcb_mapped);
> +		return (-1);
> +	}
> +
> +	list_init (&zcb_mapped->list);
> +	zcb_mapped->addr = *addr;
> +	zcb_mapped->size = size;
> +	list_add_tail (&zcb_mapped->list, &cpd->zcb_mapped_list_head);
> +	return (0);
> +}
> +
> +
> +static inline int zcb_free (struct zcb_mapped *zcb_mapped)
> +{
> +	unsigned int res;
> +
> +	res = munmap (zcb_mapped->addr, zcb_mapped->size);
> +	list_del (&zcb_mapped->list);
> +	free (zcb_mapped);
> +	return (res);
> +}
> +
> +static inline int zcb_by_addr_free (struct cpg_pd *cpd, void *addr)
> +{
> +	struct list_head *list;
> +	struct zcb_mapped *zcb_mapped;
> +	unsigned int res = 0;
> +
> +	for (list = cpd->zcb_mapped_list_head.next;
> +		list != &cpd->zcb_mapped_list_head; list = list->next) {
> +
> +		zcb_mapped = list_entry (list, struct zcb_mapped, list);
> +
> +		if (zcb_mapped->addr == addr) {
> +			res = zcb_free (zcb_mapped);
> +			break;
> +		}
> +
> +	}
> +	return (res);
> +}
> +
> +static inline int zcb_all_free (
> +	struct cpg_pd *cpd)
> +{
> +	struct list_head *list;
> +	struct zcb_mapped *zcb_mapped;
> +
> +	for (list = cpd->zcb_mapped_list_head.next;
> +		list != &cpd->zcb_mapped_list_head;) {
> +
> +		zcb_mapped = list_entry (list, struct zcb_mapped, list);
> +
> +		list = list->next;
> +
> +		zcb_free (zcb_mapped);
> +	}
> +	return (0);
> +}
> +
> +union u {
> +	uint64_t server_addr;
> +	void *server_ptr;
> +};
> +
> +static uint64_t void2serveraddr (void *server_ptr)
> +{
> +	union u u;
> +
> +	u.server_ptr = server_ptr;
> +	return (u.server_addr);
> +}
> +
> +static void *serveraddr2void (uint64_t server_addr)
> +{
> +	union u u;
> +
> +	u.server_addr = server_addr;
> +	return (u.server_ptr);
> +};
> +
> +static void message_handler_req_lib_cpg_zc_alloc (
> +	void *conn,
> +	const void *message)
> +{
> +	mar_req_coroipcc_zc_alloc_t *hdr = (mar_req_coroipcc_zc_alloc_t *)message;
> +	struct qb_ipc_response_header res_header;
> +	void *addr = NULL;
> +	struct coroipcs_zc_header *zc_header;
> +	unsigned int res;
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "path: %s", hdr->path_to_file);
> +
> +	res = zcb_alloc (cpd, hdr->path_to_file, hdr->map_size,
> +		&addr);
> +	assert(res == 0);
> +
> +	zc_header = (struct coroipcs_zc_header *)addr;
> +	zc_header->server_address = void2serveraddr(addr);
> +
> +	res_header.size = sizeof (struct qb_ipc_response_header);
> +	res_header.id = 0;
> +	api->ipc_response_send (conn,
> +		&res_header,
> +		res_header.size);
> +}
> +
> +static void message_handler_req_lib_cpg_zc_free (
> +	void *conn,
> +	const void *message)
> +{
> +	mar_req_coroipcc_zc_free_t *hdr = (mar_req_coroipcc_zc_free_t *)message;
> +	struct qb_ipc_response_header res_header;
> +	void *addr = NULL;
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "freeing");
> +
> +	addr = serveraddr2void (hdr->server_address);
> +
> +	zcb_by_addr_free (cpd, addr);
> +
> +	res_header.size = sizeof (struct qb_ipc_response_header);
> +	res_header.id = 0;
> +	api->ipc_response_send (
> +		conn, &res_header,
> +		res_header.size);
> +}
> +
> +/* Mcast message from the library */
> +static void message_handler_req_lib_cpg_mcast (void *conn, const void *message)
> +{
> +	const struct req_lib_cpg_mcast *req_lib_cpg_mcast = message;
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +	mar_cpg_name_t group_name = cpd->group_name;
> +
> +	struct iovec req_exec_cpg_iovec[2];
> +	struct req_exec_cpg_mcast req_exec_cpg_mcast;
> +	int msglen = req_lib_cpg_mcast->msglen;
> +	int result;
> +	cs_error_t error = CS_ERR_NOT_EXIST;
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got mcast request on %p\n", conn);
> +
> +	switch (cpd->cpd_state) {
> +	case CPD_STATE_UNJOINED:
> +		error = CS_ERR_NOT_EXIST;
> +		break;
> +	case CPD_STATE_LEAVE_STARTED:
> +		error = CS_ERR_NOT_EXIST;
> +		break;
> +	case CPD_STATE_JOIN_STARTED:
> +		error = CS_OK;
> +		break;
> +	case CPD_STATE_JOIN_COMPLETED:
> +		error = CS_OK;
> +		break;
> +	}
> +
> +	if (error == CS_OK) {
> +		req_exec_cpg_mcast.header.size = sizeof(req_exec_cpg_mcast) + msglen;
> +		req_exec_cpg_mcast.header.id = SERVICE_ID_MAKE(CPG_SERVICE,
> +			MESSAGE_REQ_EXEC_CPG_MCAST);
> +		req_exec_cpg_mcast.pid = cpd->pid;
> +		req_exec_cpg_mcast.msglen = msglen;
> +		api->ipc_source_set (&req_exec_cpg_mcast.source, conn);
> +		memcpy(&req_exec_cpg_mcast.group_name, &group_name,
> +			sizeof(mar_cpg_name_t));
> +
> +		req_exec_cpg_iovec[0].iov_base = (char *)&req_exec_cpg_mcast;
> +		req_exec_cpg_iovec[0].iov_len = sizeof(req_exec_cpg_mcast);
> +		req_exec_cpg_iovec[1].iov_base = (char *)&req_lib_cpg_mcast->message;
> +		req_exec_cpg_iovec[1].iov_len = msglen;
> +
> +		result = api->totem_mcast (req_exec_cpg_iovec, 2, TOTEM_AGREED);
> +		assert(result == 0);
> +	} else {
> +		log_printf(LOGSYS_LEVEL_ERROR, "*** %p can't mcast to group %s state:%d, error:%d\n",
> +			conn, group_name.value, cpd->cpd_state, error);
> +	}
> +}
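
For readers not used to the two-element iovec: the handler multicasts the fixed exec
header and the caller's payload without copying them into one buffer. The same
scatter/gather shape, with writev() to stdout standing in for totem_mcast(), purely as
an illustration:

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

struct header {
	unsigned int size;
	unsigned int id;
};

int main(void)
{
	const char payload[] = "hello, group\n";
	struct header hdr = {
		.size = (unsigned int)(sizeof hdr + sizeof payload),
		.id = 42,
	};
	struct iovec iov[2] = {
		{ .iov_base = &hdr,            .iov_len = sizeof hdr },
		{ .iov_base = (void *)payload, .iov_len = sizeof payload },
	};

	/* header and payload go out as one message, no intermediate copy */
	if (writev(STDOUT_FILENO, iov, 2) < 0)
		perror("writev");
	return 0;
}
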
> +
> +static void message_handler_req_lib_cpg_zc_execute (
> +	void *conn,
> +	const void *message)
> +{
> +	mar_req_coroipcc_zc_execute_t *hdr = (mar_req_coroipcc_zc_execute_t *)message;
> +	struct qb_ipc_request_header *header;
> +	struct res_lib_cpg_mcast res_lib_cpg_mcast;
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +	struct iovec req_exec_cpg_iovec[2];
> +	struct req_exec_cpg_mcast req_exec_cpg_mcast;
> +	struct req_lib_cpg_mcast *req_lib_cpg_mcast;
> +	int result;
> +	cs_error_t error = CS_ERR_NOT_EXIST;
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got ZC mcast request on %p\n", conn);
> +
> +	header = (struct qb_ipc_request_header *)(((char *)serveraddr2void(hdr->server_address) + sizeof (struct coroipcs_zc_header)));
> +	req_lib_cpg_mcast = (struct req_lib_cpg_mcast *)header;
> +
> +	switch (cpd->cpd_state) {
> +	case CPD_STATE_UNJOINED:
> +		error = CS_ERR_NOT_EXIST;
> +		break;
> +	case CPD_STATE_LEAVE_STARTED:
> +		error = CS_ERR_NOT_EXIST;
> +		break;
> +	case CPD_STATE_JOIN_STARTED:
> +		error = CS_OK;
> +		break;
> +	case CPD_STATE_JOIN_COMPLETED:
> +		error = CS_OK;
> +		break;
> +	}
> +
> +	res_lib_cpg_mcast.header.size = sizeof(res_lib_cpg_mcast);
> +	res_lib_cpg_mcast.header.id = MESSAGE_RES_CPG_MCAST;
> +	if (error == CS_OK) {
> +		req_exec_cpg_mcast.header.size = sizeof(req_exec_cpg_mcast) + req_lib_cpg_mcast->msglen;
> +		req_exec_cpg_mcast.header.id = SERVICE_ID_MAKE(CPG_SERVICE,
> +			MESSAGE_REQ_EXEC_CPG_MCAST);
> +		req_exec_cpg_mcast.pid = cpd->pid;
> +		req_exec_cpg_mcast.msglen = req_lib_cpg_mcast->msglen;
> +		api->ipc_source_set (&req_exec_cpg_mcast.source, conn);
> +		memcpy(&req_exec_cpg_mcast.group_name, &cpd->group_name,
> +			sizeof(mar_cpg_name_t));
> +
> +		req_exec_cpg_iovec[0].iov_base = (char *)&req_exec_cpg_mcast;
> +		req_exec_cpg_iovec[0].iov_len = sizeof(req_exec_cpg_mcast);
> +		req_exec_cpg_iovec[1].iov_base = (char *)header + sizeof(struct req_lib_cpg_mcast);
> +		req_exec_cpg_iovec[1].iov_len = req_exec_cpg_mcast.msglen;
> +
> +		result = api->totem_mcast (req_exec_cpg_iovec, 2, TOTEM_AGREED);
> +		if (result == 0) {
> +			res_lib_cpg_mcast.header.error = CS_OK;
> +		} else {
> +			res_lib_cpg_mcast.header.error = CS_ERR_TRY_AGAIN;
> +		}
> +	} else {
> +		res_lib_cpg_mcast.header.error = error;
> +	}
> +
> +	api->ipc_response_send (conn, &res_lib_cpg_mcast,
> +		sizeof (res_lib_cpg_mcast));
> +
> +}
> +
> +static void message_handler_req_lib_cpg_membership (void *conn,
> +						    const void *message)
> +{
> +	struct req_lib_cpg_membership_get *req_lib_cpg_membership_get =
> +		(struct req_lib_cpg_membership_get *)message;
> +	struct res_lib_cpg_membership_get res_lib_cpg_membership_get;
> +	struct list_head *iter;
> +	int member_count = 0;
> +
> +	res_lib_cpg_membership_get.header.id = MESSAGE_RES_CPG_MEMBERSHIP;
> +	res_lib_cpg_membership_get.header.error = CS_OK;
> +	res_lib_cpg_membership_get.header.size =
> +		sizeof (struct res_lib_cpg_membership_get);
> +
> +	for (iter = process_info_list_head.next;
> +		iter != &process_info_list_head; iter = iter->next) {
> +
> +		struct process_info *pi = list_entry (iter, struct process_info, list);
> +		if (mar_name_compare (&pi->group, &req_lib_cpg_membership_get->group_name) == 0) {
> +			res_lib_cpg_membership_get.member_list[member_count].nodeid = pi->nodeid;
> +			res_lib_cpg_membership_get.member_list[member_count].pid = pi->pid;
> +			member_count += 1;
> +		}
> +	}
> +	res_lib_cpg_membership_get.member_count = member_count;
> +
> +	api->ipc_response_send (conn, &res_lib_cpg_membership_get,
> +		sizeof (res_lib_cpg_membership_get));
> +}
> +
> +static void message_handler_req_lib_cpg_local_get (void *conn,
> +						   const void *message)
> +{
> +	struct res_lib_cpg_local_get res_lib_cpg_local_get;
> +
> +	res_lib_cpg_local_get.header.size = sizeof (res_lib_cpg_local_get);
> +	res_lib_cpg_local_get.header.id = MESSAGE_RES_CPG_LOCAL_GET;
> +	res_lib_cpg_local_get.header.error = CS_OK;
> +	res_lib_cpg_local_get.local_nodeid = api->totem_nodeid_get ();
> +
> +	api->ipc_response_send (conn, &res_lib_cpg_local_get,
> +		sizeof (res_lib_cpg_local_get));
> +}
> +
> +static void message_handler_req_lib_cpg_iteration_initialize (
> +	void *conn,
> +	const void *message)
> +{
> +	const struct req_lib_cpg_iterationinitialize *req_lib_cpg_iterationinitialize = message;
> +	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> +	hdb_handle_t cpg_iteration_handle = 0;
> +	struct res_lib_cpg_iterationinitialize res_lib_cpg_iterationinitialize;
> +	struct list_head *iter, *iter2;
> +	struct cpg_iteration_instance *cpg_iteration_instance;
> +	cs_error_t error = CS_OK;
> +	int res;
> +
> +	log_printf (LOGSYS_LEVEL_DEBUG, "cpg iteration initialize\n");
> +
> +	/* Operations that change the process_info list can happen between this
> +	 * call and the subsequent *next calls, so we must make a full copy.
> +	 */
> +
> +	/*
> +	 * Create new iteration instance
> +	 */
> +	res = hdb_handle_create (&cpg_iteration_handle_t_db, sizeof (struct cpg_iteration_instance),
> +			&cpg_iteration_handle);
> +
> +	if (res != 0) {
> +		error = CS_ERR_NO_MEMORY;
> +		goto response_send;
> +	}
> +
> +	res = hdb_handle_get (&cpg_iteration_handle_t_db, cpg_iteration_handle, (void *)&cpg_iteration_instance);
> +
> +	if (res != 0) {
> +		error = CS_ERR_BAD_HANDLE;
> +		goto error_destroy;
> +	}
> +
> +	list_init (&cpg_iteration_instance->items_list_head);
> +	cpg_iteration_instance->handle = cpg_iteration_handle;
> +
> +	/*
> +	 * Create copy of process_info list "grouped by" group name
> +	 */
> +	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> +		struct process_info *pi = list_entry (iter, struct process_info, list);
> +		struct process_info *new_pi;
> +
> +		if (req_lib_cpg_iterationinitialize->iteration_type == CPG_ITERATION_NAME_ONLY) {
> +			/*
> +			 * Try to find the group name being processed in our new list
> +			 */
> +			int found = 0;
> +
> +			for (iter2 = cpg_iteration_instance->items_list_head.next;
> +			     iter2 != &cpg_iteration_instance->items_list_head;
> +			     iter2 = iter2->next) {
> +				 struct process_info *pi2 = list_entry (iter2, struct process_info, list);
> +
> +				 if (mar_name_compare (&pi2->group, &pi->group) == 0) {
> +					found = 1;
> +					break;
> +				 }
> +			}
> +
> +			if (found) {
> +				/*
> +				 * We already have this name in the list -> don't add
> +				 */
> +				continue ;
> +			}
> +		} else if (req_lib_cpg_iterationinitialize->iteration_type == CPG_ITERATION_ONE_GROUP) {
> +			/*
> +			 * Compare the pi group name with the requested group
> +			 */
> +			if (mar_name_compare (&pi->group, &req_lib_cpg_iterationinitialize->group_name) != 0)
> +				/*
> +				 * Not same -> don't add
> +				 */
> +				continue ;
> +		}
> +
> +		new_pi = malloc (sizeof (struct process_info));
> +		if (!new_pi) {
> +			log_printf(LOGSYS_LEVEL_WARNING, "Unable to allocate process_info struct");
> +
> +			error = CS_ERR_NO_MEMORY;
> +
> +			goto error_put_destroy;
> +		}
> +
> +		memcpy (new_pi, pi, sizeof (struct process_info));
> +		list_init (&new_pi->list);
> +
> +		if (req_lib_cpg_iterationinitialize->iteration_type == CPG_ITERATION_NAME_ONLY) {
> +			/*
> +			 * pid and nodeid -> undefined
> +			 */
> +			new_pi->pid = new_pi->nodeid = 0;
> +		}
> +
> +		/*
> +		 * We will return the list "grouped" by group name, so find the right place to add
> +		 */
> +		for (iter2 = cpg_iteration_instance->items_list_head.next;
> +		     iter2 != &cpg_iteration_instance->items_list_head;
> +		     iter2 = iter2->next) {
> +			 struct process_info *pi2 = list_entry (iter2, struct process_info, list);
> +
> +			 if (mar_name_compare (&pi2->group, &pi->group) == 0) {
> +				break;
> +			 }
> +		}
> +
> +		list_add (&new_pi->list, iter2);
> +	}
> +
> +	/*
> +	 * Now we have a full "grouped by" copy of process_info list
> +	 */
> +
> +	/*
> +	 * Add instance to current cpd list
> +	 */
> +	list_init (&cpg_iteration_instance->list);
> +	list_add (&cpg_iteration_instance->list, &cpd->iteration_instance_list_head);
> +
> +	cpg_iteration_instance->current_pointer = &cpg_iteration_instance->items_list_head;
> +
> +error_put_destroy:
> +	hdb_handle_put (&cpg_iteration_handle_t_db, cpg_iteration_handle);
> +error_destroy:
> +	if (error != CS_OK) {
> +		hdb_handle_destroy (&cpg_iteration_handle_t_db, cpg_iteration_handle);
> +	}
> +
> +response_send:
> +	res_lib_cpg_iterationinitialize.header.size = sizeof (res_lib_cpg_iterationinitialize);
> +	res_lib_cpg_iterationinitialize.header.id = MESSAGE_RES_CPG_ITERATIONINITIALIZE;
> +	res_lib_cpg_iterationinitialize.header.error = error;
> +	res_lib_cpg_iterationinitialize.iteration_handle = cpg_iteration_handle;
> +
> +	api->ipc_response_send (conn, &res_lib_cpg_iterationinitialize,
> +		sizeof (res_lib_cpg_iterationinitialize));
> +}
> +
> +static void message_handler_req_lib_cpg_iteration_next (
> +	void *conn,
> +	const void *message)
> +{
> +	const struct req_lib_cpg_iterationnext *req_lib_cpg_iterationnext = message;
> +	struct res_lib_cpg_iterationnext res_lib_cpg_iterationnext;
> +	struct cpg_iteration_instance *cpg_iteration_instance;
> +	cs_error_t error = CS_OK;
> +	int res;
> +	struct process_info *pi;
> +
> +	log_printf (LOGSYS_LEVEL_DEBUG, "cpg iteration next\n");
> +
> +	res = hdb_handle_get (&cpg_iteration_handle_t_db,
> +			req_lib_cpg_iterationnext->iteration_handle,
> +			(void *)&cpg_iteration_instance);
> +
> +	if (res != 0) {
> +		error = CS_ERR_LIBRARY;
> +		goto error_exit;
> +	}
> +
> +	assert (cpg_iteration_instance);
> +
> +	cpg_iteration_instance->current_pointer = cpg_iteration_instance->current_pointer->next;
> +
> +	if (cpg_iteration_instance->current_pointer == &cpg_iteration_instance->items_list_head) {
> +		error = CS_ERR_NO_SECTIONS;
> +		goto error_put;
> +	}
> +
> +	pi = list_entry (cpg_iteration_instance->current_pointer, struct process_info, list);
> +
> +	/*
> +	 * Copy iteration data
> +	 */
> +	res_lib_cpg_iterationnext.description.nodeid = pi->nodeid;
> +	res_lib_cpg_iterationnext.description.pid = pi->pid;
> +	memcpy (&res_lib_cpg_iterationnext.description.group,
> +			&pi->group,
> +			sizeof (mar_cpg_name_t));
> +
> +error_put:
> +	hdb_handle_put (&cpg_iteration_handle_t_db, req_lib_cpg_iterationnext->iteration_handle);
> +error_exit:
> +	res_lib_cpg_iterationnext.header.size = sizeof (res_lib_cpg_iterationnext);
> +	res_lib_cpg_iterationnext.header.id = MESSAGE_RES_CPG_ITERATIONNEXT;
> +	res_lib_cpg_iterationnext.header.error = error;
> +
> +	api->ipc_response_send (conn, &res_lib_cpg_iterationnext,
> +		sizeof (res_lib_cpg_iterationnext));
> +}
> +
> +static void message_handler_req_lib_cpg_iteration_finalize (
> +	void *conn,
> +	const void *message)
> +{
> +	const struct req_lib_cpg_iterationfinalize *req_lib_cpg_iterationfinalize = message;
> +	struct res_lib_cpg_iterationfinalize res_lib_cpg_iterationfinalize;
> +	struct cpg_iteration_instance *cpg_iteration_instance;
> +	cs_error_t error = CS_OK;
> +	int res;
> +
> +	log_printf (LOGSYS_LEVEL_DEBUG, "cpg iteration finalize\n");
> +
> +	res = hdb_handle_get (&cpg_iteration_handle_t_db,
> +			req_lib_cpg_iterationfinalize->iteration_handle,
> +			(void *)&cpg_iteration_instance);
> +
> +	if (res != 0) {
> +		error = CS_ERR_LIBRARY;
> +		goto error_exit;
> +	}
> +
> +	assert (cpg_iteration_instance);
> +
> +	cpg_iteration_instance_finalize (cpg_iteration_instance);
> +	hdb_handle_put (&cpg_iteration_handle_t_db, cpg_iteration_instance->handle);
> +
> +error_exit:
> +	res_lib_cpg_iterationfinalize.header.size = sizeof (res_lib_cpg_iterationfinalize);
> +	res_lib_cpg_iterationfinalize.header.id = MESSAGE_RES_CPG_ITERATIONFINALIZE;
> +	res_lib_cpg_iterationfinalize.header.error = error;
> +
> +	api->ipc_response_send (conn, &res_lib_cpg_iterationfinalize,
> +		sizeof (res_lib_cpg_iterationfinalize));
> +}
> diff --git a/exec/evs.c b/exec/evs.c
> new file mode 100644
> index 0000000..eb8a8d4
> --- /dev/null
> +++ b/exec/evs.c
> @@ -0,0 +1,489 @@
> +/*
> + * Copyright (c) 2004-2006 MontaVista Software, Inc.
> + * Copyright (c) 2006-2009 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Author: Steven Dake (sdake@xxxxxxxxxx)
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the MontaVista Software, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <sys/un.h>
> +#include <sys/ioctl.h>
> +#include <netinet/in.h>
> +#include <sys/uio.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <assert.h>
> +#include <time.h>
> +#include <netinet/in.h>
> +#include <arpa/inet.h>
> +
> +#include <corosync/swab.h>
> +#include <corosync/corotypes.h>
> +#include <qb/qbipc_common.h>
> +#include <corosync/corodefs.h>
> +#include <corosync/mar_gen.h>
> +#include <corosync/coroapi.h>
> +#include <corosync/logsys.h>
> +#include <corosync/list.h>
> +
> +#include <corosync/evs.h>
> +#include <corosync/ipc_evs.h>
> +
> +LOGSYS_DECLARE_SUBSYS ("EVS");
> +
> +enum evs_exec_message_req_types {
> +	MESSAGE_REQ_EXEC_EVS_MCAST = 0
> +};
> +
> +/*
> + * Service Interfaces required by service_message_handler struct
> + */
> +static int evs_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api);
> +
> +static void evs_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id);
> +
> +static void message_handler_req_exec_mcast (const void *msg, unsigned int nodeid);
> +
> +static void req_exec_mcast_endian_convert (void *msg);
> +
> +static void message_handler_req_evs_join (void *conn, const void *msg);
> +static void message_handler_req_evs_leave (void *conn, const void *msg);
> +static void message_handler_req_evs_mcast_joined (void *conn, const void *msg);
> +static void message_handler_req_evs_mcast_groups (void *conn, const void *msg);
> +static void message_handler_req_evs_membership_get (void *conn, const void *msg);
> +
> +static int evs_lib_init_fn (void *conn);
> +static int evs_lib_exit_fn (void *conn);
> +
> +struct evs_pd {
> +	struct evs_group *groups;
> +	int group_entries;
> +	struct list_head list;
> +	void *conn;
> +};
> +
> +static struct corosync_api_v1 *api;
> +
> +static struct corosync_lib_handler evs_lib_engine[] =
> +{
> +	{ /* 0 */
> +		.lib_handler_fn				= message_handler_req_evs_join,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 1 */
> +		.lib_handler_fn				= message_handler_req_evs_leave,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 2 */
> +		.lib_handler_fn				= message_handler_req_evs_mcast_joined,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 3 */
> +		.lib_handler_fn				= message_handler_req_evs_mcast_groups,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> +	},
> +	{ /* 4 */
> +		.lib_handler_fn				= message_handler_req_evs_membership_get,
> +		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	}
> +};
> +
> +static struct corosync_exec_handler evs_exec_engine[] =
> +{
> +	{
> +		.exec_handler_fn 	= message_handler_req_exec_mcast,
> +		.exec_endian_convert_fn	= req_exec_mcast_endian_convert
> +	}
> +};
> +
> +struct corosync_service_engine evs_service_engine = {
> +	.name			= "corosync extended virtual synchrony service",
> +	.id			= EVS_SERVICE,
> +	.priority		= 1,
> +	.private_data_size	= sizeof (struct evs_pd),
> +	.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED,
> +	.lib_init_fn		= evs_lib_init_fn,
> +	.lib_exit_fn		= evs_lib_exit_fn,
> +	.lib_engine		= evs_lib_engine,
> +	.lib_engine_count	= sizeof (evs_lib_engine) / sizeof (struct corosync_lib_handler),
> +	.exec_engine		= evs_exec_engine,
> +	.exec_engine_count	= sizeof (evs_exec_engine) / sizeof (struct corosync_exec_handler),
> +	.confchg_fn		= evs_confchg_fn,
> +	.exec_init_fn		= evs_exec_init_fn,
> +	.exec_dump_fn		= NULL,
> +	.sync_mode		= CS_SYNC_V1
> +};
> +
> +static DECLARE_LIST_INIT (confchg_notify);
> +
> +struct corosync_service_engine *evs_get_service_engine_ver0 (void)
> +{
> +	return (&evs_service_engine);
> +}
> +
> +static int evs_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api)
> +{
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +
> +	api = corosync_api;
> +
> +	return 0;
> +}
> +
> +struct res_evs_confchg_callback res_evs_confchg_callback;
> +
> +static void evs_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id)
> +{
> +	struct list_head *list;
> +	struct evs_pd *evs_pd;
> +
> +	/*
> +	 * Build configuration change message
> +	 */
> +	res_evs_confchg_callback.header.size = sizeof (struct res_evs_confchg_callback);
> +	res_evs_confchg_callback.header.id = MESSAGE_RES_EVS_CONFCHG_CALLBACK;
> +	res_evs_confchg_callback.header.error = CS_OK;
> +
> +	memcpy (res_evs_confchg_callback.member_list,
> +		member_list, member_list_entries * sizeof(*member_list));
> +	res_evs_confchg_callback.member_list_entries = member_list_entries;
> +
> +	memcpy (res_evs_confchg_callback.left_list,
> +		left_list, left_list_entries * sizeof(*left_list));
> +	res_evs_confchg_callback.left_list_entries = left_list_entries;
> +
> +	memcpy (res_evs_confchg_callback.joined_list,
> +		joined_list, joined_list_entries * sizeof(*joined_list));
> +	res_evs_confchg_callback.joined_list_entries = joined_list_entries;
> +
> +	/*
> +	 * Send configuration change message to every EVS library user
> +	 */
> +	for (list = confchg_notify.next; list != &confchg_notify; list = list->next) {
> +		evs_pd = list_entry (list, struct evs_pd, list);
> +		api->ipc_dispatch_send (evs_pd->conn,
> +			&res_evs_confchg_callback,
> +			sizeof (res_evs_confchg_callback));
> +	}
> +}
> +
> +static int evs_lib_init_fn (void *conn)
> +{
> +	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> +
> +	log_printf (LOGSYS_LEVEL_DEBUG, "Got request to initialize the evs service.\n");
> +
> +	evs_pd->groups = NULL;
> +	evs_pd->group_entries = 0;
> +	evs_pd->conn = conn;
> +	list_init (&evs_pd->list);
> +	list_add (&evs_pd->list, &confchg_notify);
> +
> +	api->ipc_dispatch_send (conn, &res_evs_confchg_callback,
> +		sizeof (res_evs_confchg_callback));
> +
> +	return (0);
> +}
> +
> +static int evs_lib_exit_fn (void *conn)
> +{
> +	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> +
> +	list_del (&evs_pd->list);
> +	return (0);
> +}
> +
> +static void message_handler_req_evs_join (void *conn, const void *msg)
> +{
> +	cs_error_t error = CS_OK;
> +	const struct req_lib_evs_join *req_lib_evs_join = msg;
> +	struct res_lib_evs_join res_lib_evs_join;
> +	void *addr;
> +	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> +
> +	if (req_lib_evs_join->group_entries > 50) {
> +		error = CS_ERR_TOO_MANY_GROUPS;
> +		goto exit_error;
> +	}
> +
> +	addr = realloc (evs_pd->groups, sizeof (struct evs_group) *
> +		(evs_pd->group_entries + req_lib_evs_join->group_entries));
> +	if (addr == NULL) {
> +		error = CS_ERR_NO_MEMORY;
> +		goto exit_error;
> +	}
> +	evs_pd->groups = addr;
> +
> +	memcpy (&evs_pd->groups[evs_pd->group_entries],
> +		req_lib_evs_join->groups,
> +		sizeof (struct evs_group) * req_lib_evs_join->group_entries);
> +
> +	evs_pd->group_entries += req_lib_evs_join->group_entries;
> +
> +exit_error:
> +	res_lib_evs_join.header.size = sizeof (struct res_lib_evs_join);
> +	res_lib_evs_join.header.id = MESSAGE_RES_EVS_JOIN;
> +	res_lib_evs_join.header.error = error;
> +
> +	api->ipc_response_send (conn, &res_lib_evs_join,
> +		sizeof (struct res_lib_evs_join));
> +}
> +
> +static void message_handler_req_evs_leave (void *conn, const void *msg)
> +{
> +	const struct req_lib_evs_leave *req_lib_evs_leave = msg;
> +	struct res_lib_evs_leave res_lib_evs_leave;
> +	cs_error_t error = CS_OK;
> +	int i, j;
> +	int found;
> +	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> +
> +	for (i = 0; i < req_lib_evs_leave->group_entries; i++) {
> +		found = 0;
> +		for (j = 0; j < evs_pd->group_entries;) {
> +
> +			if (memcmp (&req_lib_evs_leave->groups[i],
> +				&evs_pd->groups[j], sizeof (struct evs_group)) == 0) {
> +
> +				/*
> +				 * Delete entry
> +				 */
> +				memmove (&evs_pd->groups[j], &evs_pd->groups[j + 1],
> +					(evs_pd->group_entries - j - 1) * sizeof (struct evs_group));
> +
> +				evs_pd->group_entries -= 1;
> +
> +				found = 1;
> +				break;
> +			} else {
> +				j++;
> +			}
> +		}
> +		if (found == 0) {
> +			error = CS_ERR_NOT_EXIST;
> +			break;
> +		}
> +	}
> +
> +	res_lib_evs_leave.header.size = sizeof (struct res_lib_evs_leave);
> +	res_lib_evs_leave.header.id = MESSAGE_RES_EVS_LEAVE;
> +	res_lib_evs_leave.header.error = error;
> +
> +	api->ipc_response_send (conn, &res_lib_evs_leave,
> +		sizeof (struct res_lib_evs_leave));
> +}
> +
> +static void message_handler_req_evs_mcast_joined (void *conn, const void *msg)
> +{
> +	cs_error_t error = CS_ERR_TRY_AGAIN;
> +	const struct req_lib_evs_mcast_joined *req_lib_evs_mcast_joined = msg;
> +	struct res_lib_evs_mcast_joined res_lib_evs_mcast_joined;
> +	struct iovec req_exec_evs_mcast_iovec[3];
> +	struct req_exec_evs_mcast req_exec_evs_mcast;
> +	int res;
> +	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> +
> +	req_exec_evs_mcast.header.size = sizeof (struct req_exec_evs_mcast) +
> +		evs_pd->group_entries * sizeof (struct evs_group) +
> +		req_lib_evs_mcast_joined->msg_len;
> +
> +	req_exec_evs_mcast.header.id =
> +		SERVICE_ID_MAKE (EVS_SERVICE, MESSAGE_REQ_EXEC_EVS_MCAST);
> +	req_exec_evs_mcast.msg_len = req_lib_evs_mcast_joined->msg_len;
> +	req_exec_evs_mcast.group_entries = evs_pd->group_entries;
> +
> +	req_exec_evs_mcast_iovec[0].iov_base = (char *)&req_exec_evs_mcast;
> +	req_exec_evs_mcast_iovec[0].iov_len = sizeof (req_exec_evs_mcast);
> +	req_exec_evs_mcast_iovec[1].iov_base = (char *)evs_pd->groups;
> +	req_exec_evs_mcast_iovec[1].iov_len = evs_pd->group_entries * sizeof (struct evs_group);
> +	req_exec_evs_mcast_iovec[2].iov_base = (char *)&req_lib_evs_mcast_joined->msg;
> +	req_exec_evs_mcast_iovec[2].iov_len = req_lib_evs_mcast_joined->msg_len;
> +
> +	res = api->totem_mcast (req_exec_evs_mcast_iovec, 3, TOTEM_AGREED);
> +	/* TODO */
> +	if (res == 0) {
> +		error = CS_OK;
> +	}
> +
> +	res_lib_evs_mcast_joined.header.size = sizeof (struct res_lib_evs_mcast_joined);
> +	res_lib_evs_mcast_joined.header.id = MESSAGE_RES_EVS_MCAST_JOINED;
> +	res_lib_evs_mcast_joined.header.error = error;
> +
> +	api->ipc_response_send (conn, &res_lib_evs_mcast_joined,
> +		sizeof (struct res_lib_evs_mcast_joined));
> +}
> +
> +static void message_handler_req_evs_mcast_groups (void *conn, const void *msg)
> +{
> +	cs_error_t error = CS_ERR_TRY_AGAIN;
> +	const struct req_lib_evs_mcast_groups *req_lib_evs_mcast_groups = msg;
> +	struct res_lib_evs_mcast_groups res_lib_evs_mcast_groups;
> +	struct iovec req_exec_evs_mcast_iovec[3];
> +	struct req_exec_evs_mcast req_exec_evs_mcast;
> +	const char *msg_addr;
> +	int res;
> +
> +	req_exec_evs_mcast.header.size = sizeof (struct req_exec_evs_mcast) +
> +		sizeof (struct evs_group) * req_lib_evs_mcast_groups->group_entries +
> +		req_lib_evs_mcast_groups->msg_len;
> +
> +	req_exec_evs_mcast.header.id =
> +		SERVICE_ID_MAKE (EVS_SERVICE, MESSAGE_REQ_EXEC_EVS_MCAST);
> +	req_exec_evs_mcast.msg_len = req_lib_evs_mcast_groups->msg_len;
> +	req_exec_evs_mcast.group_entries = req_lib_evs_mcast_groups->group_entries;
> +
> +	msg_addr = (const char *)req_lib_evs_mcast_groups +
> +		sizeof (struct req_lib_evs_mcast_groups) +
> +		(sizeof (struct evs_group) * req_lib_evs_mcast_groups->group_entries);
> +
> +	req_exec_evs_mcast_iovec[0].iov_base = (char *)&req_exec_evs_mcast;
> +	req_exec_evs_mcast_iovec[0].iov_len = sizeof (req_exec_evs_mcast);
> +	req_exec_evs_mcast_iovec[1].iov_base = (char *)&req_lib_evs_mcast_groups->groups;
> +	req_exec_evs_mcast_iovec[1].iov_len = sizeof (struct evs_group) * req_lib_evs_mcast_groups->group_entries;
> +	req_exec_evs_mcast_iovec[2].iov_base = (void *) msg_addr; /* discard const */
> +	req_exec_evs_mcast_iovec[2].iov_len = req_lib_evs_mcast_groups->msg_len;
> +
> +	res = api->totem_mcast (req_exec_evs_mcast_iovec, 3, TOTEM_AGREED);
> +	if (res == 0) {
> +		error = CS_OK;
> +	}
> +
> +	res_lib_evs_mcast_groups.header.size = sizeof (struct res_lib_evs_mcast_groups);
> +	res_lib_evs_mcast_groups.header.id = MESSAGE_RES_EVS_MCAST_GROUPS;
> +	res_lib_evs_mcast_groups.header.error = error;
> +
> +	api->ipc_response_send (conn, &res_lib_evs_mcast_groups,
> +		sizeof (struct res_lib_evs_mcast_groups));
> +}
> +
> +static void message_handler_req_evs_membership_get (void *conn, const void *msg)
> +{
> +	struct res_lib_evs_membership_get res_lib_evs_membership_get;
> +
> +	res_lib_evs_membership_get.header.size = sizeof (struct res_lib_evs_membership_get);
> +	res_lib_evs_membership_get.header.id = MESSAGE_RES_EVS_MEMBERSHIP_GET;
> +	res_lib_evs_membership_get.header.error = CS_OK;
> +	res_lib_evs_membership_get.local_nodeid = api->totem_nodeid_get ();
> +	memcpy (&res_lib_evs_membership_get.member_list,
> +		&res_evs_confchg_callback.member_list,
> +		sizeof (res_lib_evs_membership_get.member_list));
> +
> +	res_lib_evs_membership_get.member_list_entries =
> +		res_evs_confchg_callback.member_list_entries;
> +
> +	api->ipc_response_send (conn, &res_lib_evs_membership_get,
> +		sizeof (struct res_lib_evs_membership_get));
> +}
> +
> +static void req_exec_mcast_endian_convert (void *msg)
> +{
> +	struct req_exec_evs_mcast *req_exec_evs_mcast =
> +		(struct req_exec_evs_mcast *)msg;
> +	req_exec_evs_mcast->group_entries =
> +		swab32 (req_exec_evs_mcast->group_entries);
> +	req_exec_evs_mcast->msg_len = swab32 (req_exec_evs_mcast->msg_len);
> +}
> +
> +static void message_handler_req_exec_mcast (
> +	const void *msg,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_evs_mcast *req_exec_evs_mcast = msg;
> +	struct res_evs_deliver_callback res_evs_deliver_callback;
> +	const char *msg_addr;
> +	struct list_head *list;
> +	int found = 0;
> +	int i, j;
> +	struct evs_pd *evs_pd;
> +	struct iovec iov[2];
> +
> +	res_evs_deliver_callback.header.size = sizeof (struct res_evs_deliver_callback) +
> +		req_exec_evs_mcast->msg_len;
> +	res_evs_deliver_callback.header.id = MESSAGE_RES_EVS_DELIVER_CALLBACK;
> +	res_evs_deliver_callback.header.error = CS_OK;
> +	res_evs_deliver_callback.msglen = req_exec_evs_mcast->msg_len;
> +
> +	msg_addr = (const char *)req_exec_evs_mcast + sizeof (struct req_exec_evs_mcast) +
> +		(sizeof (struct evs_group) * req_exec_evs_mcast->group_entries);
> +
> +	for (list = confchg_notify.next; list != &confchg_notify; list = list->next) {
> +		found = 0;
> +		evs_pd = list_entry (list, struct evs_pd, list);
> +
> +		for (i = 0; i < evs_pd->group_entries; i++) {
> +			for (j = 0; j < req_exec_evs_mcast->group_entries; j++) {
> +
> +				if (memcmp (&evs_pd->groups[i], &req_exec_evs_mcast->groups[j],
> +					sizeof (struct evs_group)) == 0) {
> +
> +					found = 1;
> +					break;
> +				}
> +			}
> +			if (found) {
> +				break;
> +			}
> +		}
> +
> +		if (found) {
> +			res_evs_deliver_callback.local_nodeid = nodeid;
> +			iov[0].iov_base = (void *)&res_evs_deliver_callback;
> +			iov[0].iov_len = sizeof (struct res_evs_deliver_callback);
> +			iov[1].iov_base = (void *) msg_addr; /* discard const */
> +			iov[1].iov_len = req_exec_evs_mcast->msg_len;
> +
> +			api->ipc_dispatch_iov_send (
> +				evs_pd->conn,
> +				iov,
> +				2);
> +		}
> +	}
> +}
> diff --git a/exec/main.c b/exec/main.c
> index 952c7c7..374763a 100644
> --- a/exec/main.c
> +++ b/exec/main.c
> @@ -105,7 +105,6 @@
>  #include <corosync/corotypes.h>
>  #include <corosync/corodefs.h>
>  #include <corosync/list.h>
> -#include <corosync/lcr/lcr_ifact.h>
>  #include <corosync/totem/totempg.h>
>  #include <corosync/engine/config.h>
>  #include <corosync/logsys.h>
> @@ -1133,49 +1132,11 @@ int main (int argc, char **argv, char **envp)
>  
>  	num_config_modules = 0;
>  
> -	/*
> -	 * Bootstrap in the default configuration parser or use
> -	 * the corosync default built in parser if the configuration parser
> -	 * isn't overridden
> -	 */
> -	config_iface_init = getenv("COROSYNC_DEFAULT_CONFIG_IFACE");
> -	if (!config_iface_init) {
> -		config_iface_init = "corosync_parser";
> -	}
> -
> -	/* Make a copy so we can deface it with strtok */
> -	if ((config_iface = strdup(config_iface_init)) == NULL) {
> -		log_printf (LOGSYS_LEVEL_ERROR, "exhausted virtual memory");
> -		corosync_exit_error (COROSYNC_DONE_OBJDB);
> -	}
> -
> -	iface = strtok_r(config_iface, ":", &strtok_save_pt);
> -	while (iface)
> -	{
> -		res = lcr_ifact_reference (
> -			&config_handle,
> -			iface,
> -			config_version,
> -			&config_p,
> -			0);
> -
> -		config = (struct config_iface_ver0 *)config_p;
> -		if (res == -1) {
> -			log_printf (LOGSYS_LEVEL_ERROR, "Corosync Executive couldn't open configuration component '%s'\n", iface);
> -			corosync_exit_error (COROSYNC_DONE_MAINCONFIGREAD);
> -		}
> -
> -		res = config->config_readconfig(&error_string);
> -		if (res == -1) {
> -			log_printf (LOGSYS_LEVEL_ERROR, "%s", error_string);
> -			corosync_exit_error (COROSYNC_DONE_MAINCONFIGREAD);
> -		}
> -		log_printf (LOGSYS_LEVEL_NOTICE, "%s", error_string);
> -		config_modules[num_config_modules++] = config;
> -
> -		iface = strtok_r(NULL, ":", &strtok_save_pt);
> +	res = coroparse_configparse(&error_string);
> +	if (res == -1) {
> +		log_printf (LOGSYS_LEVEL_ERROR, "%s", error_string);
> +		corosync_exit_error (COROSYNC_DONE_MAINCONFIGREAD);
>  	}
> -	free(config_iface);
>  
>  	res = corosync_main_config_read (&error_string);
>  	if (res == -1) {
> diff --git a/exec/main.h b/exec/main.h
> index ad73d87..6cbbeb4 100644
> --- a/exec/main.h
> +++ b/exec/main.h
> @@ -122,4 +122,6 @@ extern void cs_ipc_refcnt_inc(void *conn);
>  
>  extern void cs_ipc_refcnt_dec(void *conn);
>  
> +int coroparse_configparse (const char **error_string);
> +
>  #endif /* MAIN_H_DEFINED */
> diff --git a/exec/mon.c b/exec/mon.c
> new file mode 100644
> index 0000000..1927df5
> --- /dev/null
> +++ b/exec/mon.c
> @@ -0,0 +1,506 @@
> +/*
> + * Copyright (c) 2010 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Author: Angus Salkeld <asalkeld@xxxxxxxxxx>
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the MontaVista Software, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#include <unistd.h>
> +#if defined(HAVE_LIBSTATGRAB)
> +#include <statgrab.h>
> +#endif
> +
> +#include <corosync/corotypes.h>
> +#include <corosync/corodefs.h>
> +#include <corosync/coroapi.h>
> +#include <corosync/list.h>
> +#include <corosync/logsys.h>
> +#include <corosync/icmap.h>
> +#include "../exec/fsm.h"
> +
> +
> +LOGSYS_DECLARE_SUBSYS ("MON");
> +
> +/*
> + * Service Interfaces required by service_message_handler struct
> + */
> +static int mon_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api);
> +
> +static struct corosync_api_v1 *api;
> +#define MON_DEFAULT_PERIOD 3000
> +#define MON_MIN_PERIOD 500
> +#define MON_MAX_PERIOD (120 * CS_TIME_MS_IN_SEC)
> +
> +struct corosync_service_engine mon_service_engine = {
> +	.name			= "corosync resource monitoring service",
> +	.id			= MON_SERVICE,
> +	.priority		= 1,
> +	.private_data_size	= 0,
> +	.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> +	.lib_init_fn		= NULL,
> +	.lib_exit_fn		= NULL,
> +	.lib_engine		= NULL,
> +	.lib_engine_count	= 0,
> +	.exec_engine		= NULL,
> +	.exec_engine_count	= 0,
> +	.confchg_fn		= NULL,
> +	.exec_init_fn		= mon_exec_init_fn,
> +	.exec_dump_fn		= NULL,
> +	.sync_mode		= CS_SYNC_V2
> +};
> +
> +static DECLARE_LIST_INIT (confchg_notify);
> +
> +
> +struct resource_instance {
> +	const char *icmap_path;
> +	const char *name;
> +	corosync_timer_handle_t timer_handle;
> +	void (*update_stats_fn) (void *data);
> +	struct cs_fsm fsm;
> +	uint64_t period;
> +	icmap_value_types_t max_type;
> +	union {
> +		int32_t int32;
> +		double dbl;
> +	} max;
> +};
> +
> +static void mem_update_stats_fn (void *data);
> +static void load_update_stats_fn (void *data);
> +
> +static struct resource_instance memory_used_inst = {
> +	.name = "memory_used",
> +	.icmap_path = "resources.system.memory_used.",
> +	.update_stats_fn = mem_update_stats_fn,
> +	.max_type = ICMAP_VALUETYPE_INT32,
> +	.max.int32 = INT32_MAX,
> +	.period = MON_DEFAULT_PERIOD,
> +};
> +
> +static struct resource_instance load_15min_inst = {
> +	.name = "load_15min",
> +	.icmap_path = "resources.system.load_15min.",
> +	.update_stats_fn = load_update_stats_fn,
> +	.max_type = ICMAP_VALUETYPE_DOUBLE,
> +	.max.dbl = INT32_MAX,
> +	.period = MON_DEFAULT_PERIOD,
> +};
> +
> +
> +/*
> + * F S M
> + */
> +static void mon_config_changed (struct cs_fsm* fsm, int32_t event, void * data);
> +static void mon_resource_failed (struct cs_fsm* fsm, int32_t event, void * data);
> +
> +const char * mon_running_str = "running";
> +const char * mon_failed_str = "failed";
> +const char * mon_failure_str = "failure";
> +const char * mon_stopped_str = "stopped";
> +const char * mon_config_changed_str = "config_changed";
> +
> +enum mon_resource_state {
> +	MON_S_STOPPED,
> +	MON_S_RUNNING,
> +	MON_S_FAILED
> +};
> +enum mon_resource_event {
> +	MON_E_CONFIG_CHANGED,
> +	MON_E_FAILURE
> +};
> +
> +struct cs_fsm_entry mon_fsm_table[] = {
> +	{ MON_S_STOPPED, MON_E_CONFIG_CHANGED,	mon_config_changed,	{MON_S_STOPPED, MON_S_RUNNING, -1} },
> +	{ MON_S_STOPPED, MON_E_FAILURE,		NULL,			{-1} },
> +	{ MON_S_RUNNING, MON_E_CONFIG_CHANGED,	mon_config_changed,	{MON_S_RUNNING, MON_S_STOPPED, -1} },
> +	{ MON_S_RUNNING, MON_E_FAILURE,		mon_resource_failed,	{MON_S_FAILED, -1} },
> +	{ MON_S_FAILED,  MON_E_CONFIG_CHANGED,	mon_config_changed,	{MON_S_RUNNING, MON_S_STOPPED, -1} },
> +	{ MON_S_FAILED,  MON_E_FAILURE,		NULL,			{-1} },
> +};
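
The resource monitor drives everything from this transition table. A stripped-down
sketch of the table-driven idea, with invented names (this is not the cs_fsm API, just
the shape of it):

#include <stdio.h>

enum state { S_STOPPED, S_RUNNING, S_FAILED };
enum event { E_CONFIG_CHANGED, E_FAILURE };

struct entry {
	enum state curr;
	enum event ev;
	enum state next;
};

static const struct entry table[] = {
	{ S_STOPPED, E_CONFIG_CHANGED, S_RUNNING },
	{ S_RUNNING, E_FAILURE,        S_FAILED  },
	{ S_FAILED,  E_CONFIG_CHANGED, S_RUNNING },
};

static enum state step(enum state s, enum event ev)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].curr == s && table[i].ev == ev)
			return table[i].next;
	}
	return s;   /* no matching transition: stay in the current state */
}

int main(void)
{
	enum state s = S_STOPPED;

	s = step(s, E_CONFIG_CHANGED);   /* -> running */
	s = step(s, E_FAILURE);          /* -> failed */
	printf("final state: %d\n", s);
	return 0;
}
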
> +
> +struct corosync_service_engine *mon_get_service_engine_ver0 (void)
> +{
> +	return (&mon_service_engine);
> +}
> +
> +static const char * mon_res_state_to_str(struct cs_fsm* fsm,
> +	int32_t state)
> +{
> +	switch (state) {
> +	case MON_S_STOPPED:
> +		return mon_stopped_str;
> +		break;
> +	case MON_S_RUNNING:
> +		return mon_running_str;
> +		break;
> +	case MON_S_FAILED:
> +		return mon_failed_str;
> +		break;
> +	}
> +	return NULL;
> +}
> +
> +static const char * mon_res_event_to_str(struct cs_fsm* fsm,
> +	int32_t event)
> +{
> +	switch (event) {
> +	case MON_E_CONFIG_CHANGED:
> +		return mon_config_changed_str;
> +		break;
> +	case MON_E_FAILURE:
> +		return mon_failure_str;
> +		break;
> +	}
> +	return NULL;
> +}
> +
> +static void mon_fsm_state_set (struct cs_fsm* fsm,
> +	enum mon_resource_state next_state, struct resource_instance* inst)
> +{
> +	enum mon_resource_state prev_state = fsm->curr_state;
> +	const char *state_str;
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +
> +	ENTER();
> +
> +	cs_fsm_state_set(fsm, next_state, inst);
> +
> +	if (prev_state == fsm->curr_state) {
> +		return;
> +	}
> +	state_str = mon_res_state_to_str(fsm, fsm->curr_state);
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "state");
> +	icmap_set_string(key_name, state_str);
> +}
> +
> +
> +static void mon_config_changed (struct cs_fsm* fsm, int32_t event, void * data)
> +{
> +	struct resource_instance * inst = (struct resource_instance *)data;
> +	uint64_t tmp_value;
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +	int run_updater;
> +
> +	ENTER();
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "poll_period");
> +	if (icmap_get_uint64(key_name, &tmp_value) == CS_OK) {
> +		if (tmp_value >= MON_MIN_PERIOD && tmp_value <= MON_MAX_PERIOD) {
> +			log_printf (LOGSYS_LEVEL_DEBUG,
> +				"poll_period changing from:%"PRIu64" to %"PRIu64".",
> +				inst->period, tmp_value);
> +			inst->period = tmp_value;
> +		} else {
> +			log_printf (LOGSYS_LEVEL_WARNING,
> +				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> +				tmp_value, inst->name);
> +		}
> +	}
> +
> +	if (inst->timer_handle) {
> +		api->timer_delete(inst->timer_handle);
> +		inst->timer_handle = 0;
> +	}
> +
> +	run_updater = 0;
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "max");
> +	if (inst->max_type == ICMAP_VALUETYPE_INT32) {
> +		if (icmap_get_int32(key_name, &inst->max.int32) != CS_OK) {
> +			inst->max.int32 = INT32_MAX;
> +
> +			mon_fsm_state_set (fsm, MON_S_STOPPED, inst);
> +		} else {
> +			run_updater = 1;
> +		}
> +	}
> +	if (inst->max_type == ICMAP_VALUETYPE_DOUBLE) {
> +		if (icmap_get_double(key_name, &inst->max.dbl) != CS_OK) {
> +			inst->max.dbl = INT32_MAX;
> +
> +			mon_fsm_state_set (fsm, MON_S_STOPPED, inst);
> +		} else {
> +			run_updater = 1;
> +		}
> +	}
> +
> +	if (run_updater) {
> +		mon_fsm_state_set (fsm, MON_S_RUNNING, inst);
> +		/*
> +		 * run the updater, in case the period has shortened,
> +		 * and to start the timer.
> +		 */
> +		inst->update_stats_fn (inst);
> +	}
> +}
> +
> +static void mon_resource_failed (struct cs_fsm* fsm, int32_t event, void * data)
> +{
> +	struct resource_instance * inst = (struct resource_instance *)data;
> +	ENTER();
> +	mon_fsm_state_set (fsm, MON_S_FAILED, inst);
> +}
> +
> +static int32_t percent_mem_used_get(void)
> +{
> +#if defined(HAVE_LIBSTATGRAB)
> +	sg_mem_stats *mem_stats;
> +	sg_swap_stats *swap_stats;
> +	long long total, freemem;
> +
> +	mem_stats = sg_get_mem_stats();
> +	swap_stats = sg_get_swap_stats();
> +
> +	if (mem_stats == NULL || swap_stats == NULL) {
> +		log_printf (LOGSYS_LEVEL_ERROR, "Unable to get memory stats: %s\n",
> +			sg_str_error(sg_get_error()));
> +		return -1;
> +	}
> +	total = mem_stats->total + swap_stats->total;
> +	freemem = mem_stats->free + swap_stats->free;
> +	return ((total - freemem) * 100) / total;
> +#else
> +#if defined(COROSYNC_LINUX)
> +	char *line_ptr;
> +	char line[512];
> +	unsigned long long value;
> +	FILE *f;
> +	long long total = 0;
> +	long long freemem = 0;
> +
> +	if ((f = fopen("/proc/meminfo", "r")) == NULL) {
> +		return -1;
> +	}
> +
> +	while ((line_ptr = fgets(line, sizeof(line), f)) != NULL) {
> +		if (sscanf(line_ptr, "%*s %llu kB", &value) != 1) {
> +			continue;
> +		}
> +		value *= 1024;
> +
> +		if (strncmp(line_ptr, "MemTotal:", 9) == 0) {
> +			total += value;
> +		} else if (strncmp(line_ptr, "MemFree:", 8) == 0) {
> +			freemem += value;
> +		} else if (strncmp(line_ptr, "SwapTotal:", 10) == 0) {
> +			total += value;
> +		} else if (strncmp(line_ptr, "SwapFree:", 9) == 0) {
> +			freemem += value;
> +		}
> +	}
> +
> +	fclose(f);
> +	return ((total - freemem) * 100) / total;
> +#else
> +#error need libstatgrab or linux.
> +#endif /* COROSYNC_LINUX */
> +#endif /* HAVE_LIBSTATGRAB */
> +}
> +
> +
> +static void mem_update_stats_fn (void *data)
> +{
> +	struct resource_instance * inst = (struct resource_instance *)data;
> +	int32_t new_value;
> +	uint64_t timestamp;
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +
> +	new_value = percent_mem_used_get();
> +	log_printf (LOGSYS_LEVEL_DEBUG, "memory used: %d%%", new_value);
> +	if (new_value > 0) {
> +		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "current");
> +		icmap_set_uint32(key_name, new_value);
> +
> +		timestamp = cs_timestamp_get();
> +
> +		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "last_updated");
> +		icmap_set_uint64(key_name, timestamp);
> +
> +		if (new_value > inst->max.int32 && inst->fsm.curr_state != MON_S_FAILED) {
> +			cs_fsm_process (&inst->fsm, MON_E_FAILURE, inst);
> +		}
> +	}
> +	api->timer_add_duration(inst->period * MILLI_2_NANO_SECONDS,
> +		inst, inst->update_stats_fn, &inst->timer_handle);
> +}
> +
> +static double min15_loadavg_get(void)
> +{
> +#if defined(HAVE_LIBSTATGRAB)
> +	sg_load_stats *load_stats;
> +	load_stats = sg_get_load_stats ();
> +	if (load_stats == NULL) {
> +		log_printf (LOGSYS_LEVEL_ERROR, "Unable to get load stats: %s\n",
> +			sg_str_error (sg_get_error()));
> +		return -1;
> +	}
> +	return load_stats->min15;
> +#else
> +#if defined(COROSYNC_LINUX)
> +	double loadav[3];
> +	if (getloadavg(loadav,3) < 0) {
> +		return -1;
> +	}
> +	return loadav[2];
> +#else
> +#error need libstatgrab or linux.
> +#endif /* COROSYNC_LINUX */
> +#endif /* HAVE_LIBSTATGRAB */
> +}
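
The non-statgrab branch relies on getloadavg(3). A quick standalone check of that
fallback path:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	double loadavg[3];

	/* getloadavg() returns the number of samples written, or -1 on error */
	if (getloadavg(loadavg, 3) < 0) {
		fprintf(stderr, "getloadavg failed\n");
		return 1;
	}
	printf("load: %.2f %.2f %.2f\n",
		loadavg[0], loadavg[1], loadavg[2]);
	return 0;
}
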
> +
> +static void load_update_stats_fn (void *data)
> +{
> +	struct resource_instance * inst = (struct resource_instance *)data;
> +	uint64_t timestamp;
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +	double min15 = min15_loadavg_get();
> +
> +	if (min15 > 0) {
> +		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "current");
> +		icmap_set_double(key_name, min15);
> +
> +		timestamp = cs_timestamp_get();
> +
> +		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "last_updated");
> +		icmap_set_uint64(key_name, timestamp);
> +
> +		if (min15 > inst->max.dbl && inst->fsm.curr_state != MON_S_FAILED) {
> +			cs_fsm_process (&inst->fsm, MON_E_FAILURE, inst);
> +		}
> +	}
> +
> +	api->timer_add_duration(inst->period * MILLI_2_NANO_SECONDS,
> +		inst, inst->update_stats_fn, &inst->timer_handle);
> +}
> +
> +static void mon_key_changed_cb (
> +	int32_t event,
> +	const char *key_name,
> +	struct icmap_notify_value new_value,
> +	struct icmap_notify_value old_value,
> +	void *user_data)
> +{
> +	struct resource_instance* inst = (struct resource_instance*)user_data;
> +	char *last_key_part;
> +
> +	if (event == ICMAP_TRACK_DELETE && inst) {
> +		log_printf (LOGSYS_LEVEL_WARNING,
> +			"resource \"%s\" deleted from cmap!",
> +			inst->name);
> +
> +		cs_fsm_process (&inst->fsm, MON_E_CONFIG_CHANGED, inst);
> +	}
> +
> +	if (event == ICMAP_TRACK_MODIFY) {
> +		last_key_part = strrchr(key_name, '.');
> +		if (last_key_part == NULL)
> +			return ;
> +
> +		last_key_part++;
> +		if (strcmp(last_key_part, "max") == 0 ||
> +		    strcmp(last_key_part, "poll_period") == 0) {
> +			ENTER();
> +			cs_fsm_process (&inst->fsm, MON_E_CONFIG_CHANGED, inst);
> +		}
> +	}
> +}
> +
> +static void mon_instance_init (struct resource_instance* inst)
> +{
> +	uint64_t tmp_value;
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +	icmap_track_t icmap_track;
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "current");
> +	if (inst->max_type == ICMAP_VALUETYPE_INT32) {
> +		icmap_set_int32(key_name, 0);
> +	} else {
> +		icmap_set_double(key_name, 0);
> +	}
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "last_updated");
> +	icmap_set_uint64(key_name, 0);
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "state");
> +	icmap_set_string(key_name, mon_stopped_str);
> +
> +	inst->fsm.name = inst->name;
> +	inst->fsm.curr_entry = 0;
> +	inst->fsm.curr_state = MON_S_STOPPED;
> +	inst->fsm.table = mon_fsm_table;
> +	inst->fsm.entries = sizeof(mon_fsm_table) / sizeof(struct cs_fsm_entry);
> +	inst->fsm.state_to_str = mon_res_state_to_str;
> +	inst->fsm.event_to_str = mon_res_event_to_str;
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "poll_period");
> +	if (icmap_get_uint64(key_name, &tmp_value) != CS_OK) {
> +		icmap_set_uint64(key_name, inst->period);
> +	}
> +	else {
> +		if (tmp_value >= MON_MIN_PERIOD && tmp_value <= MON_MAX_PERIOD) {
> +			inst->period = tmp_value;
> +		} else {
> +			log_printf (LOGSYS_LEVEL_WARNING,
> +				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> +				tmp_value, inst->name);
> +		}
> +	}
> +	cs_fsm_process (&inst->fsm, MON_E_CONFIG_CHANGED, inst);
> +
> +	icmap_track_add(inst->icmap_path,
> +			ICMAP_TRACK_ADD | ICMAP_TRACK_MODIFY | ICMAP_TRACK_DELETE | ICMAP_TRACK_PREFIX,
> +			mon_key_changed_cb, inst, &icmap_track);
> +}
> +
> +static int mon_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api)
> +{
> +
> +#ifdef HAVE_LIBSTATGRAB
> +	sg_init();
> +#endif /* HAVE_LIBSTATGRAB */
> +
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +	api = corosync_api;
> +
> +	mon_instance_init (&memory_used_inst);
> +	mon_instance_init (&load_15min_inst);
> +
> +	return 0;
> +}
> +
> +
> diff --git a/exec/pload.c b/exec/pload.c
> new file mode 100644
> index 0000000..eeb8100
> --- /dev/null
> +++ b/exec/pload.c
> @@ -0,0 +1,337 @@
> +/*
> + * Copyright (c) 2008-2009 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Author: Steven Dake (sdake@xxxxxxxxxx)
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the MontaVista Software, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <sys/un.h>
> +#include <sys/time.h>
> +#include <sys/ioctl.h>
> +#include <netinet/in.h>
> +#include <sys/uio.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <time.h>
> +#include <netinet/in.h>
> +#include <arpa/inet.h>
> +#include <string.h>
> +#include <assert.h>
> +
> +#include <qb/qblist.h>
> +#include <qb/qbutil.h>
> +#include <qb/qbipc_common.h>
> +
> +#include <corosync/swab.h>
> +#include <corosync/corotypes.h>
> +#include <corosync/corodefs.h>
> +#include <corosync/mar_gen.h>
> +#include <corosync/coroapi.h>
> +#include <corosync/ipc_pload.h>
> +#include <corosync/list.h>
> +#include <corosync/logsys.h>
> +
> +LOGSYS_DECLARE_SUBSYS ("PLOAD");
> +
> +enum pload_exec_message_req_types {
> +	MESSAGE_REQ_EXEC_PLOAD_START = 0,
> +	MESSAGE_REQ_EXEC_PLOAD_MCAST = 1
> +};
> +
> +/*
> + * Service Interfaces required by service_message_handler struct
> + */
> +static int pload_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api);
> +
> +static void pload_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id);
> +
> +static void message_handler_req_exec_pload_start (const void *msg,
> +						  unsigned int nodeid);
> +
> +static void message_handler_req_exec_pload_mcast (const void *msg,
> +						  unsigned int nodeid);
> +
> +static void req_exec_pload_start_endian_convert (void *msg);
> +
> +static void req_exec_pload_mcast_endian_convert (void *msg);
> +
> +static void message_handler_req_pload_start (void *conn, const void *msg);
> +
> +static int pload_lib_init_fn (void *conn);
> +
> +static int pload_lib_exit_fn (void *conn);
> +
> +static char buffer[1000000];
> +
> +static unsigned int msgs_delivered = 0;
> +
> +static unsigned int msgs_wanted = 0;
> +
> +static unsigned int msg_size = 0;
> +
> +static unsigned int msg_code = 1;
> +
> +static unsigned int msgs_sent = 0;
> +
> +
> +static struct corosync_api_v1 *api;
> +
> +struct req_exec_pload_start {
> +	struct qb_ipc_request_header header;
> +	unsigned int msg_code;
> +	unsigned int msg_count;
> +	unsigned int msg_size;
> +	unsigned int time_interval;
> +};
> +
> +struct req_exec_pload_mcast {
> +	struct qb_ipc_request_header header;
> +	unsigned int msg_code;
> +};
> +
> +static struct corosync_lib_handler pload_lib_engine[] =
> +{
> +	{ /* 0 */
> +		.lib_handler_fn		= message_handler_req_pload_start,
> +		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	}
> +};
> +
> +static struct corosync_exec_handler pload_exec_engine[] =
> +{
> +	{
> +		.exec_handler_fn 	= message_handler_req_exec_pload_start,
> +		.exec_endian_convert_fn	= req_exec_pload_start_endian_convert
> +	},
> +	{
> +		.exec_handler_fn 	= message_handler_req_exec_pload_mcast,
> +		.exec_endian_convert_fn	= req_exec_pload_mcast_endian_convert
> +	}
> +};
> +
> +struct corosync_service_engine pload_service_engine = {
> +	.name			= "corosync profile loading service",
> +	.id			= PLOAD_SERVICE,
> +	.priority		= 1,
> +	.private_data_size	= 0,
> +	.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED,
> +	.lib_init_fn		= pload_lib_init_fn,
> +	.lib_exit_fn		= pload_lib_exit_fn,
> +	.lib_engine		= pload_lib_engine,
> +	.lib_engine_count	= sizeof (pload_lib_engine) / sizeof (struct corosync_lib_handler),
> +	.exec_engine		= pload_exec_engine,
> +	.exec_engine_count	= sizeof (pload_exec_engine) / sizeof (struct corosync_exec_handler),
> +	.confchg_fn		= pload_confchg_fn,
> +	.exec_init_fn		= pload_exec_init_fn,
> +	.exec_dump_fn		= NULL,
> +	.sync_mode		= CS_SYNC_V2
> +};
> +
> +static DECLARE_LIST_INIT (confchg_notify);
> +
> +struct corosync_service_engine *pload_get_service_engine_ver0 (void)
> +{
> +	return (&pload_service_engine);
> +}
> +
> +static int pload_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api)
> +{
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +	api = corosync_api;
> +
> +	return 0;
> +}
> +
> +static void pload_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id)
> +{
> +}
> +
> +static int pload_lib_init_fn (void *conn)
> +{
> +	return (0);
> +}
> +
> +static int pload_lib_exit_fn (void *conn)
> +{
> +	return (0);
> +}
> +
> +static void message_handler_req_pload_start (void *conn, const void *msg)
> +{
> +	const struct req_lib_pload_start *req_lib_pload_start = msg;
> +	struct req_exec_pload_start req_exec_pload_start;
> +	struct iovec iov;
> +
> +	req_exec_pload_start.header.id =
> +		SERVICE_ID_MAKE (PLOAD_SERVICE, MESSAGE_REQ_EXEC_PLOAD_START);
> +	req_exec_pload_start.msg_code = req_lib_pload_start->msg_code;
> +	req_exec_pload_start.msg_size = req_lib_pload_start->msg_size;
> +	req_exec_pload_start.msg_count = req_lib_pload_start->msg_count;
> +	req_exec_pload_start.time_interval = req_lib_pload_start->time_interval;
> +	iov.iov_base = (void *)&req_exec_pload_start;
> +	iov.iov_len = sizeof (struct req_exec_pload_start);
> +
> +	msgs_delivered = 0;
> +
> +	msgs_wanted = 0;
> +
> +	msgs_sent = 0;
> +
> +	api->totem_mcast (&iov, 1, TOTEM_AGREED);
> +}
> +
> +static void req_exec_pload_start_endian_convert (void *msg)
> +{
> +}
> +
> +static void req_exec_pload_mcast_endian_convert (void *msg)
> +{
> +}
> +
> +static int send_message (const void *arg)
> +{
> +	struct req_exec_pload_mcast req_exec_pload_mcast;
> +	struct iovec iov[2];
> +	unsigned int res;
> +	unsigned int iov_len = 1;
> +
> +	req_exec_pload_mcast.header.id =
> +		SERVICE_ID_MAKE (PLOAD_SERVICE, MESSAGE_REQ_EXEC_PLOAD_MCAST);
> +	req_exec_pload_mcast.header.size = sizeof (struct req_exec_pload_mcast) + msg_size;
> +
> +	iov[0].iov_base = (void *)&req_exec_pload_mcast;
> +	iov[0].iov_len = sizeof (struct req_exec_pload_mcast);
> +	if (msg_size > sizeof (req_exec_pload_mcast)) {
> +		iov[1].iov_base = buffer;
> +		iov[1].iov_len = msg_size - sizeof (req_exec_pload_mcast);
> +		iov_len = 2;
> +	}
> +
> +	do {
> +		res = api->totem_mcast (iov, iov_len, TOTEM_AGREED);
> +		if (res == -1) {
> +			break;
> +		} else {
> +			msgs_sent++;
> +			msg_code++;
> +		}
> +	} while (msgs_sent < msgs_wanted);
> +	if (msgs_sent == msgs_wanted) {
> +		return (0);
> +	} else {
> +		return (-1);
> +	}
> +}
> +
> +hdb_handle_t start_mcasting_handle;
> +
> +static void start_mcasting (void)
> +{
> +	api->schedwrk_create (
> +		&start_mcasting_handle,
> +		send_message,
> +		&start_mcasting_handle);
> +}
> +
> +static void message_handler_req_exec_pload_start (
> +	const void *msg,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_pload_start *req_exec_pload_start = msg;
> +
> +	msgs_wanted = req_exec_pload_start->msg_count;
> +	msg_size = req_exec_pload_start->msg_size;
> +	msg_code = req_exec_pload_start->msg_code;
> +
> +	start_mcasting ();
> +}
> +#ifndef timersub
> +#define timersub(a, b, result)					\
> +do {								\
> +	(result)->tv_sec = (a)->tv_sec - (b)->tv_sec;		\
> +	(result)->tv_usec = (a)->tv_usec - (b)->tv_usec;	\
> +	if ((result)->tv_usec < 0) {				\
> +		--(result)->tv_sec;				\
> +		(result)->tv_usec += 1000000;			\
> +	}							\
> +} while (0)
> +#endif /* timersub */
> +
> +unsigned long long int tv1;
> +unsigned long long int tv2;
> +unsigned long long int tv_elapsed;
> +int last_msg_no = 0;
> +
> +static void message_handler_req_exec_pload_mcast (
> +	const void *msg,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_pload_mcast *pload_mcast = msg;
> +	char log_buffer[1024];
> +
> +	last_msg_no = pload_mcast->msg_code;
> +	if (msgs_delivered == 0) {
> +		tv1 = qb_util_nano_current_get ();
> +	}
> +	msgs_delivered += 1;
> +	if (msgs_delivered == msgs_wanted) {
> +		tv2 = qb_util_nano_current_get ();
> +		tv_elapsed = tv2 - tv1;
> +		sprintf (log_buffer, "%5d Writes %d bytes per write %7.3f seconds runtime, %9.3f TP/S, %9.3f MB/S.\n",
> +			msgs_delivered,
> +			msg_size,
> +			(tv_elapsed / 1000000000.0),
> +			((float)msgs_delivered) /  (tv_elapsed / 1000000000.0),
> +			(((float)msgs_delivered) * ((float)msg_size) /
> +				(tv_elapsed / 1000000000.0)) / (1024.0 * 1024.0));
> +		log_printf (LOGSYS_LEVEL_NOTICE, "%s", log_buffer);
> +	}
> +}
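
Side note for reviewers, not part of the patch: for anyone sanity-checking the
figures this handler logs -- with made-up numbers, 10000 delivered writes of
1000 bytes over 2.0 seconds of runtime come out as 5000.000 TP/S and roughly
4.768 MB/S, since the MB/S term divides by 1024*1024 rather than 10^6.
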
> diff --git a/exec/quorum.c b/exec/quorum.c
> index 1e76827..a98ef7a 100644
> --- a/exec/quorum.c
> +++ b/exec/quorum.c
> @@ -54,7 +54,6 @@
>  #include <corosync/swab.h>
>  #include <corosync/totem/totempg.h>
>  #include <corosync/totem/totem.h>
> -#include <corosync/lcr/lcr_ifact.h>
>  #include <corosync/logsys.h>
>  
>  #include "quorum.h"
> diff --git a/exec/service.c b/exec/service.c
> index d9bc1b9..8f701eb 100644
> --- a/exec/service.c
> +++ b/exec/service.c
> @@ -62,44 +62,55 @@ LOGSYS_DECLARE_SUBSYS ("SERV");
>  struct default_service {
>  	const char *name;
>  	int ver;
> +	struct corosync_service_engine *(*loader)(void);
>  };
>  
>  static struct default_service default_services[] = {
>  	{
> -		.name			 = "corosync_evs",
> -		.ver			 = 0,
> +		.name		= "corosync_evs",
> +		.ver		= 0,
> +		.loader		= evs_get_service_engine_ver0
>  	},
>  	{
> -		.name			 = "corosync_cfg",
> -		.ver			 = 0,
> +		.name		= "corosync_cfg",
> +		.ver		= 0,
> +		.loader		= cfg_get_service_engine_ver0
>  	},
>  	{
> -		.name			 = "corosync_cpg",
> -		.ver			 = 0,
> +		.name		= "corosync_cpg",
> +		.ver		= 0,
> +		.loader		= cpg_get_service_engine_ver0
>  	},
>  	{
> -		.name			 = "corosync_pload",
> -		.ver			 = 0,
> +		.name		= "corosync_pload",
> +		.ver		= 0,
> +		.loader		= pload_get_service_engine_ver0
>  	},
>  #ifdef HAVE_MONITORING
>  	{
> -		.name			 = "corosync_mon",
> -		.ver			 = 0,
> +		.name		= "corosync_mon",
> +		.ver		= 0,
> +		.loader		= mon_get_service_engine_ver0
>  	},
>  #endif
>  #ifdef HAVE_WATCHDOG
>  	{
> -		.name			 = "corosync_wd",
> -		.ver			 = 0,
> +		.name		= "corosync_wd",
> +		.ver		= 0,
> +		.loader		= wd_get_service_engine_ver0
>  	},
>  #endif
> +#ifdef HAVE_VOTEQUORUM
>  	{
> -		.name			 = "corosync_quorum",
> -		.ver			 = 0,
> +		.name		= "corosync_quorum",
> +		.ver		= 0,
> +		.loader		= votequorum_get_service_engine_ver0
>  	},
> +#endif
>  	{
> -		.name			 = "corosync_cmap",
> -		.ver			 = 0,
> +		.name		= "corosync_cmap",
> +		.ver		= 0,
> +		.loader		= cmap_get_service_engine_ver0
>  	},
>  };
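
Not part of the patch, just a note for other reviewers: the hunk above is the
heart of the plugin removal -- each default service now carries a static
loader function returning its engine, instead of being resolved through
lcr_ifact_reference(). A tiny self-contained toy of that pattern (made-up
names, none of the real corosync types), in case it helps when reading the
rest of the service.c changes:

#include <stdio.h>

struct engine {
	const char *name;
	int id;
};

/* stand-ins for the per-service *_get_service_engine_ver0() getters */
static struct engine foo_engine = { "toy foo service", 0 };
static struct engine bar_engine = { "toy bar service", 1 };
static struct engine *foo_loader (void) { return (&foo_engine); }
static struct engine *bar_loader (void) { return (&bar_engine); }

struct default_service {
	const char *name;
	struct engine *(*loader)(void);
};

static struct default_service default_services[] = {
	{ .name = "foo", .loader = foo_loader },
	{ .name = "bar", .loader = bar_loader },
};

int main (void)
{
	unsigned int i;

	for (i = 0; i < sizeof (default_services) / sizeof (struct default_service); i++) {
		struct engine *e = default_services[i].loader ();

		printf ("Service engine loaded: %s [%d]\n", e->name, e->id);
	}
	return (0);
}
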
>  
> @@ -121,34 +132,12 @@ int corosync_service_exiting[SERVICE_HANDLER_MAXIMUM_COUNT];
>  
>  static void (*service_unlink_all_complete) (void) = NULL;
>  
> -static unsigned int default_services_requested (struct corosync_api_v1 *corosync_api)
> -{
> -	char *value = NULL;
> -	int res;
> -
> -	/*
> -	 * Don't link default services if they have been disabled
> -	 */
> -	if (icmap_get_string("aisexec.defaultservices", &value) == CS_OK &&
> -			value != NULL && strcmp(value, "no") == 0) {
> -		res = 0;
> -	} else {
> -		res = -1;
> -	}
> -
> -	free(value);
> -	return (res);
> -}
> -
>  unsigned int corosync_service_link_and_init (
>  	struct corosync_api_v1 *corosync_api,
> -	const char *service_name,
> -	unsigned int service_ver)
> +	struct default_service *service)
>  {
> -	struct corosync_service_engine_iface_ver0 *iface_ver0;
> -	void *iface_ver0_p;
>  	hdb_handle_t handle;
> -	struct corosync_service_engine *service;
> +	struct corosync_service_engine *service_engine;
>  	int res;
>  	int fn;
>  	char *name_sufix;
> @@ -157,80 +146,63 @@ unsigned int corosync_service_link_and_init (
>  	char key_name[ICMAP_KEYNAME_MAXLEN];
>  
>  	/*
> -	 * reference the service interface
> -	 */
> -	iface_ver0_p = NULL;
> -	res = lcr_ifact_reference (
> -		&handle,
> -		service_name,
> -		service_ver,
> -		&iface_ver0_p,
> -		(void *)0);
> -
> -	iface_ver0 = (struct corosync_service_engine_iface_ver0 *)iface_ver0_p;
> -
> -	if (res == -1 || iface_ver0 == 0) {
> -		log_printf(LOGSYS_LEVEL_ERROR, "Service failed to load '%s'.\n", service_name);
> -		return (-1);
> -	}
> -
> -
> -	/*
>  	 * Initialize service
>  	 */
> -	service = iface_ver0->corosync_get_service_engine_ver0();
> +	service_engine = service->loader();
>  
> -	corosync_service[service->id] = service;
> +	corosync_service[service_engine->id] = service_engine;
>  
>  	/*
>  	 * Register the log sites with libqb
>  	 */
> +/* SDAKE
>  	_start = lcr_ifact_addr_get(handle, "__start___verbose");
>  	_stop = lcr_ifact_addr_get(handle, "__stop___verbose");
>  	qb_log_callsites_register(_start, _stop);
> +*/
>  
> -	if (service->config_init_fn) {
> -		res = service->config_init_fn (corosync_api);
> +	if (service_engine->config_init_fn) {
> +		res = service_engine->config_init_fn (corosync_api);
>  	}
>  
> -	if (service->exec_init_fn) {
> -		res = service->exec_init_fn (corosync_api);
> +	if (service_engine->exec_init_fn) {
> +		res = service_engine->exec_init_fn (corosync_api);
>  	}
>  
>  	/*
>  	 * Store service in cmap db
>  	 */
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.name", service->id);
> -	icmap_set_string(key_name, service_name);
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.name", service_engine->id);
> +	icmap_set_string(key_name, service->name);
>  
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.ver", service->id);
> -	icmap_set_uint32(key_name, service_ver);
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.ver", service_engine->id);
> +	icmap_set_uint32(key_name, service->ver);
>  
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.handle", service->id);
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.handle", service_engine->id);
>  	icmap_set_uint64(key_name, handle);
>  
> -	name_sufix = strrchr (service_name, '_');
> +	name_sufix = strrchr (service->name, '_');
>  	if (name_sufix)
>  		name_sufix++;
>  	else
> -		name_sufix = (char*)service_name;
> +		name_sufix = (char*)service->name;
>  
>  	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "runtime.services.%s.service_id", name_sufix);
> -	icmap_set_uint16(key_name, service->id);
> +	icmap_set_uint16(key_name, service_engine->id);
>  
> -	for (fn = 0; fn < service->exec_engine_count; fn++) {
> +	for (fn = 0; fn < service_engine->exec_engine_count; fn++) {
>  		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "runtime.services.%s.%d.tx", name_sufix, fn);
>  		icmap_set_uint64(key_name, 0);
> -		service_stats_tx[service->id][fn] = strdup(key_name);
> +		service_stats_tx[service_engine->id][fn] = strdup(key_name);
>  
>  		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "runtime.services.%s.%d.rx", name_sufix, fn);
>  		icmap_set_uint64(key_name, 0);
> -		service_stats_rx[service->id][fn] = strdup(key_name);
> +		service_stats_rx[service_engine->id][fn] = strdup(key_name);
>  	}
>  
>  	log_printf (LOGSYS_LEVEL_NOTICE,
> -		"Service engine loaded: %s [%d]\n", service->name, service->id);
> -	cs_ipcs_service_init(service);
> +		"Service engine loaded: %s [%d]\n", service_engine->name, service_engine->id);
> +	cs_ipcs_service_init(service_engine);
>  	return (res);
>  }
>  
> @@ -313,7 +285,6 @@ static unsigned int service_unlink_and_exit (
>  	unsigned int service_ver)
>  {
>  	unsigned short service_id;
> -	hdb_handle_t found_service_handle;
>  	char *name_sufix;
>  	int res;
>  	const char *iter_key_name;
> @@ -376,10 +347,12 @@ static unsigned int service_unlink_and_exit (
>  
>  		cs_ipcs_service_destroy (service_id);
>  
> +#ifdef SDAKE
>  		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.handle", service_id);
>  		if (icmap_get_uint64(key_name, &found_service_handle) == CS_OK) {
>  			lcr_ifact_release (found_service_handle);
>  		}
> +#endif
>  
>  		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.handle", service_id);
>  		icmap_delete(key_name);
> @@ -399,57 +372,13 @@ unsigned int corosync_service_defaults_link_and_init (struct corosync_api_v1 *co
>  {
>  	unsigned int i;
>  
> -	icmap_iter_t iter;
> -	char *found_service_name;
> -	int res;
> -	unsigned int found_service_ver;
> -	const char *iter_key_name;
> -	unsigned int service_pos;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -
> -	icmap_set_ro_access("internal_configuration.", 1, 1);
> -	icmap_set_ro_access("runtime.services.", 1, 1);
> -
> -	found_service_name = NULL;
> -	iter = icmap_iter_init("service.");
> -	while ((iter_key_name = icmap_iter_next(iter, NULL, NULL)) != NULL) {
> -		res = sscanf(iter_key_name, "service.%u.%s", &service_pos, key_name);
> -		if (res != 2) {
> -			continue;
> -		}
> -		if (strcmp(key_name, "name") != 0) {
> -			continue;
> -		}
> -
> -		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "service.%u.name", service_pos);
> -		free(found_service_name);
> -		if (icmap_get_string(key_name, &found_service_name) != CS_OK) {
> -			continue;
> -		}
> -
> -		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "service.%u.ver", service_pos);
> -		if (icmap_get_uint32(key_name, &found_service_ver) != CS_OK) {
> -			continue;
> -		}
> -
> -		corosync_service_link_and_init (
> -			corosync_api,
> -			found_service_name,
> -			found_service_ver);
> -	}
> -	icmap_iter_finalize(iter);
> -
> - 	if (default_services_requested (corosync_api) == 0) {
> - 		return (0);
> - 	}
> -
>  	for (i = 0;
>  		i < sizeof (default_services) / sizeof (struct default_service); i++) {
>  
> +		default_services[i].loader();
>  		corosync_service_link_and_init (
>  			corosync_api,
> -			default_services[i].name,
> -			default_services[i].ver);
> +			&default_services[i]);
>  	}
>  
>  	return (0);
> @@ -477,7 +406,9 @@ static void service_unlink_schedwrk_handler (void *data) {
>  
>  	corosync_service[cb_data->service_engine] = NULL;
>  
> +#ifdef SDAKE
>  	lcr_ifact_release (cb_data->service_handle);
> +#endif
>  
>  	qb_loop_job_add(cs_poll_handle_get(),
>  		QB_LOOP_HIGH,
> diff --git a/exec/service.h b/exec/service.h
> index 4fd0b50..33d6053 100644
> --- a/exec/service.h
> +++ b/exec/service.h
> @@ -39,13 +39,14 @@
>  
>  struct corosync_api_v1;
>  
> +struct default_service;
> +
>  /**
>   * Link and initialize a service
>   */
> -extern unsigned int corosync_service_link_and_init (
> +unsigned int corosync_service_link_and_init (
>  	struct corosync_api_v1 *objdb,
> -	const char *service_name,
> -	unsigned int service_ver);
> +	struct default_service *service_engine);
>  
>  /**
>   * Unlink and exit a service
> @@ -75,4 +76,13 @@ extern int corosync_service_exiting[];
>  extern const char *service_stats_rx[SERVICE_HANDLER_MAXIMUM_COUNT][64];
>  extern const char *service_stats_tx[SERVICE_HANDLER_MAXIMUM_COUNT][64];
>  
> +struct corosync_service_engine *votequorum_get_service_engine_ver0 (void);
> +struct corosync_service_engine *pload_get_service_engine_ver0 (void);
> +struct corosync_service_engine *cfg_get_service_engine_ver0 (void);
> +struct corosync_service_engine *evs_get_service_engine_ver0 (void);
> +struct corosync_service_engine *cpg_get_service_engine_ver0 (void);
> +struct corosync_service_engine *mon_get_service_engine_ver0 (void);
> +struct corosync_service_engine *wd_get_service_engine_ver0 (void);
> +struct corosync_service_engine *cmap_get_service_engine_ver0 (void);
> +
>  #endif /* SERVICE_H_DEFINED */
> diff --git a/exec/testquorum.c b/exec/testquorum.c
> new file mode 100644
> index 0000000..e69de29
> diff --git a/exec/votequorum.c b/exec/votequorum.c
> new file mode 100644
> index 0000000..6e7d852
> --- /dev/null
> +++ b/exec/votequorum.c
> @@ -0,0 +1,1580 @@
> +/*
> + * Copyright (c) 2009-2011 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Authors: Christine Caulfield (ccaulfie@xxxxxxxxxx)
> + *          Fabio M. Di Nitto   (fdinitto@xxxxxxxxxx)
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the MontaVista Software, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#include <sys/types.h>
> +#ifdef HAVE_ALLOCA_H
> +#include <alloca.h>
> +#endif
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <sys/un.h>
> +#include <sys/time.h>
> +#include <sys/ioctl.h>
> +#include <netinet/in.h>
> +#include <sys/uio.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <time.h>
> +#include <unistd.h>
> +#include <netinet/in.h>
> +#include <arpa/inet.h>
> +
> +#include <qb/qbipc_common.h>
> +#include <qb/qbdefs.h>
> +#include <qb/qbutil.h>
> +
> +#include <corosync/corotypes.h>
> +#include <corosync/corodefs.h>
> +#include <corosync/cfg.h>
> +#include <corosync/list.h>
> +#include <corosync/logsys.h>
> +#include <corosync/mar_gen.h>
> +#include <corosync/coroapi.h>
> +#include <corosync/engine/quorum.h>
> +#include <corosync/icmap.h>
> +#include <corosync/ipc_votequorum.h>
> +
> +#define VOTEQUORUM_MAJOR_VERSION 7
> +#define VOTEQUORUM_MINOR_VERSION 0
> +#define VOTEQUORUM_PATCH_VERSION 0
> +
> +/*
> + * Silly default to prevent accidents!
> + */
> +#define DEFAULT_EXPECTED   1024
> +#define DEFAULT_QDEV_POLL 10000
> +#define DEFAULT_LEAVE_TMO 10000
> +#define DEFAULT_LMS_WIN   10000
> +
> +LOGSYS_DECLARE_SUBSYS ("VOTEQ");
> +
> +enum quorum_message_req_types {
> +	MESSAGE_REQ_EXEC_VOTEQUORUM_NODEINFO  = 0,
> +	MESSAGE_REQ_EXEC_VOTEQUORUM_RECONFIGURE = 1,
> +};
> +
> +#define NODE_FLAGS_BEENDOWN         1
> +#define NODE_FLAGS_QDISK            8
> +#define NODE_FLAGS_REMOVED         16
> +#define NODE_FLAGS_US              32
> +
> +#define NODEID_US 0
> +#define NODEID_QDEVICE -1
> +
> +typedef enum {
> +	NODESTATE_JOINING=1,
> +	NODESTATE_MEMBER,
> +	NODESTATE_DEAD,
> +	NODESTATE_LEAVING
> +} nodestate_t;
> +
> +struct cluster_node {
> +	int flags;
> +	int node_id;
> +	unsigned int expected_votes;
> +	unsigned int votes;
> +	time_t join_time;
> +	nodestate_t state;
> +	unsigned long long int last_hello; /* Only used for quorum devices */
> +	struct list_head list;
> +};
> +
> +static int quorum;
> +static int cluster_is_quorate;
> +static int first_trans = 1;
> +static unsigned int quorumdev_poll = DEFAULT_QDEV_POLL;
> +
> +static uint8_t two_node = 0;
> +static uint8_t wait_for_all = 0;
> +static uint8_t wait_for_all_status = 0;
> +static uint8_t auto_tie_breaker = 0;
> +static int lowest_node_id = -1;
> +static uint8_t last_man_standing = 0;
> +static uint32_t last_man_standing_window = DEFAULT_LMS_WIN;
> +static int last_man_standing_timer_set = 0;
> +static corosync_timer_handle_t last_man_standing_timer;
> +
> +static struct cluster_node *us;
> +static struct cluster_node *quorum_device = NULL;
> +static char quorum_device_name[VOTEQUORUM_MAX_QDISK_NAME_LEN];
> +static corosync_timer_handle_t quorum_device_timer;
> +static struct list_head cluster_members_list;
> +static struct corosync_api_v1 *corosync_api;
> +static struct list_head trackers_list;
> +static unsigned int quorum_members[PROCESSOR_COUNT_MAX+1];
> +static int quorum_members_entries = 0;
> +static struct memb_ring_id quorum_ringid;
> +
> +#define max(a,b) (((a) > (b)) ? (a) : (b))
> +static struct cluster_node *find_node_by_nodeid(int nodeid);
> +static struct cluster_node *allocate_node(int nodeid);
> +
> +#define list_iterate(v, head) \
> +	for (v = (head)->next; v != head; v = v->next)
> +
> +struct quorum_pd {
> +	unsigned char track_flags;
> +	int tracking_enabled;
> +	uint64_t tracking_context;
> +	struct list_head list;
> +	void *conn;
> +};
> +
> +/*
> + * Service Interfaces required by service_message_handler struct
> + */
> +
> +static void votequorum_init(struct corosync_api_v1 *api,
> +			    quorum_set_quorate_fn_t report);
> +
> +static void quorum_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id);
> +
> +static int votequorum_exec_init_fn (struct corosync_api_v1 *api);
> +
> +static int quorum_lib_init_fn (void *conn);
> +
> +static int quorum_lib_exit_fn (void *conn);
> +
> +static void message_handler_req_exec_votequorum_nodeinfo (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void message_handler_req_exec_votequorum_reconfigure (
> +	const void *message,
> +	unsigned int nodeid);
> +
> +static void message_handler_req_lib_votequorum_getinfo (void *conn,
> +							const void *message);
> +
> +static void message_handler_req_lib_votequorum_setexpected (void *conn,
> +							    const void *message);
> +
> +static void message_handler_req_lib_votequorum_setvotes (void *conn,
> +							 const void *message);
> +
> +static void message_handler_req_lib_votequorum_qdisk_register (void *conn,
> +							       const void *message);
> +
> +static void message_handler_req_lib_votequorum_qdisk_unregister (void *conn,
> +								 const void *message);
> +
> +static void message_handler_req_lib_votequorum_qdisk_poll (void *conn,
> +							   const void *message);
> +
> +static void message_handler_req_lib_votequorum_qdisk_getinfo (void *conn,
> +							      const void *message);
> +
> +static void message_handler_req_lib_votequorum_trackstart (void *conn,
> +							   const void *message);
> +static void message_handler_req_lib_votequorum_trackstop (void *conn,
> +							  const void *message);
> +
> +static int quorum_exec_send_nodeinfo(void);
> +static int quorum_exec_send_reconfigure(int param, int nodeid, int value);
> +
> +static void exec_votequorum_nodeinfo_endian_convert (void *message);
> +static void exec_votequorum_reconfigure_endian_convert (void *message);
> +
> +static void add_votequorum_config_notification(void);
> +
> +static void recalculate_quorum(int allow_decrease, int by_current_nodes);
> +
> +/*
> + * Library Handler Definition
> + */
> +static struct corosync_lib_handler quorum_lib_service[] =
> +{
> +	{ /* 0 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_getinfo,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 1 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_setexpected,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 2 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_setvotes,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 3 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_register,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 4 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_unregister,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 5 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_poll,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 6 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_getinfo,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 7 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_trackstart,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	},
> +	{ /* 8 */
> +		.lib_handler_fn		= message_handler_req_lib_votequorum_trackstop,
> +		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> +	}
> +};
> +
> +static struct corosync_exec_handler votequorum_exec_engine[] =
> +{
> +	{ /* 0 */
> +		.exec_handler_fn	= message_handler_req_exec_votequorum_nodeinfo,
> +		.exec_endian_convert_fn	= exec_votequorum_nodeinfo_endian_convert
> +	},
> +	{ /* 1 */
> +		.exec_handler_fn	= message_handler_req_exec_votequorum_reconfigure,
> +		.exec_endian_convert_fn	= exec_votequorum_reconfigure_endian_convert
> +	},
> +};
> +
> +static quorum_set_quorate_fn_t set_quorum;
> +
> +static struct corosync_service_engine quorum_service_handler = {
> +	.name					= "corosync votes quorum service v0.91",
> +	.id					= VOTEQUORUM_SERVICE,
> +	.private_data_size			= sizeof (struct quorum_pd),
> +	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> +	.flow_control				= COROSYNC_LIB_FLOW_CONTROL_REQUIRED,
> +	.lib_init_fn				= quorum_lib_init_fn,
> +	.lib_exit_fn				= quorum_lib_exit_fn,
> +	.lib_engine				= quorum_lib_service,
> +	.lib_engine_count			= sizeof (quorum_lib_service) / sizeof (struct corosync_lib_handler),
> +	.exec_init_fn				= votequorum_exec_init_fn,
> +	.exec_engine				= votequorum_exec_engine,
> +	.exec_engine_count			= sizeof (votequorum_exec_engine) / sizeof (struct corosync_exec_handler),
> +	.confchg_fn				= quorum_confchg_fn,
> +	.sync_mode				= CS_SYNC_V1
> +};
> +
> +struct corosync_service_engine *votequorum_get_service_engine_ver0 (void)
> +{
> +	return (&quorum_service_handler);
> +}
> +
> +static void votequorum_init(struct corosync_api_v1 *api,
> +			    quorum_set_quorate_fn_t report)
> +{
> +	ENTER();
> +
> +	set_quorum = report;
> +
> +	icmap_get_uint8("quorum.wait_for_all", &wait_for_all);
> +	icmap_get_uint8("quorum.auto_tie_breaker", &auto_tie_breaker);
> +	icmap_get_uint8("quorum.last_man_standing", &last_man_standing);
> +	icmap_get_uint32("quorum.last_man_standing_window", &last_man_standing_window);
> +
> +	/*
> +	 * TODO: we need to know the lowest node-id in the cluster
> +	 * current lack of node list with node-id's requires us to see all nodes
> +	 * to determine which is the lowest.
> +	 */
> +	if (auto_tie_breaker) {
> +		wait_for_all = 1;
> +	}
> +
> +	if (wait_for_all) {
> +		wait_for_all_status = 1;
> +	}
> +
> +	/* Load the library-servicing part of this module */
> +	api->service_link_and_init(api, "corosync_votequorum_iface", 0);
> +
> +	LEAVE();
> +}
> +
> +struct req_exec_quorum_nodeinfo {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +	unsigned int first_trans;
> +	unsigned int votes;
> +	unsigned int expected_votes;
> +	unsigned int major_version;	/* Not backwards compatible */
> +	unsigned int minor_version;	/* Backwards compatible */
> +	unsigned int patch_version;	/* Backwards/forwards compatible */
> +	unsigned int config_version;
> +	unsigned int flags;
> +	unsigned int wait_for_all_status;
> +	unsigned int quorate;
> +} __attribute__((packed));
> +
> +/*
> + * Parameters for RECONFIG command
> + */
> +#define RECONFIG_PARAM_EXPECTED_VOTES 1
> +#define RECONFIG_PARAM_NODE_VOTES     2
> +
> +struct req_exec_quorum_reconfigure {
> +	struct qb_ipc_request_header header __attribute__((aligned(8)));
> +	unsigned int param;
> +	unsigned int nodeid;
> +	unsigned int value;
> +};
> +
> +static void read_quorum_config(void)
> +{
> +	int cluster_members = 0;
> +	struct list_head *tmp;
> +
> +	ENTER();
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "Reading configuration\n");
> +
> +	if (icmap_get_uint32("quorum.expected_votes", &us->expected_votes) != CS_OK) {
> +		us->expected_votes = DEFAULT_EXPECTED;
> +	}
> +
> +	if (icmap_get_uint32("quorum.votes", &us->votes) != CS_OK) {
> +		us->votes = 1;
> +	}
> +
> +	if (icmap_get_uint32("quorum.quorumdev_poll", &quorumdev_poll) != CS_OK) {
> +		quorumdev_poll = DEFAULT_QDEV_POLL;
> +	}
> +
> +	icmap_get_uint8("quorum.two_node", &two_node);
> +
> +	/*
> +	 * two_node mode is invalid if there are more than 2 nodes in the cluster!
> +	 */
> +	list_iterate(tmp, &cluster_members_list) {
> +		cluster_members++;
> +        }
> +
> +	if (two_node && cluster_members > 2) {
> +		log_printf(LOGSYS_LEVEL_WARNING, "quorum.two_node was set but there are more than 2 nodes in the cluster. It will be ignored.\n");
> +		two_node = 0;
> +	}
> +
> +	LEAVE();
> +}
> +
> +static int votequorum_exec_init_fn (struct corosync_api_v1 *api)
> +{
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +
> +	ENTER();
> +
> +	corosync_api = api;
> +
> +	list_init(&cluster_members_list);
> +	list_init(&trackers_list);
> +
> +	/*
> +	 * Allocate a cluster_node for us
> +	 */
> +	us = allocate_node(corosync_api->totem_nodeid_get());
> +	if (!us) {
> +		LEAVE();
> +		return (1);
> +	}
> +
> +	us->flags |= NODE_FLAGS_US;
> +	us->state = NODESTATE_MEMBER;
> +	us->expected_votes = DEFAULT_EXPECTED;
> +	us->votes = 1;
> +	time(&us->join_time);
> +
> +	read_quorum_config();
> +	recalculate_quorum(0, 0);
> +
> +	/*
> +	 * Listen for changes
> +	 */
> +	add_votequorum_config_notification();
> +
> +	/*
> +	 * Start us off with one node
> +	 */
> +	quorum_exec_send_nodeinfo();
> +
> +	LEAVE();
> +
> +	return (0);
> +}
> +
> +static int quorum_lib_exit_fn (void *conn)
> +{
> +	struct quorum_pd *quorum_pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> +
> +	ENTER();
> +
> +	if (quorum_pd->tracking_enabled) {
> +		list_del (&quorum_pd->list);
> +		list_init (&quorum_pd->list);
> +	}
> +
> +	LEAVE();
> +
> +	return (0);
> +}
> +
> +
> +static int send_quorum_notification(void *conn, uint64_t context)
> +{
> +	struct res_lib_votequorum_notification *res_lib_votequorum_notification;
> +	struct list_head *tmp;
> +	struct cluster_node *node;
> +	int cluster_members = 0;
> +	int i = 0;
> +	int size;
> +	char *buf;
> +
> +	ENTER();
> +
> +	list_iterate(tmp, &cluster_members_list) {
> +		node = list_entry(tmp, struct cluster_node, list);
> +		cluster_members++;
> +        }
> +	if (quorum_device) {
> +		cluster_members++;
> +	}
> +
> +	size = sizeof(struct res_lib_votequorum_notification) + sizeof(struct votequorum_node) * cluster_members;
> +	buf = alloca(size);
> +	if (!buf) {
> +		LEAVE();
> +		return -1;
> +	}
> +
> +	res_lib_votequorum_notification = (struct res_lib_votequorum_notification *)buf;
> +	res_lib_votequorum_notification->quorate = cluster_is_quorate;
> +	res_lib_votequorum_notification->node_list_entries = cluster_members;
> +	res_lib_votequorum_notification->context = context;
> +	list_iterate(tmp, &cluster_members_list) {
> +		node = list_entry(tmp, struct cluster_node, list);
> +		res_lib_votequorum_notification->node_list[i].nodeid = node->node_id;
> +		res_lib_votequorum_notification->node_list[i++].state = node->state;
> +        }
> +	if (quorum_device) {
> +		res_lib_votequorum_notification->node_list[i].nodeid = 0;
> +		res_lib_votequorum_notification->node_list[i++].state = quorum_device->state | 0x80;
> +	}
> +	res_lib_votequorum_notification->header.id = MESSAGE_RES_VOTEQUORUM_NOTIFICATION;
> +	res_lib_votequorum_notification->header.size = size;
> +	res_lib_votequorum_notification->header.error = CS_OK;
> +
> +	/* Send it to all interested parties */
> +	if (conn) {
> +		int ret = corosync_api->ipc_dispatch_send(conn, buf, size);
> +		LEAVE();
> +		return ret;
> +	} else {
> +		struct quorum_pd *qpd;
> +
> +		list_iterate(tmp, &trackers_list) {
> +			qpd = list_entry(tmp, struct quorum_pd, list);
> +			res_lib_votequorum_notification->context = qpd->tracking_context;
> +			corosync_api->ipc_dispatch_send(qpd->conn, buf, size);
> +		}
> +	}
> +
> +	LEAVE();
> +
> +	return 0;
> +}
> +
> +static void send_expectedvotes_notification(void)
> +{
> +	struct res_lib_votequorum_expectedvotes_notification res_lib_votequorum_expectedvotes_notification;
> +	struct quorum_pd *qpd;
> +	struct list_head *tmp;
> +
> +	ENTER();
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "Sending expected votes callback\n");
> +
> +	res_lib_votequorum_expectedvotes_notification.header.id = MESSAGE_RES_VOTEQUORUM_EXPECTEDVOTES_NOTIFICATION;
> +	res_lib_votequorum_expectedvotes_notification.header.size = sizeof(res_lib_votequorum_expectedvotes_notification);
> +	res_lib_votequorum_expectedvotes_notification.header.error = CS_OK;
> +	res_lib_votequorum_expectedvotes_notification.expected_votes = us->expected_votes;
> +
> +	list_iterate(tmp, &trackers_list) {
> +		qpd = list_entry(tmp, struct quorum_pd, list);
> +		res_lib_votequorum_expectedvotes_notification.context = qpd->tracking_context;
> +		corosync_api->ipc_dispatch_send(qpd->conn, &res_lib_votequorum_expectedvotes_notification,
> +						sizeof(struct res_lib_votequorum_expectedvotes_notification));
> +	}
> +
> +	LEAVE();
> +}
> +
> +static void get_lowest_node_id(void)
> +{
> +	struct cluster_node *node = NULL;
> +	struct list_head *tmp;
> +
> +	ENTER();
> +
> +	lowest_node_id = us->node_id;
> +
> +	list_iterate(tmp, &cluster_members_list) {
> +		node = list_entry(tmp, struct cluster_node, list);
> +		if (node->node_id < lowest_node_id) {
> +			lowest_node_id = node->node_id;
> +		}
> +	}
> +	log_printf(LOGSYS_LEVEL_DEBUG, "lowest node id: %d us: %d\n", lowest_node_id, us->node_id);
> +
> +	LEAVE();
> +}
> +
> +static int check_low_node_id_partition(void)
> +{
> +	struct cluster_node *node = NULL;
> +	struct list_head *tmp;
> +	int found = 0;
> +
> +	ENTER();
> +
> +	list_iterate(tmp, &cluster_members_list) {
> +		node = list_entry(tmp, struct cluster_node, list);
> +		if (node->state == NODESTATE_MEMBER) {
> +			if (node->node_id == lowest_node_id) {
> +				found = 1;
> +			}
> +		}
> +	}
> +
> +	LEAVE();
> +	return found;
> +}
> +
> +static void set_quorate(int total_votes)
> +{
> +	int quorate;
> +	int quorum_change = 0;
> +
> +	ENTER();
> +
> +	/*
> +	 * wait for all nodes to show up before granting quorum
> +	 */
> +
> +	if ((wait_for_all) && (wait_for_all_status)) {
> +		if (total_votes != us->expected_votes) {
> +			log_printf(LOGSYS_LEVEL_NOTICE,
> +				   "Waiting for all cluster members. "
> +				   "Current votes: %d expected_votes: %d\n",
> +				   total_votes, us->expected_votes);
> +			cluster_is_quorate = 0;
> +			return;
> +		}
> +		wait_for_all_status = 0;
> +		get_lowest_node_id();
> +	}
> +
> +	if (quorum > total_votes) {
> +		quorate = 0;
> +	} else {
> +		quorate = 1;
> +	}
> +
> +	if ((auto_tie_breaker) &&
> +	    (total_votes == (us->expected_votes / 2)) &&
> +	    (check_low_node_id_partition() == 1)) {
> +		quorate = 1;
> +	}
> +
> +	if (cluster_is_quorate && !quorate) {
> +		quorum_change = 1;
> +		log_printf(LOGSYS_LEVEL_DEBUG, "quorum lost, blocking activity\n");
> +	}
> +	if (!cluster_is_quorate && quorate) {
> +		quorum_change = 1;
> +		log_printf(LOGSYS_LEVEL_DEBUG, "quorum regained, resuming activity\n");
> +	}
> +
> +	cluster_is_quorate = quorate;
> +
> +	if (wait_for_all) {
> +		if (quorate) {
> +			wait_for_all_status = 0;
> +		} else {
> +			wait_for_all_status = 1;
> +		}
> +	}
> +
> +	if (quorum_change) {
> +		set_quorum(quorum_members, quorum_members_entries,
> +			   cluster_is_quorate, &quorum_ringid);
> +	}
> +
> +	LEAVE();
> +}
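
Reviewer aside, not part of the patch: a concrete reading of the
auto_tie_breaker branch above, with made-up numbers. In a 4-node cluster with
one vote per node and expected_votes=4, quorum is 3, so a clean 2-2 split
would normally leave both halves inquorate; because each half then sees
total_votes == expected_votes / 2, the half that still contains the lowest
known node id passes check_low_node_id_partition() and is granted quorum, so
exactly one partition keeps running.
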
> +
> +static int calculate_quorum(int allow_decrease, int max_expected, unsigned int *ret_total_votes)
> +{
> +	struct list_head *nodelist;
> +	struct cluster_node *node;
> +	unsigned int total_votes = 0;
> +	unsigned int highest_expected = 0;
> +	unsigned int newquorum, q1, q2;
> +	unsigned int total_nodes = 0;
> +
> +	ENTER();
> +
> +	list_iterate(nodelist, &cluster_members_list) {
> +		node = list_entry(nodelist, struct cluster_node, list);
> +
> +		log_printf(LOGSYS_LEVEL_DEBUG, "node %x state=%d, votes=%d, expected=%d\n",
> +			   node->node_id, node->state, node->votes, node->expected_votes);
> +
> +		if (node->state == NODESTATE_MEMBER) {
> +			if (max_expected) {
> +				node->expected_votes = max_expected;
> +			} else {
> +				highest_expected = max(highest_expected, node->expected_votes);
> +			}
> +			total_votes += node->votes;
> +			total_nodes++;
> +		}
> +	}
> +
> +	if (quorum_device && quorum_device->state == NODESTATE_MEMBER) {
> +		total_votes += quorum_device->votes;
> +	}
> +
> +	if (max_expected > 0) {
> +		highest_expected = max_expected;
> +	}
> +
> +	/*
> +	 * This quorum calculation is taken from the OpenVMS Cluster Systems
> +	 * manual, but, then, you guessed that didn't you
> +	 */
> +	q1 = (highest_expected + 2) / 2;
> +	q2 = (total_votes + 2) / 2;
> +	newquorum = max(q1, q2);
> +
> +	/*
> +	 * Normally quorum never decreases but the system administrator can
> +	 * force it down by setting expected votes to a maximum value
> +	 */
> +	if (!allow_decrease) {
> +		newquorum = max(quorum, newquorum);
> +	}
> +
> +	/*
> +	 * The special two_node mode allows each of the two nodes to retain
> +	 * quorum if the other fails.  Only one of the two should live past
> +	 * fencing (as both nodes try to fence each other in split-brain.)
> +	 * Also: if there are more than two nodes, force us inquorate to avoid
> +	 * any damage or confusion.
> +	 */
> +	if (two_node && total_nodes <= 2) {
> +		newquorum = 1;
> +	}
> +
> +	if (ret_total_votes) {
> +		*ret_total_votes = total_votes;
> +	}
> +
> +	LEAVE();
> +	return newquorum;
> +}
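
Not part of the patch: a self-contained sketch of the OpenVMS-style
calculation above with made-up vote counts, for anyone who wants to poke at
the arithmetic outside the daemon:

#include <stdio.h>

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

/*
 * Mirrors the core of calculate_quorum(): a majority of the larger of the
 * highest expected votes and the currently visible votes.
 */
static unsigned int toy_quorum (unsigned int highest_expected, unsigned int total_votes)
{
	unsigned int q1 = (highest_expected + 2) / 2;
	unsigned int q2 = (total_votes + 2) / 2;

	return (MAX (q1, q2));
}

int main (void)
{
	/* 5 expected votes, only 3 members visible: quorum stays at 3 */
	printf ("expected=5 visible=3 -> quorum=%u\n", toy_quorum (5, 3));

	/*
	 * 2 expected votes, both visible: the formula gives 2, which is
	 * why the two_node special case above drops it to 1
	 */
	printf ("expected=2 visible=2 -> quorum=%u\n", toy_quorum (2, 2));

	return (0);
}
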
> +
> +/* Recalculate cluster quorum, set quorate and notify changes */
> +static void recalculate_quorum(int allow_decrease, int by_current_nodes)
> +{
> +	unsigned int total_votes = 0;
> +	int cluster_members = 0;
> +	struct list_head *nodelist;
> +	struct cluster_node *node;
> +
> +	ENTER();
> +
> +	list_iterate(nodelist, &cluster_members_list) {
> +		node = list_entry(nodelist, struct cluster_node, list);
> +		if (node->state == NODESTATE_MEMBER) {
> +			if (by_current_nodes) {
> +				cluster_members++;
> +			}
> +			total_votes += node->votes;
> +		}
> +	}
> +
> +	/*
> +	 * Keep expected_votes at the highest number of votes in the cluster
> +	 */
> +	log_printf(LOGSYS_LEVEL_DEBUG, "total_votes=%d, expected_votes=%d\n", total_votes, us->expected_votes);
> +	if (total_votes > us->expected_votes) {
> +		us->expected_votes = total_votes;
> +		send_expectedvotes_notification();
> +	}
> +
> +	quorum = calculate_quorum(allow_decrease, cluster_members, &total_votes);
> +	set_quorate(total_votes);
> +
> +	send_quorum_notification(NULL, 0L);
> +
> +	LEAVE();
> +}
> +
> +static void node_add_ordered(struct cluster_node *newnode)
> +{
> +	struct cluster_node *node = NULL;
> +	struct list_head *tmp;
> +	struct list_head *newlist = &newnode->list;
> +
> +	ENTER();
> +
> +	list_iterate(tmp, &cluster_members_list) {
> +		node = list_entry(tmp, struct cluster_node, list);
> +		if (newnode->node_id < node->node_id) {
> +			break;
> +		}
> +	}
> +
> +	if (!node) {
> +		list_add(&newnode->list, &cluster_members_list);
> +	} else {
> +		newlist->prev = tmp->prev;
> +		newlist->next = tmp;
> +		tmp->prev->next = newlist;
> +		tmp->prev = newlist;
> +	}
> +
> +	LEAVE();
> +}
> +
> +static struct cluster_node *allocate_node(int nodeid)
> +{
> +	struct cluster_node *cl;
> +
> +	ENTER();
> +
> +	cl = malloc(sizeof(struct cluster_node));
> +	if (cl) {
> +		memset(cl, 0, sizeof(struct cluster_node));
> +		cl->node_id = nodeid;
> +		if (nodeid) {
> +			node_add_ordered(cl);
> +		}
> +	}
> +
> +	LEAVE();
> +
> +	return cl;
> +}
> +
> +static struct cluster_node *find_node_by_nodeid(int nodeid)
> +{
> +	struct cluster_node *node;
> +	struct list_head *tmp;
> +
> +	ENTER();
> +
> +	if (nodeid == NODEID_US) {
> +		LEAVE();
> +		return us;
> +	}
> +
> +	if (nodeid == NODEID_QDEVICE) {
> +		LEAVE();
> +		return quorum_device;
> +	}
> +
> +	list_iterate(tmp, &cluster_members_list) {
> +		node = list_entry(tmp, struct cluster_node, list);
> +		if (node->node_id == nodeid) {
> +			LEAVE();
> +			return node;
> +		}
> +	}
> +
> +	LEAVE();
> +	return NULL;
> +}
> +
> +
> +static int quorum_exec_send_nodeinfo()
> +{
> +	struct req_exec_quorum_nodeinfo req_exec_quorum_nodeinfo;
> +	struct iovec iov[1];
> +	int ret;
> +
> +	ENTER();
> +
> +	req_exec_quorum_nodeinfo.expected_votes = us->expected_votes;
> +	req_exec_quorum_nodeinfo.votes = us->votes;
> +	req_exec_quorum_nodeinfo.major_version = VOTEQUORUM_MAJOR_VERSION;
> +	req_exec_quorum_nodeinfo.minor_version = VOTEQUORUM_MINOR_VERSION;
> +	req_exec_quorum_nodeinfo.patch_version = VOTEQUORUM_PATCH_VERSION;
> +	req_exec_quorum_nodeinfo.flags = us->flags;
> +	req_exec_quorum_nodeinfo.first_trans = first_trans;
> +	req_exec_quorum_nodeinfo.wait_for_all_status = wait_for_all_status;
> +	req_exec_quorum_nodeinfo.quorate = cluster_is_quorate;
> +
> +	req_exec_quorum_nodeinfo.header.id = SERVICE_ID_MAKE(VOTEQUORUM_SERVICE, MESSAGE_REQ_EXEC_VOTEQUORUM_NODEINFO);
> +	req_exec_quorum_nodeinfo.header.size = sizeof(req_exec_quorum_nodeinfo);
> +
> +	iov[0].iov_base = (void *)&req_exec_quorum_nodeinfo;
> +	iov[0].iov_len = sizeof(req_exec_quorum_nodeinfo);
> +
> +	ret = corosync_api->totem_mcast (iov, 1, TOTEM_AGREED);
> +
> +	LEAVE();
> +	return ret;
> +}
> +
> +
> +static int quorum_exec_send_reconfigure(int param, int nodeid, int value)
> +{
> +	struct req_exec_quorum_reconfigure req_exec_quorum_reconfigure;
> +	struct iovec iov[1];
> +	int ret;
> +
> +	ENTER();
> +
> +	req_exec_quorum_reconfigure.param = param;
> +	req_exec_quorum_reconfigure.nodeid = nodeid;
> +	req_exec_quorum_reconfigure.value = value;
> +
> +	req_exec_quorum_reconfigure.header.id = SERVICE_ID_MAKE(VOTEQUORUM_SERVICE, MESSAGE_REQ_EXEC_VOTEQUORUM_RECONFIGURE);
> +	req_exec_quorum_reconfigure.header.size = sizeof(req_exec_quorum_reconfigure);
> +
> +	iov[0].iov_base = (void *)&req_exec_quorum_reconfigure;
> +	iov[0].iov_len = sizeof(req_exec_quorum_reconfigure);
> +
> +	ret = corosync_api->totem_mcast (iov, 1, TOTEM_AGREED);
> +
> +	LEAVE();
> +	return ret;
> +}
> +
> +static void lms_timer_fn(void *arg)
> +{
> +	ENTER();
> +
> +	last_man_standing_timer_set = 0;
> +	if (cluster_is_quorate) {
> +		recalculate_quorum(1,1);
> +	}
> +
> +	LEAVE();
> +}
> +
> +static void quorum_confchg_fn (
> +	enum totem_configuration_type configuration_type,
> +	const unsigned int *member_list, size_t member_list_entries,
> +	const unsigned int *left_list, size_t left_list_entries,
> +	const unsigned int *joined_list, size_t joined_list_entries,
> +	const struct memb_ring_id *ring_id)
> +{
> +	int i;
> +	int leaving = 0;
> +	struct cluster_node *node;
> +
> +	ENTER();
> +
> +	if (member_list_entries > 1) {
> +		first_trans = 0;
> +	}
> +
> +	if (left_list_entries) {
> +		for (i = 0; i< left_list_entries; i++) {
> +			node = find_node_by_nodeid(left_list[i]);
> +			if (node) {
> +				if (node->state == NODESTATE_LEAVING) {
> +					leaving = 1;
> +				}
> +				node->state = NODESTATE_DEAD;
> +				node->flags |= NODE_FLAGS_BEENDOWN;
> +			}
> +		}
> +	}
> +
> +	if (last_man_standing) {
> +		if (((member_list_entries >= quorum) && (left_list_entries)) ||
> +		    ((member_list_entries <= quorum) && (auto_tie_breaker) && (check_low_node_id_partition() == 1))) {
> +			if (last_man_standing_timer_set) {
> +				corosync_api->timer_delete(last_man_standing_timer);
> +				last_man_standing_timer_set = 0;
> +			}
> +			corosync_api->timer_add_duration((unsigned long long)last_man_standing_window*1000000, NULL, lms_timer_fn, &last_man_standing_timer);
> +			last_man_standing_timer_set = 1;
> +		}
> +	}
> +
> +	if (member_list_entries) {
> +		memcpy(quorum_members, member_list, sizeof(unsigned int) * member_list_entries);
> +		quorum_members_entries = member_list_entries;
> +		if (quorum_device) {
> +			quorum_members[quorum_members_entries++] = 0;
> +		}
> +		quorum_exec_send_nodeinfo();
> +	}
> +
> +	if (left_list_entries) {
> +		recalculate_quorum(leaving, leaving);
> +	}
> +
> +	memcpy(&quorum_ringid, ring_id, sizeof(*ring_id));
> +
> +	if (configuration_type == TOTEM_CONFIGURATION_REGULAR) {
> +		set_quorum(quorum_members, quorum_members_entries,
> +			   cluster_is_quorate, &quorum_ringid);
> +	}
> +
> +	LEAVE();
> +}
> +
> +static void exec_votequorum_nodeinfo_endian_convert (void *message)
> +{
> +	struct req_exec_quorum_nodeinfo *nodeinfo = message;
> +
> +	ENTER();
> +
> +	nodeinfo->votes = swab32(nodeinfo->votes);
> +	nodeinfo->expected_votes = swab32(nodeinfo->expected_votes);
> +	nodeinfo->major_version = swab32(nodeinfo->major_version);
> +	nodeinfo->minor_version = swab32(nodeinfo->minor_version);
> +	nodeinfo->patch_version = swab32(nodeinfo->patch_version);
> +	nodeinfo->config_version = swab32(nodeinfo->config_version);
> +	nodeinfo->flags = swab32(nodeinfo->flags);
> +	nodeinfo->wait_for_all_status = swab32(nodeinfo->wait_for_all_status);
> +	nodeinfo->quorate = swab32(nodeinfo->quorate);
> +
> +	LEAVE();
> +}
> +
> +static void exec_votequorum_reconfigure_endian_convert (void *message)
> +{
> +	struct req_exec_quorum_reconfigure *reconfigure = message;
> +
> +	ENTER();
> +
> +	reconfigure->nodeid = swab32(reconfigure->nodeid);
> +	reconfigure->value = swab32(reconfigure->value);
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_exec_votequorum_nodeinfo (
> +	const void *message,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_quorum_nodeinfo *req_exec_quorum_nodeinfo = message;
> +	struct cluster_node *node;
> +	int old_votes;
> +	int old_expected;
> +	nodestate_t old_state;
> +	int new_node = 0;
> +
> +	ENTER();
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got nodeinfo message from cluster node %d\n", nodeid);
> +
> +	node = find_node_by_nodeid(nodeid);
> +	if (!node) {
> +		node = allocate_node(nodeid);
> +		new_node = 1;
> +	}
> +	if (!node) {
> +		corosync_api->error_memory_failure();
> +		LEAVE();
> +		return;
> +	}
> +
> +	old_votes = node->votes;
> +	old_expected = node->expected_votes;
> +	old_state = node->state;
> +
> +	/* Update node state */
> +	node->votes = req_exec_quorum_nodeinfo->votes;
> +	node->expected_votes = req_exec_quorum_nodeinfo->expected_votes;
> +	node->state = NODESTATE_MEMBER;
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "nodeinfo message: votes: %d, expected: %d wfa: %d quorate: %d\n",
> +					req_exec_quorum_nodeinfo->votes,
> +					req_exec_quorum_nodeinfo->expected_votes,
> +					req_exec_quorum_nodeinfo->wait_for_all_status,
> +					req_exec_quorum_nodeinfo->quorate);
> +
> +	if ((last_man_standing) && (req_exec_quorum_nodeinfo->votes > 1)) {
> +		log_printf(LOGSYS_LEVEL_WARNING, "Last Man Standing feature is supported only when all "
> +						 "cluster nodes votes are set to 1. Disabling LMS.");
> +		last_man_standing = 0;
> +		if (last_man_standing_timer_set) {
> +			corosync_api->timer_delete(last_man_standing_timer);
> +			last_man_standing_timer_set = 0;
> +		}
> +	}
> +
> +	node->flags &= ~NODE_FLAGS_BEENDOWN;
> +
> +	if (new_node ||
> +	    req_exec_quorum_nodeinfo->first_trans || 
> +	    old_votes != node->votes ||
> +	    old_expected != node->expected_votes ||
> +	    old_state != node->state) {
> +		recalculate_quorum(0, 0);
> +	}
> +
> +	if (!nodeid) {
> +		free(node);
> +	}
> +
> +	if ((wait_for_all) &&
> +	    (!req_exec_quorum_nodeinfo->wait_for_all_status) &&
> +	    (req_exec_quorum_nodeinfo->quorate)) {
> +		wait_for_all_status = 0;
> +	}
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_exec_votequorum_reconfigure (
> +	const void *message,
> +	unsigned int nodeid)
> +{
> +	const struct req_exec_quorum_reconfigure *req_exec_quorum_reconfigure = message;
> +	struct cluster_node *node;
> +	struct list_head *nodelist;
> +
> +	ENTER();
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got reconfigure message from cluster node %d\n", nodeid);
> +
> +	node = find_node_by_nodeid(req_exec_quorum_reconfigure->nodeid);
> +	if (!node) {
> +		LEAVE();
> +		return;
> +	}
> +
> +	switch(req_exec_quorum_reconfigure->param)
> +	{
> +	case RECONFIG_PARAM_EXPECTED_VOTES:
> +		list_iterate(nodelist, &cluster_members_list) {
> +			node = list_entry(nodelist, struct cluster_node, list);
> +			if (node->state == NODESTATE_MEMBER &&
> +			    node->expected_votes > req_exec_quorum_reconfigure->value) {
> +				node->expected_votes = req_exec_quorum_reconfigure->value;
> +			}
> +		}
> +		send_expectedvotes_notification();
> +		recalculate_quorum(1, 0);  /* Allow decrease */
> +		break;
> +
> +	case RECONFIG_PARAM_NODE_VOTES:
> +		node->votes = req_exec_quorum_reconfigure->value;
> +		recalculate_quorum(1, 0);  /* Allow decrease */
> +		break;
> +
> +	}
> +
> +	LEAVE();
> +}
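
Side note for reviewers, not part of the patch, on the reconfigure path: the
library handlers further down (setexpected/setvotes) validate the request
against calculate_quorum() locally, then multicast
MESSAGE_REQ_EXEC_VOTEQUORUM_RECONFIGURE; every node applies the change in this
handler and recalculates with allow_decrease=1, so votes/expected_votes
changes stay consistent cluster-wide.
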
> +
> +static int quorum_lib_init_fn (void *conn)
> +{
> +	struct quorum_pd *pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> +
> +	ENTER();
> +
> +	list_init (&pd->list);
> +	pd->conn = conn;
> +
> +	LEAVE();
> +	return (0);
> +}
> +
> +/*
> + * Message from the library
> + */
> +static void message_handler_req_lib_votequorum_getinfo (void *conn, const void *message)
> +{
> +	const struct req_lib_votequorum_getinfo *req_lib_votequorum_getinfo = message;
> +	struct res_lib_votequorum_getinfo res_lib_votequorum_getinfo;
> +	struct cluster_node *node;
> +	unsigned int highest_expected = 0;
> +	unsigned int total_votes = 0;
> +	cs_error_t error = CS_OK;
> +
> +	ENTER();
> +
> +	log_printf(LOGSYS_LEVEL_DEBUG, "got getinfo request on %p for node %d\n", conn, req_lib_votequorum_getinfo->nodeid);
> +
> +	node = find_node_by_nodeid(req_lib_votequorum_getinfo->nodeid);
> +	if (node) {
> +		struct cluster_node *iternode;
> +		struct list_head *nodelist;
> +
> +		list_iterate(nodelist, &cluster_members_list) {
> +			iternode = list_entry(nodelist, struct cluster_node, list);
> +
> +			if (iternode->state == NODESTATE_MEMBER) {
> +				highest_expected =
> +					max(highest_expected, iternode->expected_votes);
> +				total_votes += iternode->votes;
> +			}
> +		}
> +
> +		if (quorum_device && quorum_device->state == NODESTATE_MEMBER) {
> +			total_votes += quorum_device->votes;
> +		}
> +
> +		res_lib_votequorum_getinfo.votes = us->votes;
> +		res_lib_votequorum_getinfo.expected_votes = us->expected_votes;
> +		res_lib_votequorum_getinfo.highest_expected = highest_expected;
> +
> +		res_lib_votequorum_getinfo.quorum = quorum;
> +		res_lib_votequorum_getinfo.total_votes = total_votes;
> +		res_lib_votequorum_getinfo.flags = 0;
> +		res_lib_votequorum_getinfo.nodeid = node->node_id;
> +
> +		if (two_node) {
> +			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_FLAG_TWONODE;
> +		}
> +		if (cluster_is_quorate) {
> +			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_FLAG_QUORATE;
> +		}
> +		if (wait_for_all) {
> +			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_WAIT_FOR_ALL;
> +		}
> +		if (last_man_standing) {
> +			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_LAST_MAN_STANDING;
> +		}
> +		if (auto_tie_breaker) {
> +			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_AUTO_TIE_BREAKER;
> +		}
> +	} else {
> +		error = CS_ERR_NOT_EXIST;
> +	}
> +
> +	res_lib_votequorum_getinfo.header.size = sizeof(res_lib_votequorum_getinfo);
> +	res_lib_votequorum_getinfo.header.id = MESSAGE_RES_VOTEQUORUM_GETINFO;
> +	res_lib_votequorum_getinfo.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_getinfo, sizeof(res_lib_votequorum_getinfo));
> +	log_printf(LOGSYS_LEVEL_DEBUG, "getinfo response error: %d\n", error);
> +
> +	LEAVE();
> +}
> +
> +/*
> + * Message from the library
> + */
> +static void message_handler_req_lib_votequorum_setexpected (void *conn, const void *message)
> +{
> +	const struct req_lib_votequorum_setexpected *req_lib_votequorum_setexpected = message;
> +	struct res_lib_votequorum_status res_lib_votequorum_status;
> +	cs_error_t error = CS_OK;
> +	unsigned int newquorum;
> +	unsigned int total_votes;
> +
> +	ENTER();
> +
> +	/*
> +	 * Validate new expected votes
> +	 */
> +	newquorum = calculate_quorum(1, req_lib_votequorum_setexpected->expected_votes, &total_votes);
> +	if (newquorum < total_votes / 2 ||
> +	    newquorum > total_votes) {
> +		error = CS_ERR_INVALID_PARAM;
> +		goto error_exit;
> +	}
> +
> +	quorum_exec_send_reconfigure(RECONFIG_PARAM_EXPECTED_VOTES, us->node_id,
> +				     req_lib_votequorum_setexpected->expected_votes);
> +
> +	/*
> +	 * send status
> +	 */
> +error_exit:
> +	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> +	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> +	res_lib_votequorum_status.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> +
> +	LEAVE();
> +}
> +
> +/*
> + * Message from the library
> + */
> +static void message_handler_req_lib_votequorum_setvotes (void *conn, const void *message)
> +{
> +	const struct req_lib_votequorum_setvotes *req_lib_votequorum_setvotes = message;
> +	struct res_lib_votequorum_status res_lib_votequorum_status;
> +	struct cluster_node *node;
> +	unsigned int newquorum;
> +	unsigned int total_votes;
> +	unsigned int saved_votes;
> +	cs_error_t error = CS_OK;
> +	unsigned int nodeid;
> +
> +	ENTER();
> +
> +	nodeid = req_lib_votequorum_setvotes->nodeid;
> +	node = find_node_by_nodeid(nodeid);
> +	if (!node) {
> +		error = CS_ERR_NAME_NOT_FOUND;
> +		goto error_exit;
> +	}
> +
> +	/*
> +	 * Check votes is valid
> +	 */
> +	saved_votes = node->votes;
> +	node->votes = req_lib_votequorum_setvotes->votes;
> +
> +	newquorum = calculate_quorum(1, 0, &total_votes);
> +
> +	if (newquorum < total_votes / 2 ||
> +	    newquorum > total_votes) {
> +		node->votes = saved_votes;
> +		error = CS_ERR_INVALID_PARAM;
> +		goto error_exit;
> +	}
> +
> +	if (!nodeid) {
> +		nodeid = corosync_api->totem_nodeid_get();
> +	}
> +
> +	quorum_exec_send_reconfigure(RECONFIG_PARAM_NODE_VOTES, nodeid,
> +				     req_lib_votequorum_setvotes->votes);
> +
> +	/*
> +	 * send status
> +	 */
> +error_exit:
> +	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> +	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> +	res_lib_votequorum_status.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> +
> +	LEAVE();
> +}
> +
> +static void quorum_device_timer_fn(void *arg)
> +{
> +	ENTER();
> +
> +	if (!quorum_device || quorum_device->state == NODESTATE_DEAD) {
> +		LEAVE();
> +		return;
> +	}
> +
> +	if ((quorum_device->last_hello / QB_TIME_NS_IN_SEC) + quorumdev_poll/1000 <
> +	    (qb_util_nano_current_get () / QB_TIME_NS_IN_SEC)) {
> +		quorum_device->state = NODESTATE_DEAD;
> +		log_printf(LOGSYS_LEVEL_INFO, "lost contact with quorum device\n");
> +		recalculate_quorum(0, 0);
> +	} else {
> +		corosync_api->timer_add_duration((unsigned long long)quorumdev_poll*1000000, quorum_device,
> +						 quorum_device_timer_fn, &quorum_device_timer);
> +	}
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_votequorum_qdisk_register (void *conn,
> +							       const void *message)
> +{
> +	const struct req_lib_votequorum_qdisk_register *req_lib_votequorum_qdisk_register = message;
> +	struct res_lib_votequorum_status res_lib_votequorum_status;
> +	cs_error_t error = CS_OK;
> +
> +	ENTER();
> +
> +	if (quorum_device) {
> +		error = CS_ERR_EXIST;
> +	} else {
> +		quorum_device = allocate_node(0);
> +		quorum_device->state = NODESTATE_DEAD;
> +		quorum_device->votes = req_lib_votequorum_qdisk_register->votes;
> +		strcpy(quorum_device_name, req_lib_votequorum_qdisk_register->name);
> +		list_add(&quorum_device->list, &cluster_members_list);
> +	}
> +
> +	/*
> +	 * send status
> +	 */
> +	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> +	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> +	res_lib_votequorum_status.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_votequorum_qdisk_unregister (void *conn,
> +								 const void *message)
> +{
> +	struct res_lib_votequorum_status res_lib_votequorum_status;
> +	cs_error_t error = CS_OK;
> +
> +	ENTER();
> +
> +	if (quorum_device) {
> +		struct cluster_node *node = quorum_device;
> +
> +		quorum_device = NULL;
> +		list_del(&node->list);
> +		free(node);
> +		recalculate_quorum(0, 0);
> +	} else {
> +		error = CS_ERR_NOT_EXIST;
> +	}
> +
> +	/*
> +	 * send status
> +	 */
> +	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> +	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> +	res_lib_votequorum_status.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_votequorum_qdisk_poll (void *conn,
> +							   const void *message)
> +{
> +	const struct req_lib_votequorum_qdisk_poll *req_lib_votequorum_qdisk_poll = message;
> +	struct res_lib_votequorum_status res_lib_votequorum_status;
> +	cs_error_t error = CS_OK;
> +
> +	ENTER();
> +
> +	if (quorum_device) {
> +		if (req_lib_votequorum_qdisk_poll->state) {
> +			quorum_device->last_hello = qb_util_nano_current_get ();
> +			if (quorum_device->state == NODESTATE_DEAD) {
> +				quorum_device->state = NODESTATE_MEMBER;
> +				recalculate_quorum(0, 0);
> +
> +				corosync_api->timer_add_duration((unsigned long long)quorumdev_poll*1000000, quorum_device,
> +								 quorum_device_timer_fn, &quorum_device_timer);
> +			}
> +		} else {
> +			if (quorum_device->state == NODESTATE_MEMBER) {
> +				quorum_device->state = NODESTATE_DEAD;
> +				recalculate_quorum(0, 0);
> +				corosync_api->timer_delete(quorum_device_timer);
> +			}
> +		}
> +	} else {
> +		error = CS_ERR_NOT_EXIST;
> +	}
> +
> +	/*
> +	 * send status
> +	 */
> +	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> +	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> +	res_lib_votequorum_status.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_votequorum_qdisk_getinfo (void *conn,
> +							      const void *message)
> +{
> +	struct res_lib_votequorum_qdisk_getinfo res_lib_votequorum_qdisk_getinfo;
> +	cs_error_t error = CS_OK;
> +
> +	ENTER();
> +
> +	if (quorum_device) {
> +		log_printf(LOGSYS_LEVEL_DEBUG, "got qdisk_getinfo state %d\n", quorum_device->state);
> +		res_lib_votequorum_qdisk_getinfo.votes = quorum_device->votes;
> +		if (quorum_device->state == NODESTATE_MEMBER) {
> +			res_lib_votequorum_qdisk_getinfo.state = 1;
> +		} else {
> +			res_lib_votequorum_qdisk_getinfo.state = 0;
> +		}
> +		strcpy(res_lib_votequorum_qdisk_getinfo.name, quorum_device_name);
> +	} else {
> +		error = CS_ERR_NOT_EXIST;
> +	}
> +
> +	/*
> +	 * send status
> +	 */
> +	res_lib_votequorum_qdisk_getinfo.header.size = sizeof(res_lib_votequorum_qdisk_getinfo);
> +	res_lib_votequorum_qdisk_getinfo.header.id = MESSAGE_RES_VOTEQUORUM_GETINFO;
> +	res_lib_votequorum_qdisk_getinfo.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_qdisk_getinfo, sizeof(res_lib_votequorum_qdisk_getinfo));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_votequorum_trackstart (void *conn,
> +							   const void *message)
> +{
> +	const struct req_lib_votequorum_trackstart *req_lib_votequorum_trackstart = message;
> +	struct res_lib_votequorum_status res_lib_votequorum_status;
> +	struct quorum_pd *quorum_pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> +
> +	ENTER();
> +	/*
> +	 * If an immediate listing of the current cluster membership
> +	 * is requested, generate membership list
> +	 */
> +	if (req_lib_votequorum_trackstart->track_flags & CS_TRACK_CURRENT ||
> +	    req_lib_votequorum_trackstart->track_flags & CS_TRACK_CHANGES) {
> +		log_printf(LOGSYS_LEVEL_DEBUG, "sending initial status to %p\n", conn);
> +		send_quorum_notification(conn, req_lib_votequorum_trackstart->context);
> +	}
> +
> +	/*
> +	 * Record requests for tracking
> +	 */
> +	if (req_lib_votequorum_trackstart->track_flags & CS_TRACK_CHANGES ||
> +	    req_lib_votequorum_trackstart->track_flags & CS_TRACK_CHANGES_ONLY) {
> +
> +		quorum_pd->track_flags = req_lib_votequorum_trackstart->track_flags;
> +		quorum_pd->tracking_enabled = 1;
> +		quorum_pd->tracking_context = req_lib_votequorum_trackstart->context;
> +
> +		list_add (&quorum_pd->list, &trackers_list);
> +	}
> +
> +	/*
> +	 * Send status
> +	 */
> +	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> +	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> +	res_lib_votequorum_status.header.error = CS_OK;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> +
> +	LEAVE();
> +}
> +
> +static void message_handler_req_lib_votequorum_trackstop (void *conn,
> +							  const void *message)
> +{
> +	struct res_lib_votequorum_status res_lib_votequorum_status;
> +	struct quorum_pd *quorum_pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> +	int error = CS_OK;
> +
> +	ENTER();
> +
> +	if (quorum_pd->tracking_enabled) {
> +		error = CS_OK;
> +		quorum_pd->tracking_enabled = 0;
> +		list_del (&quorum_pd->list);
> +		list_init (&quorum_pd->list);
> +	} else {
> +		error = CS_ERR_NOT_EXIST;
> +	}
> +
> +	/*
> +	 * send status
> +	 */
> +	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> +	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> +	res_lib_votequorum_status.header.error = error;
> +	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> +
> +	LEAVE();
> +}
> +
> +static void reread_config(void)
> +{
> +	unsigned int old_votes;
> +	unsigned int old_expected;
> +
> +	ENTER();
> +
> +	old_votes = us->votes;
> +	old_expected = us->expected_votes;
> +
> +	/*
> +	 * Reload the configuration
> +	 */
> +	read_quorum_config();
> +
> +	/*
> +	 * Check for fundamental changes that we need to propagate
> +	 */
> +	if (old_votes != us->votes) {
> +		quorum_exec_send_reconfigure(RECONFIG_PARAM_NODE_VOTES, us->node_id, us->votes);
> +	}
> +	if (old_expected != us->expected_votes) {
> +		quorum_exec_send_reconfigure(RECONFIG_PARAM_EXPECTED_VOTES, us->node_id, us->expected_votes);
> +	}
> +
> +	LEAVE();
> +}
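
reread_config() follows a compare-and-broadcast pattern: reload everything,
then announce only the parameters whose values actually changed, so an
unchanged configuration generates no cluster traffic. A standalone sketch of
that pattern with invented names (not the votequorum structures):

#include <stdio.h>

struct quorum_cfg {
	unsigned int votes;
	unsigned int expected_votes;
};

static void announce(const char *what, unsigned int value)
{
	printf("reconfigure %s=%u\n", what, value);
}

static void propagate_changes(const struct quorum_cfg *old_cfg,
			      const struct quorum_cfg *new_cfg)
{
	if (old_cfg->votes != new_cfg->votes)
		announce("node_votes", new_cfg->votes);
	if (old_cfg->expected_votes != new_cfg->expected_votes)
		announce("expected_votes", new_cfg->expected_votes);
}

int main(void)
{
	struct quorum_cfg before = { 1, 3 }, after = { 1, 5 };

	propagate_changes(&before, &after);	/* only expected_votes is announced */
	return 0;
}
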
> +
> +static void key_change_quorum(
> +	int32_t event,
> +	const char *key_name,
> +	struct icmap_notify_value new_val,
> +	struct icmap_notify_value old_val,
> +	void *user_data)
> +{
> +	ENTER();
> +
> +	reread_config();
> +
> +	LEAVE();
> +}
> +
> +static void add_votequorum_config_notification(void)
> +{
> +	icmap_track_t icmap_track;
> +
> +	ENTER();
> +
> +	icmap_track_add("quorum.",
> +		ICMAP_TRACK_ADD | ICMAP_TRACK_DELETE | ICMAP_TRACK_MODIFY | ICMAP_TRACK_PREFIX,
> +		key_change_quorum,
> +		NULL,
> +		&icmap_track);
> +
> +	LEAVE();
> +}
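
The hook above registers a single prefix tracker, so any add, delete or modify
of a key under "quorum." funnels into key_change_quorum() and triggers a full
reread. A standalone sketch of prefix-based dispatch with invented names
(icmap itself is not reproduced here):

#include <stdio.h>
#include <string.h>

typedef void (*track_cb_t)(const char *key);

static void key_change_quorum_cb(const char *key)
{
	printf("quorum key changed: %s -> reread config\n", key);
}

static void dispatch_key_change(const char *key, const char *prefix,
				track_cb_t cb)
{
	/* only keys under the registered prefix reach the callback */
	if (strncmp(key, prefix, strlen(prefix)) == 0)
		cb(key);
}

int main(void)
{
	dispatch_key_change("quorum.expected_votes", "quorum.", key_change_quorum_cb);
	dispatch_key_change("totem.token", "quorum.", key_change_quorum_cb);
	return 0;
}
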
> diff --git a/exec/vsf_quorum.c b/exec/vsf_quorum.c
> index 6c2fd42..c59afb9 100644
> --- a/exec/vsf_quorum.c
> +++ b/exec/vsf_quorum.c
> @@ -195,45 +195,11 @@ static struct corosync_service_engine quorum_service_handler = {
>  	.sync_mode				= CS_SYNC_V1
>  };
>  
> -static struct lcr_iface corosync_quorum_ver0[1] = {
> -	{
> -		.name			= "corosync_quorum",
> -		.version		= 0,
> -		.versions_replace	= 0,
> -		.versions_replace_count	= 0,
> -		.dependencies		= 0,
> -		.dependency_count	= 0,
> -		.constructor		= NULL,
> -		.destructor		= NULL,
> -		.interfaces		= NULL,
> -	},
> -};
> -
>  static struct corosync_service_engine *quorum_get_service_handler_ver0 (void)
>  {
>  	return (&quorum_service_handler);
>  }
>  
> -static struct lcr_comp quorum_comp_ver0 = {
> -	.iface_count			= 1,
> -	.ifaces				= corosync_quorum_ver0
> -};
> -
> -static struct corosync_service_engine_iface_ver0 quorum_service_handler_iface = {
> -	.corosync_get_service_engine_ver0 = quorum_get_service_handler_ver0
> -};
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_component_register (&quorum_comp_ver0);
> -	lcr_interfaces_set (&corosync_quorum_ver0[0], &quorum_service_handler_iface);
> -}
> -
>  /* -------------------------------------------------- */
>  
>  
> @@ -296,6 +262,7 @@ static int quorum_exec_init_fn (struct corosync_api_v1 *api)
>  #ifdef COROSYNC_SOLARIS
>  	logsys_subsys_init();
>  #endif
> +#ifdef SDAKE
>  	corosync_api = api;
>  	list_init (&lib_trackers_list);
>  	list_init (&internal_trackers_list);
> @@ -347,6 +314,7 @@ static int quorum_exec_init_fn (struct corosync_api_v1 *api)
>  		quorum_type = 0;
>  	}
>  
> +#endif
>  	return (0);
>  }
>  
> diff --git a/exec/vsf_ykd.c b/exec/vsf_ykd.c
> index f9012be..01eab84 100644
> --- a/exec/vsf_ykd.c
> +++ b/exec/vsf_ykd.c
> @@ -63,7 +63,6 @@
>  #include <corosync/coroapi.h>
>  #include <corosync/engine/quorum.h>
>  #include <corosync/swab.h>
> -#include <corosync/lcr/lcr_comp.h>
>  
>  LOGSYS_DECLARE_SUBSYS ("YKD");
>  
> @@ -526,39 +525,3 @@ static void ykd_init (
>  
>  	ykd_state_init ();
>  }
> -
> -/*
> - * lcrso object definition
> - */
> -static struct quorum_services_api_ver1 vsf_ykd_iface_ver0 = {
> -	.init				= ykd_init,
> -};
> -
> -static struct lcr_iface corosync_vsf_ykd_ver0[1] = {
> -	{
> -		.name			= "corosync_quorum_ykd",
> -		.version		= 0,
> -		.versions_replace	= 0,
> -		.versions_replace_count	= 0,
> -		.dependencies		= 0,
> -		.dependency_count	= 0,
> -		.constructor		= NULL,
> -		.destructor		= NULL,
> -		.interfaces		= (void **)(void *)&vsf_ykd_iface_ver0,
> -	}
> -};
> -
> -static struct lcr_comp vsf_ykd_comp_ver0 = {
> -	.iface_count			= 1,
> -	.ifaces				= corosync_vsf_ykd_ver0
> -};
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_component_register (&vsf_ykd_comp_ver0);
> -}
> diff --git a/exec/wd.c b/exec/wd.c
> new file mode 100644
> index 0000000..1297ecd
> --- /dev/null
> +++ b/exec/wd.c
> @@ -0,0 +1,707 @@
> +/*
> + * Copyright (c) 2010 Red Hat, Inc.
> + *
> + * All rights reserved.
> + *
> + * Author: Angus Salkeld <asalkeld@xxxxxxxxxx>
> + *
> + * This software licensed under BSD license, the text of which follows:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + *
> + * - Redistributions of source code must retain the above copyright notice,
> + *   this list of conditions and the following disclaimer.
> + * - Redistributions in binary form must reproduce the above copyright notice,
> + *   this list of conditions and the following disclaimer in the documentation
> + *   and/or other materials provided with the distribution.
> + * - Neither the name of the MontaVista Software, Inc. nor the names of its
> + *   contributors may be used to endorse or promote products derived from this
> + *   software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> + * THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <config.h>
> +
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <sys/ioctl.h>
> +#include <linux/types.h>
> +#include <linux/watchdog.h>
> +#include <sys/reboot.h>
> +
> +#include <corosync/corotypes.h>
> +#include <corosync/corodefs.h>
> +#include <corosync/coroapi.h>
> +#include <corosync/list.h>
> +#include <corosync/logsys.h>
> +#include <corosync/icmap.h>
> +#include "../exec/fsm.h"
> +
> +
> +typedef enum {
> +	WD_RESOURCE_GOOD,
> +	WD_RESOURCE_FAILED,
> +	WD_RESOURCE_STATE_UNKNOWN,
> +	WD_RESOURCE_NOT_MONITORED
> +} wd_resource_state_t;
> +
> +struct resource {
> +	char res_path[ICMAP_KEYNAME_MAXLEN];
> +	char *recovery;
> +	char name[CS_MAX_NAME_LENGTH];
> +	time_t last_updated;
> +	struct cs_fsm fsm;
> +
> +	corosync_timer_handle_t check_timer;
> +	uint64_t check_timeout;
> +	icmap_track_t icmap_track;
> +};
> +
> +LOGSYS_DECLARE_SUBSYS("WD");
> +
> +/*
> + * Service Interfaces required by service_message_handler struct
> + */
> +static int wd_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api);
> +static int wd_exec_exit_fn (void);
> +static void wd_resource_check_fn (void* resource_ref);
> +
> +static struct corosync_api_v1 *api;
> +#define WD_DEFAULT_TIMEOUT_SEC 6
> +#define WD_DEFAULT_TIMEOUT_MS (WD_DEFAULT_TIMEOUT_SEC * CS_TIME_MS_IN_SEC)
> +#define WD_MIN_TIMEOUT_MS 500
> +#define WD_MAX_TIMEOUT_MS (120 * CS_TIME_MS_IN_SEC)
> +static uint32_t watchdog_timeout = WD_DEFAULT_TIMEOUT_SEC;
> +static uint64_t tickle_timeout = (WD_DEFAULT_TIMEOUT_MS / 2);
> +static int dog = -1;
> +static corosync_timer_handle_t wd_timer;
> +static int watchdog_ok = 1;
> +
> +struct corosync_service_engine wd_service_engine = {
> +	.name			= "corosync watchdog service",
> +	.id			= WD_SERVICE,
> +	.priority		= 1,
> +	.private_data_size	= 0,
> +	.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> +	.lib_init_fn		= NULL,
> +	.lib_exit_fn		= NULL,
> +	.lib_engine		= NULL,
> +	.lib_engine_count	= 0,
> +	.exec_engine		= NULL,
> +	.exec_engine_count	= 0,
> +	.confchg_fn		= NULL,
> +	.exec_init_fn		= wd_exec_init_fn,
> +	.exec_exit_fn		= wd_exec_exit_fn,
> +	.exec_dump_fn		= NULL,
> +	.sync_mode		= CS_SYNC_V2
> +};
> +
> +static DECLARE_LIST_INIT (confchg_notify);
> +
> +/*
> + * F S M
> + */
> +static void wd_config_changed (struct cs_fsm* fsm, int32_t event, void * data);
> +static void wd_resource_failed (struct cs_fsm* fsm, int32_t event, void * data);
> +
> +enum wd_resource_state {
> +	WD_S_RUNNING,
> +	WD_S_FAILED,
> +	WD_S_STOPPED
> +};
> +
> +enum wd_resource_event {
> +	WD_E_FAILURE,
> +	WD_E_CONFIG_CHANGED
> +};
> +
> +const char * wd_running_str		= "running";
> +const char * wd_failed_str		= "failed";
> +const char * wd_failure_str		= "failure";
> +const char * wd_stopped_str		= "stopped";
> +const char * wd_config_changed_str	= "config_changed";
> +
> +struct cs_fsm_entry wd_fsm_table[] = {
> +	{ WD_S_STOPPED,	WD_E_CONFIG_CHANGED,	wd_config_changed,	{WD_S_STOPPED, WD_S_RUNNING, -1} },
> +	{ WD_S_STOPPED,	WD_E_FAILURE,		NULL,			{-1} },
> +	{ WD_S_RUNNING,	WD_E_CONFIG_CHANGED,	wd_config_changed,	{WD_S_RUNNING, WD_S_STOPPED, -1} },
> +	{ WD_S_RUNNING,	WD_E_FAILURE,		wd_resource_failed,	{WD_S_FAILED, -1} },
> +	{ WD_S_FAILED,	WD_E_CONFIG_CHANGED,	wd_config_changed,	{WD_S_RUNNING, WD_S_STOPPED, -1} },
> +	{ WD_S_FAILED,	WD_E_FAILURE,		NULL,			{-1} },
> +};
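
Each watchdog resource is driven through the table above: a row maps a
(state, event) pair to a handler plus the states that handler may move to,
and a NULL handler makes the event a no-op in that state. A simplified,
self-contained sketch of the dispatch half of that pattern (my own types,
not corosync's cs_fsm; the permitted-target list is omitted for brevity):

#include <stdio.h>

enum state { S_STOPPED, S_RUNNING, S_FAILED };
enum event { E_FAILURE, E_CONFIG_CHANGED };

struct fsm;
typedef void (*handler_t)(struct fsm *fsm, enum event ev);

struct fsm_entry {
	enum state curr;
	enum event ev;
	handler_t handler;
};

struct fsm {
	enum state curr;
	const struct fsm_entry *table;
	int entries;
};

static void on_failure(struct fsm *fsm, enum event ev)
{
	(void)ev;
	printf("resource failed, entering FAILED state\n");
	fsm->curr = S_FAILED;
}

static const struct fsm_entry table[] = {
	{ S_RUNNING, E_FAILURE, on_failure },
	{ S_STOPPED, E_FAILURE, NULL },		/* NULL handler: event ignored */
};

static void fsm_process(struct fsm *fsm, enum event ev)
{
	int i;

	for (i = 0; i < fsm->entries; i++) {
		if (fsm->table[i].curr == fsm->curr && fsm->table[i].ev == ev) {
			if (fsm->table[i].handler)
				fsm->table[i].handler(fsm, ev);
			return;
		}
	}
}

int main(void)
{
	struct fsm f = { S_RUNNING, table, 2 };

	fsm_process(&f, E_FAILURE);	/* RUNNING + FAILURE -> FAILED */
	fsm_process(&f, E_FAILURE);	/* no FAILED row in this sketch: ignored */
	return 0;
}
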
> +
> +static struct corosync_service_engine *wd_get_service_engine_ver0 (void)
> +{
> +	return (&wd_service_engine);
> +}
> +
> +static const char * wd_res_state_to_str(struct cs_fsm* fsm,
> +	int32_t state)
> +{
> +	switch (state) {
> +	case WD_S_STOPPED:
> +		return wd_stopped_str;
> +		break;
> +	case WD_S_RUNNING:
> +		return wd_running_str;
> +		break;
> +	case WD_S_FAILED:
> +		return wd_failed_str;
> +		break;
> +	}
> +	return NULL;
> +}
> +
> +static const char * wd_res_event_to_str(struct cs_fsm* fsm,
> +	int32_t event)
> +{
> +	switch (event) {
> +	case WD_E_CONFIG_CHANGED:
> +		return wd_config_changed_str;
> +		break;
> +	case WD_E_FAILURE:
> +		return wd_failure_str;
> +		break;
> +	}
> +	return NULL;
> +}
> +
> +/*
> + * returns (CS_TRUE == OK, CS_FALSE == failed)
> + */
> +static int32_t wd_resource_state_is_ok (struct resource *ref)
> +{
> +	char* state;
> +	uint64_t last_updated;
> +	uint64_t my_time;
> +	uint64_t allowed_period;
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "last_updated");
> +	if (icmap_get_uint64(key_name, &last_updated) != CS_OK) {
> +		/* key does not exist.
> +		*/
> +		return CS_FALSE;
> +	}
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "state");
> +	if (icmap_get_string(key_name, &state) != CS_OK || strcmp(state, "disabled") == 0) {
> +		/* key does not exist or resource is disabled.
> +		*/
> +		return CS_FALSE;
> +	}
> +
> +	if (last_updated == 0) {
> +		/* initial value */
> +		free(state);
> +		return CS_TRUE;
> +	}
> +
> +	my_time = cs_timestamp_get();
> +
> +	/*
> +	 * Here we check that the monitor has written a timestamp within the poll_period
> +	 * plus a grace factor of (0.5 * poll_period).
> +	 */
> +	allowed_period = (ref->check_timeout * MILLI_2_NANO_SECONDS * 3) / 2;
> +	if ((last_updated + allowed_period) < my_time) {
> +		log_printf (LOGSYS_LEVEL_ERROR,
> +			"last_updated %"PRIu64" ms too late, period:%"PRIu64".",
> +			(uint64_t)(my_time/MILLI_2_NANO_SECONDS - ((last_updated + allowed_period) / MILLI_2_NANO_SECONDS)),
> +			ref->check_timeout);
> +		return CS_FALSE;
> +	}
> +
> +	if (strcmp (state, wd_failed_str) == 0) {
> +		free(state);
> +		return CS_FALSE;
> +	}
> +
> +	free(state);
> +	return CS_TRUE;
> +}
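
The timing check above allows poll_period plus a 50% grace factor, all in
nanoseconds. A standalone sketch of just that window (assumed constant names):

#include <stdint.h>
#include <stdio.h>

#define MS_TO_NS 1000000ULL

static int resource_is_late(uint64_t last_updated_ns, uint64_t now_ns,
			    uint64_t poll_period_ms)
{
	/* poll_period * 1.5, converted to nanoseconds */
	uint64_t allowed_ns = (poll_period_ms * MS_TO_NS * 3) / 2;

	return last_updated_ns + allowed_ns < now_ns;
}

int main(void)
{
	/* with a 6000 ms poll period the window is 9000 ms:
	 * an update 8 s ago is fine, 10 s ago is late */
	uint64_t now = 100ULL * 1000 * MS_TO_NS;

	printf("%d %d\n",
	       resource_is_late(now - 8000 * MS_TO_NS, now, 6000),
	       resource_is_late(now - 10000 * MS_TO_NS, now, 6000));
	return 0;
}
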
> +
> +static void wd_config_changed (struct cs_fsm* fsm, int32_t event, void * data)
> +{
> +	char *state;
> +	uint64_t tmp_value;
> +	uint64_t next_timeout;
> +	struct resource *ref = (struct resource*)data;
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +
> +	next_timeout = ref->check_timeout;
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "poll_period");
> +	if (icmap_get_uint64(key_name, &tmp_value) == CS_OK) {
> +		if (tmp_value >= WD_MIN_TIMEOUT_MS && tmp_value <= WD_MAX_TIMEOUT_MS) {
> +			log_printf (LOGSYS_LEVEL_DEBUG,
> +				"poll_period changing from:%"PRIu64" to %"PRIu64".",
> +				ref->check_timeout, tmp_value);
> +			/*
> +			 * To ease the transition between poll_periods we make
> +			 * the first timeout the larger of the new and old values.
> +			 * This is to give the monitoring system time to adjust.
> +			 */
> +			next_timeout = CS_MAX(tmp_value, ref->check_timeout);
> +			ref->check_timeout = tmp_value;
> +		} else {
> +			log_printf (LOGSYS_LEVEL_WARNING,
> +				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> +				tmp_value, ref->name);
> +		}
> +	}
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "recovery");
> +	if (icmap_get_string(key_name, &ref->recovery) != CS_OK) {
> +		/* key does not exist.
> +		 */
> +		log_printf (LOGSYS_LEVEL_WARNING,
> +			"resource %s missing a recovery key.", ref->name);
> +		cs_fsm_state_set(&ref->fsm, WD_S_STOPPED, ref);
> +		return;
> +	}
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "state");
> +	if (icmap_get_string(key_name, &state) != CS_OK) {
> +		/* key does not exist.
> +		*/
> +		log_printf (LOGSYS_LEVEL_WARNING,
> +			"resource %s missing a state key.", ref->name);
> +		cs_fsm_state_set(&ref->fsm, WD_S_STOPPED, ref);
> +		return;
> +	}
> +	if (ref->check_timer) {
> +		api->timer_delete(ref->check_timer);
> +		ref->check_timer = 0;
> +	}
> +
> +	if (strcmp(wd_stopped_str, state) == 0) {
> +		cs_fsm_state_set(&ref->fsm, WD_S_STOPPED, ref);
> +	} else {
> +		api->timer_add_duration(next_timeout * MILLI_2_NANO_SECONDS,
> +			ref, wd_resource_check_fn, &ref->check_timer);
> +		cs_fsm_state_set(&ref->fsm, WD_S_RUNNING, ref);
> +	}
> +	free(state);
> +}
> +
> +static void wd_resource_failed (struct cs_fsm* fsm, int32_t event, void * data)
> +{
> +	struct resource* ref = (struct resource*)data;
> +
> +	if (ref->check_timer) {
> +		api->timer_delete(ref->check_timer);
> +		ref->check_timer = 0;
> +	}
> +
> +	log_printf (LOGSYS_LEVEL_CRIT, "%s resource \"%s\" failed!",
> +		ref->recovery, (char*)ref->name);
> +	if (strcmp (ref->recovery, "watchdog") == 0 ||
> +	    strcmp (ref->recovery, "quit") == 0) {
> +		watchdog_ok = 0;
> +	}
> +	else if (strcmp (ref->recovery, "reboot") == 0) {
> +		reboot(RB_AUTOBOOT);
> +	}
> +	else if (strcmp (ref->recovery, "shutdown") == 0) {
> +		reboot(RB_POWER_OFF);
> +	}
> +	cs_fsm_state_set(fsm, WD_S_FAILED, data);
> +}
> +
> +static void wd_key_changed(
> +	int32_t event,
> +	const char *key_name,
> +	struct icmap_notify_value new_val,
> +	struct icmap_notify_value old_val,
> +	void *user_data)
> +{
> +	struct resource* ref = (struct resource*)user_data;
> +	char *last_key_part;
> +
> +	if (ref == NULL) {
> +		return ;
> +	}
> +
> +	last_key_part = strrchr(key_name, '.');
> +	if (last_key_part == NULL) {
> +		return ;
> +	}
> +	last_key_part++;
> +
> +	if (event == ICMAP_TRACK_ADD || event == ICMAP_TRACK_MODIFY) {
> +		if (strcmp(last_key_part, "last_updated") == 0 ||
> +			strcmp(last_key_part, "current") == 0) {
> +			return;
> +		}
> +
> +		cs_fsm_process(&ref->fsm, WD_E_CONFIG_CHANGED, ref);
> +	}
> +
> +	if (event == ICMAP_TRACK_DELETE && ref != NULL) {
> +		if (strcmp(last_key_part, "state") != 0) {
> +			return ;
> +		}
> +
> +		log_printf (LOGSYS_LEVEL_WARNING,
> +			"resource \"%s\" deleted from cmap!",
> +			ref->name);
> +
> +		api->timer_delete(ref->check_timer);
> +		ref->check_timer = 0;
> +		icmap_track_delete(ref->icmap_track);
> +
> +		free(ref);
> +	}
> +}
> +
> +static void wd_resource_check_fn (void* resource_ref)
> +{
> +	struct resource* ref = (struct resource*)resource_ref;
> +
> +	if (wd_resource_state_is_ok (ref) == CS_FALSE) {
> +		cs_fsm_process(&ref->fsm, WD_E_FAILURE, ref);
> +		return;
> +	}
> +	api->timer_add_duration(ref->check_timeout*MILLI_2_NANO_SECONDS,
> +		ref, wd_resource_check_fn, &ref->check_timer);
> +}
> +
> +/*
> + * return 0   - fully configured
> + * return -1  - partially configured
> + */
> +static int32_t wd_resource_create (char *res_path, char *res_name)
> +{
> +	char *state;
> +	uint64_t tmp_value;
> +	struct resource *ref = malloc (sizeof (struct resource));
> +	char key_name[ICMAP_KEYNAME_MAXLEN];
> +
> +	strcpy(ref->res_path, res_path);
> +	ref->check_timeout = WD_DEFAULT_TIMEOUT_MS;
> +	ref->check_timer = 0;
> +
> +	strcpy(ref->name, res_name);
> +	ref->fsm.name = ref->name;
> +	ref->fsm.table = wd_fsm_table;
> +	ref->fsm.entries = sizeof(wd_fsm_table) / sizeof(struct cs_fsm_entry);
> +	ref->fsm.curr_entry = 0;
> +	ref->fsm.curr_state = WD_S_STOPPED;
> +	ref->fsm.state_to_str = wd_res_state_to_str;
> +	ref->fsm.event_to_str = wd_res_event_to_str;
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "poll_period");
> +	if (icmap_get_uint64(key_name, &tmp_value) != CS_OK) {
> +		icmap_set_uint64(key_name, ref->check_timeout);
> +	} else {
> +		if (tmp_value >= WD_MIN_TIMEOUT_MS && tmp_value <= WD_MAX_TIMEOUT_MS) {
> +			ref->check_timeout = tmp_value;
> +		} else {
> +			log_printf (LOGSYS_LEVEL_WARNING,
> +				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> +				tmp_value, ref->name);
> +		}
> +	}
> +
> +	icmap_track_add(res_path,
> +			ICMAP_TRACK_ADD | ICMAP_TRACK_MODIFY | ICMAP_TRACK_DELETE | ICMAP_TRACK_PREFIX,
> +			wd_key_changed,
> +			ref, &ref->icmap_track);
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "recovery");
> +	if (icmap_get_string(key_name, &ref->recovery) != CS_OK) {
> +		/* key does not exist.
> +		 */
> +		log_printf (LOGSYS_LEVEL_WARNING,
> +			"resource %s missing a recovery key.", ref->name);
> +		return -1;
> +	}
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "state");
> +	if (icmap_get_string(key_name, &state) != CS_OK) {
> +		/* key does not exist.
> +		*/
> +		log_printf (LOGSYS_LEVEL_WARNING,
> +			"resource %s missing a state key.", ref->name);
> +		return -1;
> +	}
> +
> +	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "last_updated");
> +	if (icmap_get_uint64(key_name, &tmp_value) != CS_OK) {
> +		/* key does not exist.
> +		 */
> +		ref->last_updated = 0;
> +	} else {
> +		ref->last_updated = tmp_value;
> +	}
> +
> +	/*
> +	 * delay the first check to give the monitor time to start working.
> +	 */
> +	tmp_value = CS_MAX(ref->check_timeout * 2, WD_DEFAULT_TIMEOUT_MS);
> +	api->timer_add_duration(tmp_value * MILLI_2_NANO_SECONDS,
> +		ref,
> +		wd_resource_check_fn, &ref->check_timer);
> +
> +	cs_fsm_state_set(&ref->fsm, WD_S_RUNNING, ref);
> +	return 0;
> +}
> +
> +
> +static void wd_tickle_fn (void* arg)
> +{
> +	ENTER();
> +
> +	if (watchdog_ok) {
> +		if (dog > 0) {
> +			ioctl(dog, WDIOC_KEEPALIVE, &watchdog_ok);
> +		}
> +		api->timer_add_duration(tickle_timeout*MILLI_2_NANO_SECONDS, NULL,
> +			wd_tickle_fn, &wd_timer);
> +	}
> +	else {
> +		log_printf (LOGSYS_LEVEL_ALERT, "NOT tickling the watchdog!");
> +	}
> +
> +}
> +
> +static void wd_resource_created_cb(
> +	int32_t event,
> +	const char *key_name,
> +	struct icmap_notify_value new_val,
> +	struct icmap_notify_value old_val,
> +	void *user_data)
> +{
> +	char res_name[ICMAP_KEYNAME_MAXLEN];
> +	char res_type[ICMAP_KEYNAME_MAXLEN];
> +	char tmp_key[ICMAP_KEYNAME_MAXLEN];
> +	int res;
> +
> +	if (event != ICMAP_TRACK_ADD) {
> +		return ;
> +	}
> +
> +	res = sscanf(key_name, "resources.%[^.].%[^.].%[^.]", res_type, res_name, tmp_key);
> +	if (res != 3) {
> +		return ;
> +	}
> +
> +	if (strcmp(tmp_key, "state") != 0) {
> +		return ;
> +	}
> +
> +	snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "resources.%s.%s.", res_type, res_name);
> +	wd_resource_create (tmp_key, res_name);
> +}
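
Resource keys arrive as flat cmap names of the form
"resources.<type>.<name>.<key>", and both this callback and
wd_scan_resources() split them with a single sscanf(). A standalone
illustration of that parse:

#include <stdio.h>

#define KEYLEN 256

int main(void)
{
	const char *key_name = "resources.process.foo.state";
	char res_type[KEYLEN], res_name[KEYLEN], tmp_key[KEYLEN];

	if (sscanf(key_name, "resources.%[^.].%[^.].%[^.]",
		   res_type, res_name, tmp_key) == 3) {
		/* prints: type=process name=foo key=state */
		printf("type=%s name=%s key=%s\n", res_type, res_name, tmp_key);
	}
	return 0;
}
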
> +
> +static void wd_scan_resources (void)
> +{
> +	int res_count = 0;
> +	icmap_track_t icmap_track;
> +	icmap_iter_t iter;
> +	const char *key_name;
> +	int res;
> +	char res_name[ICMAP_KEYNAME_MAXLEN];
> +	char res_type[ICMAP_KEYNAME_MAXLEN];
> +	char tmp_key[ICMAP_KEYNAME_MAXLEN];
> +
> +	ENTER();
> +
> +	iter = icmap_iter_init("resources.");
> +	while ((key_name = icmap_iter_next(iter, NULL, NULL)) != NULL) {
> +		res = sscanf(key_name, "resources.%[^.].%[^.].%[^.]", res_type, res_name, tmp_key);
> +		if (res != 3) {
> +			continue ;
> +		}
> +
> +		if (strcmp(tmp_key, "state") != 0) {
> +			continue ;
> +		}
> +
> +		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "resources.%s.%s.", res_type, res_name);
> +		if (wd_resource_create (tmp_key, res_name) == 0) {
> +			res_count++;
> +		}
> +	}
> +	icmap_iter_finalize(iter);
> +
> +	icmap_track_add("resources.process.", ICMAP_TRACK_ADD | ICMAP_TRACK_PREFIX,
> +			wd_resource_created_cb, NULL, &icmap_track);
> +	icmap_track_add("resources.system.", ICMAP_TRACK_ADD | ICMAP_TRACK_PREFIX,
> +			wd_resource_created_cb, NULL, &icmap_track);
> +
> +	if (res_count == 0) {
> +		log_printf (LOGSYS_LEVEL_INFO, "no resources configured.");
> +	}
> +}
> +
> +
> +static void watchdog_timeout_apply (uint32_t new)
> +{
> +	struct watchdog_info ident;
> +	uint32_t original_timeout = watchdog_timeout;
> +
> +	if (new == original_timeout) {
> +		return;
> +	}
> +
> +	watchdog_timeout = new;
> +
> +	if (dog > 0) {
> +		ioctl(dog, WDIOC_GETSUPPORT, &ident);
> +		if (ident.options & WDIOF_SETTIMEOUT) {
> +			/* yay! the dog is trained.
> +			 */
> +			ioctl(dog, WDIOC_SETTIMEOUT, &watchdog_timeout);
> +		}
> +		ioctl(dog, WDIOC_GETTIMEOUT, &watchdog_timeout);
> +	}
> +
> +	if (watchdog_timeout == new) {
> +		tickle_timeout = (watchdog_timeout * CS_TIME_MS_IN_SEC)/ 2;
> +
> +		/* reset the tickle timer in case it was reduced.
> +		 */
> +		api->timer_delete (wd_timer);
> +		api->timer_add_duration(tickle_timeout*MILLI_2_NANO_SECONDS, NULL,
> +			wd_tickle_fn, &wd_timer);
> +
> +		log_printf (LOGSYS_LEVEL_DEBUG, "The Watchdog timeout is %d seconds\n", watchdog_timeout);
> +		log_printf (LOGSYS_LEVEL_DEBUG, "The tickle timeout is %"PRIu64" ms\n", tickle_timeout);
> +	} else {
> +		log_printf (LOGSYS_LEVEL_WARNING,
> +			"Could not change the Watchdog timeout from %d to %d seconds\n",
> +			original_timeout, new);
> +	}
> +
> +}
> +
> +static int setup_watchdog(void)
> +{
> +	struct watchdog_info ident;
> +
> +	ENTER();
> +	if (access ("/dev/watchdog", W_OK) != 0) {
> +		log_printf (LOGSYS_LEVEL_WARNING, "No Watchdog, try modprobe <a watchdog>");
> +		dog = -1;
> +		return -1;
> +	}
> +
> +	/* here goes, let's hope they have "Magic Close"
> +	 */
> +	dog = open("/dev/watchdog", O_WRONLY);
> +
> +	if (dog == -1) {
> +		log_printf (LOGSYS_LEVEL_WARNING, "Watchdog exists but couldn't be opened.");
> +		dog = -1;
> +		return -1;
> +	}
> +
> +	/* Right we have the dog.
> +	 * Let's see what breed it is.
> +	 */
> +
> +	ioctl(dog, WDIOC_GETSUPPORT, &ident);
> +	log_printf (LOGSYS_LEVEL_INFO, "Watchdog is now being tickled by corosync.");
> +	log_printf (LOGSYS_LEVEL_DEBUG, "%s", ident.identity);
> +
> +	watchdog_timeout_apply (watchdog_timeout);
> +
> +	ioctl(dog, WDIOC_SETOPTIONS, WDIOS_ENABLECARD);
> +
> +	return 0;
> +}
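
setup_watchdog() is the usual /dev/watchdog handshake. A self-contained sketch
of the same sequence against the Linux watchdog API (the documented
WDIOC_SETOPTIONS form takes a pointer to the option flags), including the
magic-close release that wd_exec_exit_fn() relies on:

#include <fcntl.h>
#include <linux/watchdog.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct watchdog_info ident;
	int timeout = 6;
	int flags = WDIOS_ENABLECARD;
	char magic = 'V';
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	memset(&ident, 0, sizeof(ident));
	ioctl(fd, WDIOC_GETSUPPORT, &ident);
	if (ident.options & WDIOF_SETTIMEOUT)
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* only if the driver supports it */
	ioctl(fd, WDIOC_GETTIMEOUT, &timeout);		/* read back what was applied */
	ioctl(fd, WDIOC_SETOPTIONS, &flags);		/* arm the card */
	ioctl(fd, WDIOC_KEEPALIVE, 0);			/* tickle */

	printf("watchdog \"%s\", timeout %d s\n",
	       (const char *)ident.identity, timeout);

	write(fd, &magic, 1);				/* magic close: disarm on exit */
	close(fd);
	return 0;
}
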
> +
> +static void wd_top_level_key_changed(
> +	int32_t event,
> +	const char *key_name,
> +	struct icmap_notify_value new_val,
> +	struct icmap_notify_value old_val,
> +	void *user_data)
> +{
> +	uint32_t tmp_value_32;
> +
> +	ENTER();
> +
> +	if (icmap_get_uint32("resources.watchdog_timeout", &tmp_value_32) == CS_OK) {
> +		if (tmp_value_32 >= 2 && tmp_value_32 <= 120) {
> +			watchdog_timeout_apply (tmp_value_32);
> +		}
> +	}
> +	else {
> +		watchdog_timeout_apply (WD_DEFAULT_TIMEOUT_SEC);
> +	}
> +}
> +
> +static void watchdog_timeout_get_initial (void)
> +{
> +	uint32_t tmp_value_32;
> +	icmap_track_t icmap_track;
> +
> +	ENTER();
> +
> +	if (icmap_get_uint32("resources.watchdog_timeout", &tmp_value_32) != CS_OK) {
> +		watchdog_timeout_apply (WD_DEFAULT_TIMEOUT_SEC);
> +
> +		icmap_set_uint32("resources.watchdog_timeout", watchdog_timeout);
> +	}
> +	else {
> +		if (tmp_value_32 >= 2 && tmp_value_32 <= 120) {
> +			watchdog_timeout_apply (tmp_value_32);
> +		} else {
> +			watchdog_timeout_apply (WD_DEFAULT_TIMEOUT_SEC);
> +		}
> +	}
> +
> +	icmap_track_add("resources.watchdog_timeout", ICMAP_TRACK_MODIFY,
> +			wd_top_level_key_changed, NULL, &icmap_track);
> +
> +}
> +
> +static int wd_exec_init_fn (
> +	struct corosync_api_v1 *corosync_api)
> +{
> +
> +	ENTER();
> +#ifdef COROSYNC_SOLARIS
> +	logsys_subsys_init();
> +#endif
> +	api = corosync_api;
> +
> +	watchdog_timeout_get_initial();
> +
> +	setup_watchdog();
> +
> +	wd_scan_resources();
> +
> +	api->timer_add_duration(tickle_timeout*MILLI_2_NANO_SECONDS, NULL,
> +				wd_tickle_fn, &wd_timer);
> +
> +	return 0;
> +}
> +
> +static int wd_exec_exit_fn (void)
> +{
> +	char magic = 'V';
> +	ENTER();
> +
> +	if (dog > 0) {
> +		log_printf (LOGSYS_LEVEL_INFO, "magically closing the watchdog.");
> +		write (dog, &magic, 1);
> +	}
> +	return 0;
> +}
> +
> +
> diff --git a/services/Makefile.am b/services/Makefile.am
> deleted file mode 100644
> index 2ac4a03..0000000
> --- a/services/Makefile.am
> +++ /dev/null
> @@ -1,103 +0,0 @@
> -# Copyright (c) 2009 Red Hat, Inc.
> -#
> -# Authors: Andrew Beekhof
> -#	   Steven Dake (sdake@xxxxxxxxxx)
> -#
> -# This software licensed under BSD license, the text of which follows:
> -#
> -# Redistribution and use in source and binary forms, with or without
> -# modification, are permitted provided that the following conditions are met:
> -#
> -# - Redistributions of source code must retain the above copyright notice,
> -#   this list of conditions and the following disclaimer.
> -# - Redistributions in binary form must reproduce the above copyright notice,
> -#   this list of conditions and the following disclaimer in the documentation
> -#   and/or other materials provided with the distribution.
> -# - Neither the name of the MontaVista Software, Inc. nor the names of its
> -#   contributors may be used to endorse or promote products derived from this
> -#   software without specific prior written permission.
> -#
> -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> -# THE POSSIBILITY OF SUCH DAMAGE.
> -
> -MAINTAINERCLEANFILES    = Makefile.in
> -
> -AM_CFLAGS		= -fPIC
> -
> -INCLUDES		= -I$(top_builddir)/include -I$(top_srcdir)/include \
> -			  -I$(top_builddir)/include/corosync \
> -			  -I$(top_srcdir)/include/corosync
> -
> -SERVICE_LCRSO		= evs cfg cpg pload cmap
> -if BUILD_WATCHDOG
> -SERVICE_LCRSO		+= wd
> -endif
> -if BUILD_MONITORING
> -SERVICE_LCRSO		+= mon
> -endif
> -
> -QUORUM_LCRSO		= votequorum testquorum
> -
> -SOURCES			= $(SERVICE_LCRSO:%=%.c) $(QUORUM_LCRSO:%=%.c)
> -
> -EXTRA_DIST		= $(SOURCES)
> -
> -LCRSO			= $(SERVICE_LCRSO:%=service_%.lcrso) $(QUORUM_LCRSO:%=quorum_%.lcrso)
> -
> -LCRSO_OBJS		= $(SOURCES:%.c=%.o)
> -
> -if BUILD_DARWIN
> -quorum_%.lcrso: %.o
> -	$(CC) $(LDFLAGS) $(CFLAGS) -L$(top_builddir)/exec -llogsys -bundle -bundle_loader $(top_builddir)/exec/corosync $^ -o $@
> -
> -service_%.lcrso: %.o
> -	$(CC) $(LDFLAGS) $(CFLAGS) -L$(top_builddir)/exec -llogsys -bundle -bundle_loader $(top_builddir)/exec/corosync $^ -o $@
> -
> -else
> -
> -if BUILD_SOLARIS
> -
> -quorum_%.lcrso: %.o
> -	$(LD) $(LDFLAGS) -G $^ -o $@
> -
> -service_%.lcrso: %.o
> -	$(LD) $(LDFLAGS) -G $^ -o $@
> -
> -else
> -quorum_%.lcrso: %.o
> -	$(CC) $(LDFLAGS) $(CFLAGS) $(COVERAGE_LCRSO_EXTRA_LDFLAGS) -shared -Wl,-soname=$@ $^ -o $@
> -
> -service_%.lcrso: %.o
> -	$(CC) $(LDFLAGS) $(CFLAGS) $(COVERAGE_LCRSO_EXTRA_LDFLAGS) -shared -Wl,-soname=$@ $^ -o $@
> -endif
> -
> -endif
> -
> -%.o: %.c
> -	$(CC) $(AM_CFLAGS) $(CFLAGS) $(CPPFLAGS) $(INCLUDES) -c -o $@ $<
> -
> -lint:
> -	-splint $(INCLUDES) $(LINT_FLAGS) $(CFLAGS) *.c
> -
> -all-local: $(LCRSO_OBJS) $(LCRSO)
> -	@echo Built Service Engines
> -
> -install-exec-local:
> -	$(INSTALL) -d $(DESTDIR)/$(LCRSODIR)
> -	$(INSTALL) -m 755 $(LCRSO) $(DESTDIR)/$(LCRSODIR)
> -
> -uninstall-local:
> -	cd $(DESTDIR)/$(LCRSODIR) && \
> -		rm -f $(LCRSO)
> -
> -clean-local:
> -	rm -f *.o *.a *.so* *.da *.bb *.bbg *.lcrso
> diff --git a/services/cfg.c b/services/cfg.c
> deleted file mode 100644
> index 6703885..0000000
> --- a/services/cfg.c
> +++ /dev/null
> @@ -1,1104 +0,0 @@
> -/*
> - * Copyright (c) 2005-2006 MontaVista Software, Inc.
> - * Copyright (c) 2006-2009 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Steven Dake (sdake@xxxxxxxxxx)
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the MontaVista Software, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <sys/types.h>
> -#include <sys/uio.h>
> -#include <sys/socket.h>
> -#include <sys/un.h>
> -#include <netinet/in.h>
> -#include <arpa/inet.h>
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <stdlib.h>
> -#include <stdio.h>
> -#include <limits.h>
> -#include <errno.h>
> -#include <string.h>
> -#include <assert.h>
> -
> -#include <corosync/corotypes.h>
> -#include <qb/qbipc_common.h>
> -#include <corosync/cfg.h>
> -#include <corosync/list.h>
> -#include <corosync/mar_gen.h>
> -#include <corosync/totem/totemip.h>
> -#include <corosync/totem/totem.h>
> -#include <corosync/ipc_cfg.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/logsys.h>
> -#include <corosync/coroapi.h>
> -#include <corosync/icmap.h>
> -#include <corosync/corodefs.h>
> -
> -LOGSYS_DECLARE_SUBSYS ("CFG");
> -
> -enum cfg_message_req_types {
> -        MESSAGE_REQ_EXEC_CFG_RINGREENABLE = 0,
> -	MESSAGE_REQ_EXEC_CFG_KILLNODE = 1,
> -	MESSAGE_REQ_EXEC_CFG_SHUTDOWN = 2,
> -	MESSAGE_REQ_EXEC_CFG_CRYPTO_SET = 3
> -};
> -
> -#define DEFAULT_SHUTDOWN_TIMEOUT 5
> -
> -static struct list_head trackers_list;
> -
> -/*
> - * Variables controlling a requested shutdown
> - */
> -static corosync_timer_handle_t shutdown_timer;
> -static struct cfg_info *shutdown_con;
> -static uint32_t shutdown_flags;
> -static int shutdown_yes;
> -static int shutdown_no;
> -static int shutdown_expected;
> -
> -struct cfg_info
> -{
> -	struct list_head list;
> -	void *conn;
> -	void *tracker_conn;
> -	enum {SHUTDOWN_REPLY_UNKNOWN, SHUTDOWN_REPLY_YES, SHUTDOWN_REPLY_NO} shutdown_reply;
> -};
> -
> -static void cfg_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id);
> -
> -static int cfg_exec_init_fn (struct corosync_api_v1 *corosync_api_v1);
> -
> -static struct corosync_api_v1 *api;
> -
> -static int cfg_lib_init_fn (void *conn);
> -
> -static int cfg_lib_exit_fn (void *conn);
> -
> -static void message_handler_req_exec_cfg_ringreenable (
> -        const void *message,
> -        unsigned int nodeid);
> -
> -static void message_handler_req_exec_cfg_killnode (
> -        const void *message,
> -        unsigned int nodeid);
> -
> -static void message_handler_req_exec_cfg_shutdown (
> -        const void *message,
> -        unsigned int nodeid);
> -
> -static void message_handler_req_exec_cfg_crypto_set (
> -        const void *message,
> -        unsigned int nodeid);
> -
> -static void exec_cfg_killnode_endian_convert (void *msg);
> -
> -static void message_handler_req_lib_cfg_ringstatusget (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_ringreenable (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_statetrack (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_statetrackstop (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_administrativestateset (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_administrativestateget (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_serviceload (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_serviceunload (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_killnode (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_tryshutdown (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_replytoshutdown (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_get_node_addrs (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_local_get (
> -	void *conn,
> -	const void *msg);
> -
> -static void message_handler_req_lib_cfg_crypto_set (
> -	void *conn,
> -	const void *msg);
> -
> -/*
> - * Service Handler Definition
> - */
> -static struct corosync_lib_handler cfg_lib_engine[] =
> -{
> -	{ /* 0 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_ringstatusget,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 1 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_ringreenable,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 2 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_statetrack,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 3 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_statetrackstop,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 4 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_administrativestateset,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 5 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_administrativestateget,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 6 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_serviceload,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 7 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_serviceunload,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 8 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_killnode,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 9 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_tryshutdown,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 10 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_replytoshutdown,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 11 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_get_node_addrs,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 12 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_local_get,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 13 */
> -		.lib_handler_fn		= message_handler_req_lib_cfg_crypto_set,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	}
> -};
> -
> -static struct corosync_exec_handler cfg_exec_engine[] =
> -{
> -	{ /* 0 */
> -		.exec_handler_fn = message_handler_req_exec_cfg_ringreenable,
> -	},
> -	{ /* 1 */
> -		.exec_handler_fn = message_handler_req_exec_cfg_killnode,
> -		.exec_endian_convert_fn	= exec_cfg_killnode_endian_convert
> -	},
> -	{ /* 2 */
> -		.exec_handler_fn = message_handler_req_exec_cfg_shutdown,
> -	},
> -	{ /* 3 */
> -		.exec_handler_fn = message_handler_req_exec_cfg_crypto_set,
> -	}
> -};
> -
> -/*
> - * Exports the interface for the service
> - */
> -struct corosync_service_engine cfg_service_engine = {
> -	.name					= "corosync configuration service",
> -	.id					= CFG_SERVICE,
> -	.priority				= 1,
> -	.private_data_size			= sizeof(struct cfg_info),
> -	.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> -	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> -	.lib_init_fn				= cfg_lib_init_fn,
> -	.lib_exit_fn				= cfg_lib_exit_fn,
> -	.lib_engine				= cfg_lib_engine,
> -	.lib_engine_count			= sizeof (cfg_lib_engine) / sizeof (struct corosync_lib_handler),
> -	.exec_init_fn				= cfg_exec_init_fn,
> -	.exec_engine				= cfg_exec_engine,
> -	.exec_engine_count			= sizeof (cfg_exec_engine) / sizeof (struct corosync_exec_handler),
> -	.confchg_fn				= cfg_confchg_fn,
> -	.sync_mode				= CS_SYNC_V1
> -};
> -
> -/*
> - * Dynamic Loader definition
> - */
> -static struct corosync_service_engine *cfg_get_service_engine_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 cfg_service_engine_iface = {
> -	.corosync_get_service_engine_ver0	= cfg_get_service_engine_ver0
> -};
> -
> -static struct lcr_iface corosync_cfg_ver0[1] = {
> -	{
> -		.name				= "corosync_cfg",
> -		.version			= 0,
> -		.versions_replace		= 0,
> -		.versions_replace_count		= 0,
> -		.dependencies			= 0,
> -		.dependency_count		= 0,
> -		.constructor			= NULL,
> -		.destructor			= NULL,
> -		.interfaces			= NULL
> -	}
> -};
> -
> -static struct lcr_comp cfg_comp_ver0 = {
> -	.iface_count				= 1,
> -	.ifaces					= corosync_cfg_ver0
> -};
> -
> -static struct corosync_service_engine *cfg_get_service_engine_ver0 (void)
> -{
> -	return (&cfg_service_engine);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_interfaces_set (&corosync_cfg_ver0[0], &cfg_service_engine_iface);
> -
> -	lcr_component_register (&cfg_comp_ver0);
> -}
> -
> -struct req_exec_cfg_ringreenable {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -        mar_message_source_t source __attribute__((aligned(8)));
> -};
> -
> -struct req_exec_cfg_killnode {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -        mar_uint32_t nodeid __attribute__((aligned(8)));
> -	mar_name_t reason __attribute__((aligned(8)));
> -};
> -
> -struct req_exec_cfg_crypto_set {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -	mar_uint32_t type __attribute__((aligned(8)));
> -};
> -
> -struct req_exec_cfg_shutdown {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -};
> -
> -/* IMPL */
> -
> -static int cfg_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api_v1)
> -{
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -
> -	api = corosync_api_v1;
> -
> -	list_init(&trackers_list);
> -	return (0);
> -}
> -
> -static void cfg_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id)
> -{
> -}
> -
> -/*
> - * Tell other nodes we are shutting down
> - */
> -static int send_shutdown(void)
> -{
> -	struct req_exec_cfg_shutdown req_exec_cfg_shutdown;
> -	struct iovec iovec;
> -
> -	ENTER();
> -	req_exec_cfg_shutdown.header.size =
> -		sizeof (struct req_exec_cfg_shutdown);
> -	req_exec_cfg_shutdown.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> -		MESSAGE_REQ_EXEC_CFG_SHUTDOWN);
> -
> -	iovec.iov_base = (char *)&req_exec_cfg_shutdown;
> -	iovec.iov_len = sizeof (struct req_exec_cfg_shutdown);
> -
> -	assert (api->totem_mcast (&iovec, 1, TOTEM_SAFE) == 0);
> -
> -	LEAVE();
> -	return 0;
> -}
> -
> -static void send_test_shutdown(void *only_conn, void *exclude_conn, int status)
> -{
> -	struct res_lib_cfg_testshutdown res_lib_cfg_testshutdown;
> -	struct list_head *iter;
> -
> -	ENTER();
> -	res_lib_cfg_testshutdown.header.size = sizeof(struct res_lib_cfg_testshutdown);
> -	res_lib_cfg_testshutdown.header.id = MESSAGE_RES_CFG_TESTSHUTDOWN;
> -	res_lib_cfg_testshutdown.header.error = status;
> -	res_lib_cfg_testshutdown.flags = shutdown_flags;
> -
> -	if (only_conn) {
> -		TRACE1("sending testshutdown to only %p", only_conn);
> -		api->ipc_dispatch_send(only_conn, &res_lib_cfg_testshutdown,
> -				       sizeof(res_lib_cfg_testshutdown));
> -	} else {
> -		for (iter = trackers_list.next; iter != &trackers_list; iter = iter->next) {
> -			struct cfg_info *ci = list_entry(iter, struct cfg_info, list);
> -
> -			if (ci->conn != exclude_conn) {
> -				TRACE1("sending testshutdown to %p", ci->tracker_conn);
> -				api->ipc_dispatch_send(ci->tracker_conn, &res_lib_cfg_testshutdown,
> -						       sizeof(res_lib_cfg_testshutdown));
> -			}
> -		}
> -	}
> -	LEAVE();
> -}
> -
> -static void check_shutdown_status(void)
> -{
> -	ENTER();
> -
> -	/*
> -	 * Shutdown client might have gone away
> -	 */
> -	if (!shutdown_con) {
> -		LEAVE();
> -		return;
> -	}
> -
> -	/*
> -	 * All replies safely gathered in ?
> -	 */
> -	if (shutdown_yes + shutdown_no >= shutdown_expected) {
> -		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> -
> -		api->timer_delete(shutdown_timer);
> -
> -		if (shutdown_yes >= shutdown_expected ||
> -		    shutdown_flags == CFG_SHUTDOWN_FLAG_REGARDLESS) {
> -			TRACE1("shutdown confirmed");
> -
> -			res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> -			res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> -			res_lib_cfg_tryshutdown.header.error = CS_OK;
> -
> -			/*
> -			 * Tell originator that shutdown was confirmed
> -			 */
> -			api->ipc_response_send(shutdown_con->conn, &res_lib_cfg_tryshutdown,
> -						    sizeof(res_lib_cfg_tryshutdown));
> -			shutdown_con = NULL;
> -
> -			/*
> -			 * Tell other nodes we are going down
> -			 */
> -			send_shutdown();
> -
> -		}
> -		else {
> -
> -			TRACE1("shutdown cancelled");
> -			res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> -			res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> -			res_lib_cfg_tryshutdown.header.error = CS_ERR_BUSY;
> -
> -			/*
> -			 * Tell originator that shutdown was cancelled
> -			 */
> -			api->ipc_response_send(shutdown_con->conn, &res_lib_cfg_tryshutdown,
> -						    sizeof(res_lib_cfg_tryshutdown));
> -			shutdown_con = NULL;
> -		}
> -
> -		log_printf(LOGSYS_LEVEL_DEBUG, "shutdown decision is: (yes count: %d, no count: %d) flags=%x\n", shutdown_yes, shutdown_no, shutdown_flags);
> -	}
> -	LEAVE();
> -}
> -
> -
> -/*
> - * Not all nodes responded to the shutdown (in time)
> - */
> -static void shutdown_timer_fn(void *arg)
> -{
> -	ENTER();
> -
> -	/*
> -	 * Mark undecideds as "NO"
> -	 */
> -	shutdown_no = shutdown_expected;
> -	check_shutdown_status();
> -
> -	send_test_shutdown(NULL, NULL, CS_ERR_TIMEOUT);
> -	LEAVE();
> -}
> -
> -static void remove_ci_from_shutdown(struct cfg_info *ci)
> -{
> -	ENTER();
> -
> -	/*
> -	 * If the controlling shutdown process has quit, then cancel the
> -	 * shutdown session
> -	 */
> -	if (ci == shutdown_con) {
> -		shutdown_con = NULL;
> -		api->timer_delete(shutdown_timer);
> -	}
> -
> -	if (!list_empty(&ci->list)) {
> -		list_del(&ci->list);
> -		list_init(&ci->list);
> -
> -		/*
> -		 * Remove our option
> -		 */
> -		if (shutdown_con) {
> -			if (ci->shutdown_reply == SHUTDOWN_REPLY_YES)
> -				shutdown_yes--;
> -			if (ci->shutdown_reply == SHUTDOWN_REPLY_NO)
> -				shutdown_no--;
> -		}
> -
> -		/*
> -		 * If we are leaving, then that's an implicit YES to shutdown
> -		 */
> -		ci->shutdown_reply = SHUTDOWN_REPLY_YES;
> -		shutdown_yes++;
> -
> -		check_shutdown_status();
> -	}
> -	LEAVE();
> -}
> -
> -
> -int cfg_lib_exit_fn (void *conn)
> -{
> -	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> -
> -	ENTER();
> -	remove_ci_from_shutdown(ci);
> -	LEAVE();
> -	return (0);
> -}
> -
> -static int cfg_lib_init_fn (void *conn)
> -{
> -	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> -
> -	ENTER();
> -	list_init(&ci->list);
> -	LEAVE();
> -
> -        return (0);
> -}
> -
> -/*
> - * Executive message handlers
> - */
> -static void message_handler_req_exec_cfg_ringreenable (
> -        const void *message,
> -        unsigned int nodeid)
> -{
> -	const struct req_exec_cfg_ringreenable *req_exec_cfg_ringreenable
> -	  = message;
> -	struct res_lib_cfg_ringreenable res_lib_cfg_ringreenable;
> -
> -	ENTER();
> -	api->totem_ring_reenable ();
> -        if (api->ipc_source_is_local(&req_exec_cfg_ringreenable->source)) {
> -		res_lib_cfg_ringreenable.header.id = MESSAGE_RES_CFG_RINGREENABLE;
> -		res_lib_cfg_ringreenable.header.size = sizeof (struct res_lib_cfg_ringreenable);
> -		res_lib_cfg_ringreenable.header.error = CS_OK;
> -		api->ipc_response_send (
> -			req_exec_cfg_ringreenable->source.conn,
> -			&res_lib_cfg_ringreenable,
> -			sizeof (struct res_lib_cfg_ringreenable));
> -
> -		api->ipc_refcnt_dec(req_exec_cfg_ringreenable->source.conn);
> -	}
> -	LEAVE();
> -}
> -
> -static void exec_cfg_killnode_endian_convert (void *msg)
> -{
> -	struct req_exec_cfg_killnode *req_exec_cfg_killnode =
> -		(struct req_exec_cfg_killnode *)msg;
> -	ENTER();
> -
> -	swab_mar_name_t(&req_exec_cfg_killnode->reason);
> -	LEAVE();
> -}
> -
> -
> -static void message_handler_req_exec_cfg_killnode (
> -        const void *message,
> -        unsigned int nodeid)
> -{
> -	const struct req_exec_cfg_killnode *req_exec_cfg_killnode = message;
> -	cs_name_t reason;
> -
> -	ENTER();
> -	log_printf(LOGSYS_LEVEL_DEBUG, "request to kill node %d(us=%d): %s\n",  req_exec_cfg_killnode->nodeid, api->totem_nodeid_get(), reason.value);
> -        if (req_exec_cfg_killnode->nodeid == api->totem_nodeid_get()) {
> -		marshall_from_mar_name_t(&reason, &req_exec_cfg_killnode->reason);
> -		log_printf(LOGSYS_LEVEL_NOTICE, "Killed by node %d: %s\n",
> -			   nodeid, reason.value);
> -		corosync_fatal_error(COROSYNC_FATAL_ERROR_EXIT);
> -	}
> -	LEAVE();
> -}
> -
> -/*
> - * Self shutdown
> - */
> -static void message_handler_req_exec_cfg_shutdown (
> -        const void *message,
> -        unsigned int nodeid)
> -{
> -	ENTER();
> -
> -	log_printf(LOGSYS_LEVEL_NOTICE, "Node %d was shut down by sysadmin\n", nodeid);
> -	if (nodeid == api->totem_nodeid_get()) {
> -		api->shutdown_request();
> -	}
> -	LEAVE();
> -}
> -
> -static void message_handler_req_exec_cfg_crypto_set (
> -        const void *message,
> -        unsigned int nodeid)
> -{
> -	const struct req_exec_cfg_crypto_set *req_exec_cfg_crypto_set = message;
> -	ENTER();
> -
> -	log_printf(LOGSYS_LEVEL_NOTICE, "Node %d requested set crypto to %d\n", nodeid, req_exec_cfg_crypto_set->type);
> -
> -	api->totem_crypto_set(req_exec_cfg_crypto_set->type);
> -	LEAVE();
> -}
> -
> -
> -/*
> - * Library Interface Implementation
> - */
> -static void message_handler_req_lib_cfg_ringstatusget (
> -	void *conn,
> -	const void *msg)
> -{
> -	struct res_lib_cfg_ringstatusget res_lib_cfg_ringstatusget;
> -	struct totem_ip_address interfaces[INTERFACE_MAX];
> -	unsigned int iface_count;
> -	char **status;
> -	const char *totem_ip_string;
> -	unsigned int i;
> -
> -	ENTER();
> -
> -	res_lib_cfg_ringstatusget.header.id = MESSAGE_RES_CFG_RINGSTATUSGET;
> -	res_lib_cfg_ringstatusget.header.size = sizeof (struct res_lib_cfg_ringstatusget);
> -	res_lib_cfg_ringstatusget.header.error = CS_OK;
> -
> -	api->totem_ifaces_get (
> -		api->totem_nodeid_get(),
> -		interfaces,
> -		&status,
> -		&iface_count);
> -
> -	res_lib_cfg_ringstatusget.interface_count = iface_count;
> -
> -	for (i = 0; i < iface_count; i++) {
> -		totem_ip_string
> -		  = (const char *)api->totem_ip_print (&interfaces[i]);
> -		strcpy ((char *)&res_lib_cfg_ringstatusget.interface_status[i],
> -			status[i]);
> -		strcpy ((char *)&res_lib_cfg_ringstatusget.interface_name[i],
> -			totem_ip_string);
> -	}
> -	api->ipc_response_send (
> -		conn,
> -		&res_lib_cfg_ringstatusget,
> -		sizeof (struct res_lib_cfg_ringstatusget));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_cfg_ringreenable (
> -	void *conn,
> -	const void *msg)
> -{
> -	struct req_exec_cfg_ringreenable req_exec_cfg_ringreenable;
> -	struct iovec iovec;
> -
> -	ENTER();
> -	req_exec_cfg_ringreenable.header.size =
> -		sizeof (struct req_exec_cfg_ringreenable);
> -	req_exec_cfg_ringreenable.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> -		MESSAGE_REQ_EXEC_CFG_RINGREENABLE);
> -	api->ipc_source_set (&req_exec_cfg_ringreenable.source, conn);
> -	api->ipc_refcnt_inc(conn);
> -
> -	iovec.iov_base = (char *)&req_exec_cfg_ringreenable;
> -	iovec.iov_len = sizeof (struct req_exec_cfg_ringreenable);
> -
> -	assert (api->totem_mcast (&iovec, 1, TOTEM_SAFE) == 0);
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_cfg_statetrack (
> -	void *conn,
> -	const void *msg)
> -{
> -	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> -	struct res_lib_cfg_statetrack res_lib_cfg_statetrack;
> -
> -	ENTER();
> -
> -	/*
> -	 * We only do shutdown tracking at the moment
> -	 */
> -	if (list_empty(&ci->list)) {
> -		list_add(&ci->list, &trackers_list);
> -		ci->tracker_conn = conn;
> -
> -		if (shutdown_con) {
> -			/*
> -			 * Shutdown already in progress, ask the newcomer's opinion
> -			 */
> -			ci->shutdown_reply = SHUTDOWN_REPLY_UNKNOWN;
> -			shutdown_expected++;
> -			send_test_shutdown(conn, NULL, CS_OK);
> -		}
> -	}
> -
> -	res_lib_cfg_statetrack.header.size = sizeof(struct res_lib_cfg_statetrack);
> -	res_lib_cfg_statetrack.header.id = MESSAGE_RES_CFG_STATETRACKSTART;
> -	res_lib_cfg_statetrack.header.error = CS_OK;
> -
> -	api->ipc_response_send(conn, &res_lib_cfg_statetrack,
> -				    sizeof(res_lib_cfg_statetrack));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_cfg_statetrackstop (
> -	void *conn,
> -	const void *msg)
> -{
> -	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> -//	struct req_lib_cfg_statetrackstop *req_lib_cfg_statetrackstop = (struct req_lib_cfg_statetrackstop *)message;
> -
> -	ENTER();
> -	remove_ci_from_shutdown(ci);
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_cfg_administrativestateset (
> -	void *conn,
> -	const void *msg)
> -{
> -//	struct req_lib_cfg_administrativestateset *req_lib_cfg_administrativestateset = (struct req_lib_cfg_administrativestateset *)message;
> -
> -	ENTER();
> -	LEAVE();
> -}
> -static void message_handler_req_lib_cfg_administrativestateget (
> -	void *conn,
> -	const void *msg)
> -{
> -//	struct req_lib_cfg_administrativestateget *req_lib_cfg_administrativestateget = (struct req_lib_cfg_administrativestateget *)message;
> -	ENTER();
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_cfg_serviceload (
> -	void *conn,
> -	const void *msg)
> -{
> -	const struct req_lib_cfg_serviceload *req_lib_cfg_serviceload = msg;
> -	struct res_lib_cfg_serviceload res_lib_cfg_serviceload;
> -
> -	ENTER();
> -	api->service_link_and_init (
> -		api,
> -		(const char *)req_lib_cfg_serviceload->service_name,
> -		req_lib_cfg_serviceload->service_ver);
> -
> -	res_lib_cfg_serviceload.header.id = MESSAGE_RES_CFG_SERVICEUNLOAD;
> -	res_lib_cfg_serviceload.header.size = sizeof (struct res_lib_cfg_serviceload);
> -	res_lib_cfg_serviceload.header.error = CS_OK;
> -	api->ipc_response_send (
> -		conn,
> -		&res_lib_cfg_serviceload,
> -		sizeof (struct res_lib_cfg_serviceload));
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_cfg_serviceunload (
> -	void *conn,
> -	const void *msg)
> -{
> -	const struct req_lib_cfg_serviceunload *req_lib_cfg_serviceunload = msg;
> -	struct res_lib_cfg_serviceunload res_lib_cfg_serviceunload;
> -
> -	ENTER();
> -	api->service_unlink_and_exit (
> -		api,
> -		(const char *)req_lib_cfg_serviceunload->service_name,
> -		req_lib_cfg_serviceunload->service_ver);
> -	res_lib_cfg_serviceunload.header.id = MESSAGE_RES_CFG_SERVICEUNLOAD;
> -	res_lib_cfg_serviceunload.header.size = sizeof (struct res_lib_cfg_serviceunload);
> -	res_lib_cfg_serviceunload.header.error = CS_OK;
> -	api->ipc_response_send (
> -		conn,
> -		&res_lib_cfg_serviceunload,
> -		sizeof (struct res_lib_cfg_serviceunload));
> -	LEAVE();
> -}
> -
> -
> -static void message_handler_req_lib_cfg_killnode (
> -	void *conn,
> -	const void *msg)
> -{
> -	const struct req_lib_cfg_killnode *req_lib_cfg_killnode = msg;
> -	struct res_lib_cfg_killnode res_lib_cfg_killnode;
> -	struct req_exec_cfg_killnode req_exec_cfg_killnode;
> -	struct iovec iovec;
> -
> -	ENTER();
> -	req_exec_cfg_killnode.header.size =
> -		sizeof (struct req_exec_cfg_killnode);
> -	req_exec_cfg_killnode.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> -		MESSAGE_REQ_EXEC_CFG_KILLNODE);
> -	req_exec_cfg_killnode.nodeid = req_lib_cfg_killnode->nodeid;
> -	marshall_to_mar_name_t(&req_exec_cfg_killnode.reason, &req_lib_cfg_killnode->reason);
> -
> -	iovec.iov_base = (char *)&req_exec_cfg_killnode;
> -	iovec.iov_len = sizeof (struct req_exec_cfg_killnode);
> -
> -	(void)api->totem_mcast (&iovec, 1, TOTEM_SAFE);
> -
> -	res_lib_cfg_killnode.header.size = sizeof(struct res_lib_cfg_killnode);
> -	res_lib_cfg_killnode.header.id = MESSAGE_RES_CFG_KILLNODE;
> -	res_lib_cfg_killnode.header.error = CS_OK;
> -
> -	api->ipc_response_send(conn, &res_lib_cfg_killnode,
> -				    sizeof(res_lib_cfg_killnode));
> -
> -	LEAVE();
> -}
> -
> -
> -static void message_handler_req_lib_cfg_tryshutdown (
> -	void *conn,
> -	const void *msg)
> -{
> -	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> -	const struct req_lib_cfg_tryshutdown *req_lib_cfg_tryshutdown = msg;
> -	struct list_head *iter;
> -
> -	ENTER();
> -
> -	if (req_lib_cfg_tryshutdown->flags == CFG_SHUTDOWN_FLAG_IMMEDIATE) {
> -		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> -
> -		/*
> -		 * Tell other nodes
> -		 */
> -		send_shutdown();
> -
> -		res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> -		res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> -		res_lib_cfg_tryshutdown.header.error = CS_OK;
> -		api->ipc_response_send(conn, &res_lib_cfg_tryshutdown,
> -					    sizeof(res_lib_cfg_tryshutdown));
> -
> -		LEAVE();
> -		return;
> -	}
> -
> -	/*
> -	 * Shutdown in progress, return an error
> -	 */
> -	if (shutdown_con) {
> -		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> -
> -		res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> -		res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> -		res_lib_cfg_tryshutdown.header.error = CS_ERR_EXIST;
> -
> -		api->ipc_response_send(conn, &res_lib_cfg_tryshutdown,
> -					    sizeof(res_lib_cfg_tryshutdown));
> -
> -
> -		LEAVE();
> -
> -		return;
> -	}
> -
> -	ci->conn = conn;
> -	shutdown_con = (struct cfg_info *)api->ipc_private_data_get (conn);
> -	shutdown_flags = req_lib_cfg_tryshutdown->flags;
> -	shutdown_yes = 0;
> -	shutdown_no = 0;
> -
> -	/*
> -	 * Count the number of listeners
> -	 */
> -	shutdown_expected = 0;
> -
> -	for (iter = trackers_list.next; iter != &trackers_list; iter = iter->next) {
> -		struct cfg_info *testci = list_entry(iter, struct cfg_info, list);
> -		/*
> -		 * It is assumed that we will allow shutdown
> -		 */
> -		if (testci != ci) {
> -			testci->shutdown_reply = SHUTDOWN_REPLY_UNKNOWN;
> -			shutdown_expected++;
> -		}
> -	}
> -
> -	/*
> -	 * If no-one is listening for events then we can just go down now
> -	 */
> -	if (shutdown_expected == 0) {
> -		struct res_lib_cfg_tryshutdown res_lib_cfg_tryshutdown;
> -
> -		res_lib_cfg_tryshutdown.header.size = sizeof(struct res_lib_cfg_tryshutdown);
> -		res_lib_cfg_tryshutdown.header.id = MESSAGE_RES_CFG_TRYSHUTDOWN;
> -		res_lib_cfg_tryshutdown.header.error = CS_OK;
> -
> -		/*
> -		 * Tell originator that shutdown was confirmed
> -		 */
> -		api->ipc_response_send(conn, &res_lib_cfg_tryshutdown,
> -				       sizeof(res_lib_cfg_tryshutdown));
> -
> -		send_shutdown();
> -		LEAVE();
> -		return;
> -	}
> -	else {
> -		unsigned int shutdown_timeout = DEFAULT_SHUTDOWN_TIMEOUT;
> -
> -		/*
> -		 * Look for a shutdown timeout in configuration map
> -		 */
> -		icmap_get_uint32("cfg.shutdown_timeout", &shutdown_timeout);
> -
> -		/*
> -		 * Start the timer. If we don't get a full set of replies before this goes
> -		 * off we'll cancel the shutdown
> -		 */
> -		api->timer_add_duration((unsigned long long)shutdown_timeout*1000000000, NULL,
> -					shutdown_timer_fn, &shutdown_timer);
> -
> -		/*
> -		 * Tell the users we would like to shut down
> -		 */
> -		send_test_shutdown(NULL, conn, CS_OK);
> -	}
> -
> -	/*
> -	 * We don't send a reply to the caller here.
> -	 * We send it once we know whether we can shut down or not
> -	 */
> -
> -	LEAVE();
> -}
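
Side note for anyone reading this hunk: the tryshutdown path above is a small vote. The handler counts the registered trackers, asks each for an opinion, arms a timeout, and only answers the caller once the outcome is known. A standalone model of that bookkeeping (plain C, not the corosync API; check_shutdown_status() is outside this hunk, so the decision rule below, where any refusal vetoes a non-forced shutdown, is an assumption):

/*
 * Standalone model of the shutdown vote kept by the handler above.
 */
#include <stdio.h>

enum vote_result { VOTE_PENDING, VOTE_ALLOWED, VOTE_REFUSED };

struct shutdown_vote {
	unsigned int expected;	/* trackers asked for an opinion */
	unsigned int yes;
	unsigned int no;
};

static enum vote_result vote_check(const struct shutdown_vote *v)
{
	if (v->no > 0)
		return VOTE_REFUSED;	/* one veto cancels the shutdown */
	if (v->yes >= v->expected)
		return VOTE_ALLOWED;	/* every tracker agreed */
	return VOTE_PENDING;		/* still waiting, or the timer fires first */
}

int main(void)
{
	struct shutdown_vote v = { .expected = 2, .yes = 0, .no = 0 };

	v.yes++;
	printf("after one yes: %d\n", vote_check(&v));	/* 0 = VOTE_PENDING */
	v.yes++;
	printf("after two yes: %d\n", vote_check(&v));	/* 1 = VOTE_ALLOWED */
	return 0;
}
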
> -
> -static void message_handler_req_lib_cfg_replytoshutdown (
> -	void *conn,
> -	const void *msg)
> -{
> -	struct cfg_info *ci = (struct cfg_info *)api->ipc_private_data_get (conn);
> -	const struct req_lib_cfg_replytoshutdown *req_lib_cfg_replytoshutdown = msg;
> -	struct res_lib_cfg_replytoshutdown res_lib_cfg_replytoshutdown;
> -	int status = CS_OK;
> -
> -	ENTER();
> -	if (!shutdown_con) {
> -		status = CS_ERR_ACCESS;
> -		goto exit_fn;
> -	}
> -
> -	if (req_lib_cfg_replytoshutdown->response) {
> -		shutdown_yes++;
> -		ci->shutdown_reply = SHUTDOWN_REPLY_YES;
> -	}
> -	else {
> -		shutdown_no++;
> -		ci->shutdown_reply = SHUTDOWN_REPLY_NO;
> -	}
> -	check_shutdown_status();
> -
> -exit_fn:
> -	res_lib_cfg_replytoshutdown.header.error = status;
> -	res_lib_cfg_replytoshutdown.header.id = MESSAGE_RES_CFG_REPLYTOSHUTDOWN;
> -	res_lib_cfg_replytoshutdown.header.size = sizeof(res_lib_cfg_replytoshutdown);
> -
> -	api->ipc_response_send(conn, &res_lib_cfg_replytoshutdown,
> -			       sizeof(res_lib_cfg_replytoshutdown));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_cfg_get_node_addrs (void *conn,
> -							const void *msg)
> -{
> -	struct totem_ip_address node_ifs[INTERFACE_MAX];
> -	char buf[PIPE_BUF];
> -	char **status;
> -	unsigned int num_interfaces = 0;
> -	int ret = CS_OK;
> -	int i;
> -	const struct req_lib_cfg_get_node_addrs *req_lib_cfg_get_node_addrs = msg;
> -	struct res_lib_cfg_get_node_addrs *res_lib_cfg_get_node_addrs = (struct res_lib_cfg_get_node_addrs *)buf;
> -	unsigned int nodeid = req_lib_cfg_get_node_addrs->nodeid;
> -	char *addr_buf;
> -
> -	if (nodeid == 0)
> -		nodeid = api->totem_nodeid_get();
> -
> -	api->totem_ifaces_get(nodeid, node_ifs, &status, &num_interfaces);
> -
> -	res_lib_cfg_get_node_addrs->header.size = sizeof(struct res_lib_cfg_get_node_addrs) + (num_interfaces * TOTEMIP_ADDRLEN);
> -	res_lib_cfg_get_node_addrs->header.id = MESSAGE_RES_CFG_GET_NODE_ADDRS;
> -	res_lib_cfg_get_node_addrs->header.error = ret;
> -	res_lib_cfg_get_node_addrs->num_addrs = num_interfaces;
> -	if (num_interfaces) {
> -		res_lib_cfg_get_node_addrs->family = node_ifs[0].family;
> -		for (i = 0, addr_buf = (char *)res_lib_cfg_get_node_addrs->addrs;
> -		    i < num_interfaces; i++, addr_buf += TOTEMIP_ADDRLEN) {
> -			memcpy(addr_buf, node_ifs[i].addr, TOTEMIP_ADDRLEN);
> -		}
> -	}
> -	else {
> -		res_lib_cfg_get_node_addrs->header.error = CS_ERR_NOT_EXIST;
> -	}
> -	api->ipc_response_send(conn, res_lib_cfg_get_node_addrs, res_lib_cfg_get_node_addrs->header.size);
> -}
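
The reply built above is a fixed header followed by num_addrs slots of TOTEMIP_ADDRLEN bytes each. A standalone sketch of that layout (ADDRLEN and the struct below are illustrative stand-ins, not the real corosync definitions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ADDRLEN 16				/* stand-in for TOTEMIP_ADDRLEN */

struct node_addrs_reply {
	unsigned int num_addrs;
	char addrs[];				/* num_addrs * ADDRLEN bytes follow */
};

static void pack_addrs(struct node_addrs_reply *r,
		       char ifaces[][ADDRLEN], unsigned int n)
{
	char *p = r->addrs;
	unsigned int i;

	r->num_addrs = n;
	for (i = 0; i < n; i++, p += ADDRLEN)
		memcpy(p, ifaces[i], ADDRLEN);	/* one fixed-size slot per interface */
}

int main(void)
{
	char ifaces[2][ADDRLEN] = { "10.0.0.1", "10.0.0.2" };
	struct node_addrs_reply *r = malloc(sizeof(*r) + sizeof(ifaces));

	if (r == NULL)
		return 1;
	pack_addrs(r, ifaces, 2);
	printf("%u address slot(s), first is %s\n", r->num_addrs, r->addrs);
	free(r);
	return 0;
}
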
> -
> -static void message_handler_req_lib_cfg_local_get (void *conn, const void *msg)
> -{
> -	struct res_lib_cfg_local_get res_lib_cfg_local_get;
> -
> -	res_lib_cfg_local_get.header.size = sizeof(res_lib_cfg_local_get);
> -	res_lib_cfg_local_get.header.id = MESSAGE_RES_CFG_LOCAL_GET;
> -	res_lib_cfg_local_get.header.error = CS_OK;
> -	res_lib_cfg_local_get.local_nodeid = api->totem_nodeid_get ();
> -
> -	api->ipc_response_send(conn, &res_lib_cfg_local_get,
> -		sizeof(res_lib_cfg_local_get));
> -}
> -
> -
> -static void message_handler_req_lib_cfg_crypto_set (
> -	void *conn,
> -	const void *msg)
> -{
> -	const struct req_lib_cfg_crypto_set *req_lib_cfg_crypto_set = msg;
> -	struct res_lib_cfg_crypto_set res_lib_cfg_crypto_set;
> -	struct req_exec_cfg_crypto_set req_exec_cfg_crypto_set;
> -	struct iovec iovec;
> -	int ret = CS_ERR_INVALID_PARAM;
> -
> -	req_exec_cfg_crypto_set.header.size =
> -		sizeof (struct req_exec_cfg_crypto_set);
> -	req_exec_cfg_crypto_set.header.id = SERVICE_ID_MAKE (CFG_SERVICE,
> -		MESSAGE_REQ_EXEC_CFG_CRYPTO_SET);
> -
> -	/*
> -	 * Set it locally first so we can tell if it is allowed
> -	 */
> -	if (api->totem_crypto_set(req_lib_cfg_crypto_set->type) == 0) {
> -
> -		req_exec_cfg_crypto_set.type = req_lib_cfg_crypto_set->type;
> -
> -		iovec.iov_base = (char *)&req_exec_cfg_crypto_set;
> -		iovec.iov_len = sizeof (struct req_exec_cfg_crypto_set);
> -		assert (api->totem_mcast (&iovec, 1, TOTEM_SAFE) == 0);
> -		ret = CS_OK;
> -	}
> -
> -	res_lib_cfg_crypto_set.header.size = sizeof(res_lib_cfg_crypto_set);
> -	res_lib_cfg_crypto_set.header.id = MESSAGE_RES_CFG_CRYPTO_SET;
> -	res_lib_cfg_crypto_set.header.error = ret;
> -
> -	api->ipc_response_send(conn, &res_lib_cfg_crypto_set,
> -		sizeof(res_lib_cfg_crypto_set));
> -}
> diff --git a/services/cmap.c b/services/cmap.c
> deleted file mode 100644
> index 3081173..0000000
> --- a/services/cmap.c
> +++ /dev/null
> @@ -1,645 +0,0 @@
> -/*
> - * Copyright (c) 2011 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Jan Friesse (jfriesse@xxxxxxxxxx)
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the Red Hat, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <sys/types.h>
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <stdlib.h>
> -#include <errno.h>
> -#include <unistd.h>
> -#include <poll.h>
> -#include <assert.h>
> -
> -#include <qb/qbloop.h>
> -#include <qb/qbipc_common.h>
> -
> -#include <corosync/corotypes.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/list.h>
> -#include <corosync/mar_gen.h>
> -#include <corosync/ipc_cmap.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/logsys.h>
> -#include <corosync/coroapi.h>
> -#include <corosync/icmap.h>
> -
> -#define hdb_error_to_cs(_result_) qb_to_cs_error(_result_)
> -
> -LOGSYS_DECLARE_SUBSYS ("CMAP");
> -
> -struct cmap_conn_info {
> -	struct hdb_handle_database iter_db;
> -	struct hdb_handle_database track_db;
> -};
> -
> -typedef uint64_t cmap_iter_handle_t;
> -typedef uint64_t cmap_track_handle_t;
> -
> -struct cmap_track_user_data {
> -	void *conn;
> -	cmap_track_handle_t track_handle;
> -	uint64_t track_inst_handle;
> -};
> -
> -static struct corosync_api_v1 *api;
> -
> -static int cmap_exec_init_fn (struct corosync_api_v1 *corosync_api);
> -static int cmap_exec_exit_fn(void);
> -
> -static int cmap_lib_init_fn (void *conn);
> -static int cmap_lib_exit_fn (void *conn);
> -
> -static void message_handler_req_lib_cmap_set(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_delete(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_get(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_adjust_int(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_iter_init(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_iter_next(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_iter_finalize(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_track_add(void *conn, const void *message);
> -static void message_handler_req_lib_cmap_track_delete(void *conn, const void *message);
> -
> -static void cmap_notify_fn(int32_t event,
> -		const char *key_name,
> -		struct icmap_notify_value new_val,
> -		struct icmap_notify_value old_val,
> -		void *user_data);
> -
> -/*
> - * Library Handler Definition
> - */
> -static struct corosync_lib_handler cmap_lib_engine[] =
> -{
> -	{ /* 0 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_set,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 1 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_delete,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 2 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_get,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 3 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_adjust_int,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 4 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_iter_init,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 5 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_iter_next,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 6 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_iter_finalize,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 7 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_track_add,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 8 */
> -		.lib_handler_fn				= message_handler_req_lib_cmap_track_delete,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -};
> -
> -
> -struct corosync_service_engine cmap_service_engine = {
> -	.name				        = "corosync configuration map access",
> -	.id					= CMAP_SERVICE,
> -	.priority				= 1,
> -	.private_data_size			= sizeof(struct cmap_conn_info),
> -	.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> -	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> -	.lib_init_fn				= cmap_lib_init_fn,
> -	.lib_exit_fn				= cmap_lib_exit_fn,
> -	.lib_engine				= cmap_lib_engine,
> -	.lib_engine_count			= sizeof (cmap_lib_engine) / sizeof (struct corosync_lib_handler),
> -	.exec_init_fn				= cmap_exec_init_fn,
> -	.exec_exit_fn				= cmap_exec_exit_fn,
> -};
> -
> -/*
> - * Dynamic loader definition
> - */
> -static struct corosync_service_engine *cmap_get_service_engine_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 cmap_service_engine_iface = {
> -	.corosync_get_service_engine_ver0		= cmap_get_service_engine_ver0
> -};
> -
> -static struct lcr_iface corosync_cmap_ver0[1] = {
> -	{
> -		.name				= "corosync_cmap",
> -		.version			= 0,
> -		.versions_replace		= 0,
> -		.versions_replace_count         = 0,
> -		.dependencies			= 0,
> -		.dependency_count		= 0,
> -		.constructor			= NULL,
> -		.destructor			= NULL,
> -		.interfaces			= NULL
> -	}
> -};
> -
> -static struct lcr_comp cmap_comp_ver0 = {
> -	.iface_count			= 1,
> -	.ifaces			        = corosync_cmap_ver0
> -};
> -
> -
> -static struct corosync_service_engine *cmap_get_service_engine_ver0 (void)
> -{
> -	return (&cmap_service_engine);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -        lcr_interfaces_set (&corosync_cmap_ver0[0], &cmap_service_engine_iface);
> -
> -	lcr_component_register (&cmap_comp_ver0);
> -}
> -
> -static int cmap_exec_exit_fn(void)
> -{
> -	return 0;
> -}
> -
> -static int cmap_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api)
> -{
> -
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -	api = corosync_api;
> -
> -	return (0);
> -}
> -
> -static int cmap_lib_init_fn (void *conn)
> -{
> -	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "lib_init_fn: conn=%p\n", conn);
> -
> -	api->ipc_refcnt_inc(conn);
> -
> -	memset(conn_info, 0, sizeof(*conn_info));
> -	hdb_create(&conn_info->iter_db);
> -	hdb_create(&conn_info->track_db);
> -
> -	return (0);
> -}
> -
> -static int cmap_lib_exit_fn (void *conn)
> -{
> -	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> -	hdb_handle_t iter_handle = 0;
> -	icmap_iter_t *iter;
> -	hdb_handle_t track_handle = 0;
> -	icmap_track_t *track;
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "exit_fn for conn=%p\n", conn);
> -
> -	hdb_iterator_reset(&conn_info->iter_db);
> -        while (hdb_iterator_next(&conn_info->iter_db,
> -                (void*)&iter, &iter_handle) == 0) {
> -
> -		icmap_iter_finalize(*iter);
> -
> -		(void)hdb_handle_put (&conn_info->iter_db, iter_handle);
> -        }
> -
> -	hdb_destroy(&conn_info->iter_db);
> -
> -	hdb_iterator_reset(&conn_info->track_db);
> -        while (hdb_iterator_next(&conn_info->track_db,
> -                (void*)&track, &track_handle) == 0) {
> -
> -		free(icmap_track_get_user_data(*track));
> -
> -		icmap_track_delete(*track);
> -
> -		(void)hdb_handle_put (&conn_info->track_db, track_handle);
> -        }
> -	hdb_destroy(&conn_info->track_db);
> -
> -	api->ipc_refcnt_dec(conn);
> -
> -	return (0);
> -}
> -
> -static void message_handler_req_lib_cmap_set(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_set *req_lib_cmap_set = message;
> -	struct res_lib_cmap_set res_lib_cmap_set;
> -	cs_error_t ret;
> -
> -	if (icmap_is_key_ro((char *)req_lib_cmap_set->key_name.value)) {
> -		ret = CS_ERR_ACCESS;
> -	} else {
> -		ret = icmap_set((char *)req_lib_cmap_set->key_name.value, &req_lib_cmap_set->value,
> -				req_lib_cmap_set->value_len, req_lib_cmap_set->type);
> -	}
> -
> -	memset(&res_lib_cmap_set, 0, sizeof(res_lib_cmap_set));
> -	res_lib_cmap_set.header.size = sizeof(res_lib_cmap_set);
> -	res_lib_cmap_set.header.id = MESSAGE_RES_CMAP_SET;
> -	res_lib_cmap_set.header.error = ret;
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_set, sizeof(res_lib_cmap_set));
> -}
> -
> -static void message_handler_req_lib_cmap_delete(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_set *req_lib_cmap_set = message;
> -	struct res_lib_cmap_delete res_lib_cmap_delete;
> -	cs_error_t ret;
> -
> -	if (icmap_is_key_ro((char *)req_lib_cmap_set->key_name.value)) {
> -		ret = CS_ERR_ACCESS;
> -	} else {
> -		ret = icmap_delete((char *)req_lib_cmap_set->key_name.value);
> -	}
> -
> -	memset(&res_lib_cmap_delete, 0, sizeof(res_lib_cmap_delete));
> -	res_lib_cmap_delete.header.size = sizeof(res_lib_cmap_delete);
> -	res_lib_cmap_delete.header.id = MESSAGE_RES_CMAP_DELETE;
> -	res_lib_cmap_delete.header.error = ret;
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_delete, sizeof(res_lib_cmap_delete));
> -}
> -
> -static void message_handler_req_lib_cmap_get(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_get *req_lib_cmap_get = message;
> -	struct res_lib_cmap_get *res_lib_cmap_get;
> -	struct res_lib_cmap_get error_res_lib_cmap_get;
> -	cs_error_t ret;
> -	size_t value_len;
> -	size_t res_lib_cmap_get_size;
> -	icmap_value_types_t type;
> -	void *value;
> -
> -	value_len = req_lib_cmap_get->value_len;
> -
> -	res_lib_cmap_get_size = sizeof(*res_lib_cmap_get) + value_len;
> -	res_lib_cmap_get = malloc(res_lib_cmap_get_size);
> -	if (res_lib_cmap_get == NULL) {
> -		ret = CS_ERR_NO_MEMORY;
> -		goto error_exit;
> -	}
> -
> -	memset(res_lib_cmap_get, 0, res_lib_cmap_get_size);
> -
> -	if (value_len > 0) {
> -		value = res_lib_cmap_get->value;
> -	} else {
> -		value = NULL;
> -	}
> -
> -	ret = icmap_get((char *)req_lib_cmap_get->key_name.value,
> -			value,
> -			&value_len,
> -			&type);
> -
> -	if (ret != CS_OK) {
> -		free(res_lib_cmap_get);
> -		goto error_exit;
> -	}
> -
> -	res_lib_cmap_get->header.size = res_lib_cmap_get_size;
> -	res_lib_cmap_get->header.id = MESSAGE_RES_CMAP_GET;
> -	res_lib_cmap_get->header.error = ret;
> -	res_lib_cmap_get->type = type;
> -	res_lib_cmap_get->value_len = value_len;
> -
> -	api->ipc_response_send(conn, res_lib_cmap_get, res_lib_cmap_get_size);
> -	free(res_lib_cmap_get);
> -
> -	return ;
> -
> -error_exit:
> -	memset(&error_res_lib_cmap_get, 0, sizeof(error_res_lib_cmap_get));
> -	error_res_lib_cmap_get.header.size = sizeof(error_res_lib_cmap_get);
> -	error_res_lib_cmap_get.header.id = MESSAGE_RES_CMAP_GET;
> -	error_res_lib_cmap_get.header.error = ret;
> -
> -	api->ipc_response_send(conn, &error_res_lib_cmap_get, sizeof(error_res_lib_cmap_get));
> -}
> -
> -static void message_handler_req_lib_cmap_adjust_int(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_adjust_int *req_lib_cmap_adjust_int = message;
> -	struct res_lib_cmap_adjust_int res_lib_cmap_adjust_int;
> -	cs_error_t ret;
> -
> -	ret = icmap_adjust_int((char *)req_lib_cmap_adjust_int->key_name.value, req_lib_cmap_adjust_int->step);
> -
> -	memset(&res_lib_cmap_adjust_int, 0, sizeof(res_lib_cmap_adjust_int));
> -	res_lib_cmap_adjust_int.header.size = sizeof(res_lib_cmap_adjust_int);
> -	res_lib_cmap_adjust_int.header.id = MESSAGE_RES_CMAP_ADJUST_INT;
> -	res_lib_cmap_adjust_int.header.error = ret;
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_adjust_int, sizeof(res_lib_cmap_adjust_int));
> -}
> -
> -static void message_handler_req_lib_cmap_iter_init(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_iter_init *req_lib_cmap_iter_init = message;
> -	struct res_lib_cmap_iter_init res_lib_cmap_iter_init;
> -	cs_error_t ret;
> -	icmap_iter_t iter;
> -	icmap_iter_t *hdb_iter;
> -	cmap_iter_handle_t handle;
> -	const char *prefix;
> -	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> -
> -	if (req_lib_cmap_iter_init->prefix.length > 0) {
> -		prefix = (char *)req_lib_cmap_iter_init->prefix.value;
> -	} else {
> -		prefix = NULL;
> -	}
> -
> -	iter = icmap_iter_init(prefix);
> -	if (iter == NULL) {
> -		ret = CS_ERR_NO_SECTIONS;
> -		goto reply_send;
> -	}
> -
> -	ret = hdb_error_to_cs(hdb_handle_create(&conn_info->iter_db, sizeof(iter), &handle));
> -	if (ret != CS_OK) {
> -		goto reply_send;
> -	}
> -
> -	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->iter_db, handle, (void *)&hdb_iter));
> -	if (ret != CS_OK) {
> -		goto reply_send;
> -	}
> -
> -	*hdb_iter = iter;
> -
> -	(void)hdb_handle_put (&conn_info->iter_db, handle);
> -
> -reply_send:
> -	memset(&res_lib_cmap_iter_init, 0, sizeof(res_lib_cmap_iter_init));
> -	res_lib_cmap_iter_init.header.size = sizeof(res_lib_cmap_iter_init);
> -	res_lib_cmap_iter_init.header.id = MESSAGE_RES_CMAP_ITER_INIT;
> -	res_lib_cmap_iter_init.header.error = ret;
> -	res_lib_cmap_iter_init.iter_handle = handle;
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_iter_init, sizeof(res_lib_cmap_iter_init));
> -}
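
For readers not used to the hdb pattern here: the sequence is create a handle, get it to pin the storage, write the iterator into it, then put it again, and the client only ever sees the numeric handle. A toy model of that flow (plain C, far simpler than the real handle database, and it stores a caller pointer instead of owning the storage):

#include <stdio.h>

#define MAX_HANDLES 8

struct handle_db {
	void *slot[MAX_HANDLES];
	int used[MAX_HANDLES];
};

static int handle_create(struct handle_db *db, void *object, unsigned int *handle)
{
	unsigned int i;

	for (i = 0; i < MAX_HANDLES; i++) {
		if (!db->used[i]) {
			db->used[i] = 1;
			db->slot[i] = object;
			*handle = i;
			return 0;
		}
	}
	return -1;				/* no free slot */
}

static void *handle_get(struct handle_db *db, unsigned int handle)
{
	return (handle < MAX_HANDLES && db->used[handle]) ? db->slot[handle] : NULL;
}

int main(void)
{
	struct handle_db db = { { 0 }, { 0 } };
	int iter_state = 42;			/* stands in for an icmap iterator */
	unsigned int handle;
	int *obj;

	if (handle_create(&db, &iter_state, &handle) == 0) {
		obj = handle_get(&db, handle);	/* "get" pins the object */
		printf("handle %u -> %d\n", handle, *obj);
		/* the real hdb requires a matching "put" here */
	}
	return 0;
}
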
> -
> -static void message_handler_req_lib_cmap_iter_next(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_iter_next *req_lib_cmap_iter_next = message;
> -	struct res_lib_cmap_iter_next res_lib_cmap_iter_next;
> -	cs_error_t ret;
> -	icmap_iter_t *iter;
> -	size_t value_len;
> -	icmap_value_types_t type;
> -	const char *res = NULL;
> -	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> -
> -	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->iter_db,
> -				req_lib_cmap_iter_next->iter_handle, (void *)&iter));
> -	if (ret != CS_OK) {
> -		goto reply_send;
> -	}
> -
> -	res = icmap_iter_next(*iter, &value_len, &type);
> -	if (res == NULL) {
> -		ret = CS_ERR_NO_SECTIONS;
> -	}
> -
> -	(void)hdb_handle_put (&conn_info->iter_db, req_lib_cmap_iter_next->iter_handle);
> -
> -reply_send:
> -	memset(&res_lib_cmap_iter_next, 0, sizeof(res_lib_cmap_iter_next));
> -	res_lib_cmap_iter_next.header.size = sizeof(res_lib_cmap_iter_next);
> -	res_lib_cmap_iter_next.header.id = MESSAGE_RES_CMAP_ITER_NEXT;
> -	res_lib_cmap_iter_next.header.error = ret;
> -
> -	if (res != NULL) {
> -		res_lib_cmap_iter_next.value_len = value_len;
> -		res_lib_cmap_iter_next.type = type;
> -
> -		memcpy(res_lib_cmap_iter_next.key_name.value, res, strlen(res));
> -	        res_lib_cmap_iter_next.key_name.length = strlen(res);
> -	}
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_iter_next, sizeof(res_lib_cmap_iter_next));
> -}
> -
> -static void message_handler_req_lib_cmap_iter_finalize(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_iter_finalize *req_lib_cmap_iter_finalize = message;
> -	struct res_lib_cmap_iter_finalize res_lib_cmap_iter_finalize;
> -	cs_error_t ret;
> -	icmap_iter_t *iter;
> -	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> -
> -	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->iter_db,
> -				req_lib_cmap_iter_finalize->iter_handle, (void *)&iter));
> -	if (ret != CS_OK) {
> -		goto reply_send;
> -	}
> -
> -	icmap_iter_finalize(*iter);
> -
> -	(void)hdb_handle_destroy(&conn_info->iter_db, req_lib_cmap_iter_finalize->iter_handle);
> -
> -	(void)hdb_handle_put (&conn_info->iter_db, req_lib_cmap_iter_finalize->iter_handle);
> -
> -reply_send:
> -	memset(&res_lib_cmap_iter_finalize, 0, sizeof(res_lib_cmap_iter_finalize));
> -	res_lib_cmap_iter_finalize.header.size = sizeof(res_lib_cmap_iter_finalize);
> -	res_lib_cmap_iter_finalize.header.id = MESSAGE_RES_CMAP_ITER_FINALIZE;
> -	res_lib_cmap_iter_finalize.header.error = ret;
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_iter_finalize, sizeof(res_lib_cmap_iter_finalize));
> -}
> -
> -static void cmap_notify_fn(int32_t event,
> -		const char *key_name,
> -		struct icmap_notify_value new_val,
> -		struct icmap_notify_value old_val,
> -		void *user_data)
> -{
> -	struct cmap_track_user_data *cmap_track_user_data = (struct cmap_track_user_data *)user_data;
> -	struct res_lib_cmap_notify_callback res_lib_cmap_notify_callback;
> -	struct iovec iov[3];
> -
> -	memset(&res_lib_cmap_notify_callback, 0, sizeof(res_lib_cmap_notify_callback));
> -
> -	res_lib_cmap_notify_callback.header.size = sizeof(res_lib_cmap_notify_callback) + new_val.len + old_val.len;
> -	res_lib_cmap_notify_callback.header.id = MESSAGE_RES_CMAP_NOTIFY_CALLBACK;
> -	res_lib_cmap_notify_callback.header.error = CS_OK;
> -
> -	res_lib_cmap_notify_callback.new_value_type = new_val.type;
> -	res_lib_cmap_notify_callback.old_value_type = old_val.type;
> -	res_lib_cmap_notify_callback.new_value_len = new_val.len;
> -	res_lib_cmap_notify_callback.old_value_len = old_val.len;
> -	res_lib_cmap_notify_callback.event = event;
> -	res_lib_cmap_notify_callback.key_name.length = strlen(key_name);
> -	res_lib_cmap_notify_callback.track_inst_handle = cmap_track_user_data->track_inst_handle;
> -
> -	memcpy(res_lib_cmap_notify_callback.key_name.value, key_name, strlen(key_name));
> -
> -	iov[0].iov_base = (char *)&res_lib_cmap_notify_callback;
> -	iov[0].iov_len = sizeof(res_lib_cmap_notify_callback);
> -	iov[1].iov_base = (char *)new_val.data;
> -	iov[1].iov_len = new_val.len;
> -	iov[2].iov_base = (char *)old_val.data;
> -	iov[2].iov_len = old_val.len;
> -
> -	api->ipc_dispatch_iov_send(cmap_track_user_data->conn, iov, 3);
> -}
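
The dispatch above avoids copying the new and old values into one contiguous buffer by describing the header and both payloads with three iovecs. A minimal standalone illustration of the same scatter/gather idea, using plain writev(2) in place of ipc_dispatch_iov_send:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

struct notify_hdr {
	unsigned int new_len;
	unsigned int old_len;
};

int main(void)
{
	const char new_val[] = "new";
	const char old_val[] = "old";
	struct notify_hdr hdr = { sizeof(new_val), sizeof(old_val) };
	struct iovec iov[3];

	iov[0].iov_base = &hdr;			iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = (void *)new_val;	iov[1].iov_len = sizeof(new_val);
	iov[2].iov_base = (void *)old_val;	iov[2].iov_len = sizeof(old_val);

	/* one syscall sends header + new value + old value, no extra copy */
	if (writev(STDOUT_FILENO, iov, 3) < 0)
		perror("writev");
	return 0;
}
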
> -
> -static void message_handler_req_lib_cmap_track_add(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_track_add *req_lib_cmap_track_add = message;
> -	struct res_lib_cmap_track_add res_lib_cmap_track_add;
> -	cs_error_t ret;
> -	cmap_track_handle_t handle;
> -	icmap_track_t track;
> -	icmap_track_t *hdb_track;
> -	struct cmap_track_user_data *cmap_track_user_data;
> -	const char *key_name;
> -
> -	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> -
> -	cmap_track_user_data = malloc(sizeof(*cmap_track_user_data));
> -	if (cmap_track_user_data == NULL) {
> -		ret = CS_ERR_NO_MEMORY;
> -
> -		goto reply_send;
> -	}
> -	memset(cmap_track_user_data, 0, sizeof(*cmap_track_user_data));
> -
> -	if (req_lib_cmap_track_add->key_name.length > 0) {
> -		key_name = (char *)req_lib_cmap_track_add->key_name.value;
> -	} else {
> -		key_name = NULL;
> -	}
> -
> -	ret = icmap_track_add(key_name,
> -			req_lib_cmap_track_add->track_type,
> -			cmap_notify_fn,
> -			cmap_track_user_data,
> -			&track);
> -	if (ret != CS_OK) {
> -		free(cmap_track_user_data);
> -
> -		goto reply_send;
> -	}
> -
> -	ret = hdb_error_to_cs(hdb_handle_create(&conn_info->track_db, sizeof(track), &handle));
> -	if (ret != CS_OK) {
> -		free(cmap_track_user_data);
> -
> -		goto reply_send;
> -	}
> -
> -	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->track_db, handle, (void *)&hdb_track));
> -	if (ret != CS_OK) {
> -		free(cmap_track_user_data);
> -
> -		goto reply_send;
> -	}
> -
> -	*hdb_track = track;
> -	cmap_track_user_data->conn = conn;
> -	cmap_track_user_data->track_handle = handle;
> -	cmap_track_user_data->track_inst_handle = req_lib_cmap_track_add->track_inst_handle;
> -
> -	(void)hdb_handle_put (&conn_info->track_db, handle);
> -
> -reply_send:
> -	memset(&res_lib_cmap_track_add, 0, sizeof(res_lib_cmap_track_add));
> -	res_lib_cmap_track_add.header.size = sizeof(res_lib_cmap_track_add);
> -	res_lib_cmap_track_add.header.id = MESSAGE_RES_CMAP_TRACK_ADD;
> -	res_lib_cmap_track_add.header.error = ret;
> -	res_lib_cmap_track_add.track_handle = handle;
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_track_add, sizeof(res_lib_cmap_track_add));
> -}
> -
> -static void message_handler_req_lib_cmap_track_delete(void *conn, const void *message)
> -{
> -	const struct req_lib_cmap_track_delete *req_lib_cmap_track_delete = message;
> -	struct res_lib_cmap_track_delete res_lib_cmap_track_delete;
> -	cs_error_t ret;
> -	icmap_track_t *track;
> -	struct cmap_conn_info *conn_info = (struct cmap_conn_info *)api->ipc_private_data_get (conn);
> -	uint64_t track_inst_handle = 0;
> -
> -	ret = hdb_error_to_cs(hdb_handle_get(&conn_info->track_db,
> -				req_lib_cmap_track_delete->track_handle, (void *)&track));
> -	if (ret != CS_OK) {
> -		goto reply_send;
> -	}
> -
> -	track_inst_handle = ((struct cmap_track_user_data *)icmap_track_get_user_data(*track))->track_inst_handle;
> -
> -	free(icmap_track_get_user_data(*track));
> -
> -	ret = icmap_track_delete(*track);
> -
> -	(void)hdb_handle_put (&conn_info->track_db, req_lib_cmap_track_delete->track_handle);
> -	(void)hdb_handle_destroy(&conn_info->track_db, req_lib_cmap_track_delete->track_handle);
> -
> -reply_send:
> -	memset(&res_lib_cmap_track_delete, 0, sizeof(res_lib_cmap_track_delete));
> -	res_lib_cmap_track_delete.header.size = sizeof(res_lib_cmap_track_delete);
> -	res_lib_cmap_track_delete.header.id = MESSAGE_RES_CMAP_TRACK_DELETE;
> -	res_lib_cmap_track_delete.header.error = ret;
> -	res_lib_cmap_track_delete.track_inst_handle = track_inst_handle;
> -
> -	api->ipc_response_send(conn, &res_lib_cmap_track_delete, sizeof(res_lib_cmap_track_delete));
> -}
> diff --git a/services/cpg.c b/services/cpg.c
> deleted file mode 100644
> index c7c4ead..0000000
> --- a/services/cpg.c
> +++ /dev/null
> @@ -1,2106 +0,0 @@
> -/*
> - * Copyright (c) 2006-2009 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Christine Caulfield (ccaulfie@xxxxxxxxxx)
> - * Author: Jan Friesse (jfriesse@xxxxxxxxxx)
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the MontaVista Software, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#ifdef HAVE_ALLOCA_H
> -#include <alloca.h>
> -#endif
> -#include <sys/types.h>
> -#include <sys/socket.h>
> -#include <sys/un.h>
> -#include <sys/ioctl.h>
> -#include <netinet/in.h>
> -#include <sys/uio.h>
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <stdlib.h>
> -#include <stdio.h>
> -#include <errno.h>
> -#include <time.h>
> -#include <assert.h>
> -#include <unistd.h>
> -#include <netinet/in.h>
> -#include <arpa/inet.h>
> -#include <sys/mman.h>
> -#include <qb/qbmap.h>
> -
> -#include <corosync/corotypes.h>
> -#include <qb/qbipc_common.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/list.h>
> -#include <corosync/jhash.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/logsys.h>
> -#include <corosync/coroapi.h>
> -
> -#include <corosync/cpg.h>
> -#include <corosync/ipc_cpg.h>
> -
> -LOGSYS_DECLARE_SUBSYS ("CPG");
> -
> -#define GROUP_HASH_SIZE 32
> -
> -enum cpg_message_req_types {
> -	MESSAGE_REQ_EXEC_CPG_PROCJOIN = 0,
> -	MESSAGE_REQ_EXEC_CPG_PROCLEAVE = 1,
> -	MESSAGE_REQ_EXEC_CPG_JOINLIST = 2,
> -	MESSAGE_REQ_EXEC_CPG_MCAST = 3,
> -	MESSAGE_REQ_EXEC_CPG_DOWNLIST_OLD = 4,
> -	MESSAGE_REQ_EXEC_CPG_DOWNLIST = 5
> -};
> -
> -struct zcb_mapped {
> -	struct list_head list;
> -	void *addr;
> -	size_t size;
> -};
> -/*
> - * state		exec deliver
> - * match group name, pid -> if matched deliver for YES:
> - * XXX indicates impossible state
> - *
> - *			join			leave			mcast
> - * UNJOINED		XXX			XXX			NO
> - * LEAVE_STARTED	XXX			YES(unjoined_enter)	YES
> - * JOIN_STARTED		YES(join_started_enter)	XXX			NO
> - * JOIN_COMPLETED	XXX			NO			YES
> - *
> - * join_started_enter
> - * 	set JOIN_COMPLETED
> - *	add entry to process_info list
> - * unjoined_enter
> - *	set UNJOINED
> - *	delete entry from process_info list
> - *
> - *
> - *			library accept join error codes
> - * UNJOINED		YES(CS_OK) 			set JOIN_STARTED
> - * LEAVE_STARTED	NO(CS_ERR_BUSY)
> - * JOIN_STARTED		NO(CS_ERR_EXIST)
> - * JOIN_COMPLETED	NO(CS_ERR_EXIST)
> - *
> - *			library accept leave error codes
> - * UNJOINED		NO(CS_ERR_NOT_EXIST)
> - * LEAVE_STARTED	NO(CS_ERR_NOT_EXIST)
> - * JOIN_STARTED		NO(CS_ERR_BUSY)
> - * JOIN_COMPLETED	YES(CS_OK)			set LEAVE_STARTED
> - *
> - *			library accept mcast
> - * UNJOINED		NO(CS_ERR_NOT_EXIST)
> - * LEAVE_STARTED	NO(CS_ERR_NOT_EXIST)
> - * JOIN_STARTED		YES(CS_OK)
> - * JOIN_COMPLETED	YES(CS_OK)
> - */
> -enum cpd_state {
> -	CPD_STATE_UNJOINED,
> -	CPD_STATE_LEAVE_STARTED,
> -	CPD_STATE_JOIN_STARTED,
> -	CPD_STATE_JOIN_COMPLETED
> -};
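
The table in the comment above reads most easily as a pair of pure functions over cpd_state. A standalone model (the state and error names mirror the comment; the functions are illustrative, not the corosync implementation):

#include <stdio.h>

enum cpd_state {
	CPD_STATE_UNJOINED,
	CPD_STATE_LEAVE_STARTED,
	CPD_STATE_JOIN_STARTED,
	CPD_STATE_JOIN_COMPLETED
};

static const char *accept_join(enum cpd_state s)
{
	switch (s) {
	case CPD_STATE_UNJOINED:	return "CS_OK (enter JOIN_STARTED)";
	case CPD_STATE_LEAVE_STARTED:	return "CS_ERR_BUSY";
	default:			return "CS_ERR_EXIST";
	}
}

static const char *accept_leave(enum cpd_state s)
{
	switch (s) {
	case CPD_STATE_JOIN_COMPLETED:	return "CS_OK (enter LEAVE_STARTED)";
	case CPD_STATE_JOIN_STARTED:	return "CS_ERR_BUSY";
	default:			return "CS_ERR_NOT_EXIST";
	}
}

int main(void)
{
	printf("join while unjoined: %s\n", accept_join(CPD_STATE_UNJOINED));
	printf("leave while joining: %s\n", accept_leave(CPD_STATE_JOIN_STARTED));
	return 0;
}
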
> -
> -enum cpg_sync_state {
> -	CPGSYNC_DOWNLIST,
> -	CPGSYNC_JOINLIST
> -};
> -
> -enum cpg_downlist_state_e {
> -       CPG_DOWNLIST_NONE,
> -       CPG_DOWNLIST_WAITING_FOR_MESSAGES,
> -       CPG_DOWNLIST_APPLYING,
> -};
> -static enum cpg_downlist_state_e downlist_state;
> -static struct list_head downlist_messages_head;
> -
> -struct cpg_pd {
> -	void *conn;
> - 	mar_cpg_name_t group_name;
> -	uint32_t pid;
> -	enum cpd_state cpd_state;
> -	unsigned int flags;
> -	int initial_totem_conf_sent;
> -	struct list_head list;
> -	struct list_head iteration_instance_list_head;
> -	struct list_head zcb_mapped_list_head;
> -};
> -
> -struct cpg_iteration_instance {
> -	hdb_handle_t handle;
> -	struct list_head list;
> -	struct list_head items_list_head; /* List of process_info */
> -	struct list_head *current_pointer;
> -};
> -
> -DECLARE_HDB_DATABASE(cpg_iteration_handle_t_db,NULL);
> -
> -DECLARE_LIST_INIT(cpg_pd_list_head);
> -
> -static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
> -
> -static unsigned int my_member_list_entries;
> -
> -static unsigned int my_old_member_list[PROCESSOR_COUNT_MAX];
> -
> -static unsigned int my_old_member_list_entries = 0;
> -
> -static struct corosync_api_v1 *api = NULL;
> -
> -static enum cpg_sync_state my_sync_state = CPGSYNC_DOWNLIST;
> -
> -static mar_cpg_ring_id_t last_sync_ring_id;
> -
> -struct process_info {
> -	unsigned int nodeid;
> -	uint32_t pid;
> -	mar_cpg_name_t group;
> -	struct list_head list; /* on the group_info members list */
> -};
> -DECLARE_LIST_INIT(process_info_list_head);
> -
> -struct join_list_entry {
> -	uint32_t pid;
> -	mar_cpg_name_t group_name;
> -};
> -
> -/*
> - * Service Interfaces required by service_message_handler struct
> - */
> -static int cpg_exec_init_fn (struct corosync_api_v1 *);
> -
> -static int cpg_lib_init_fn (void *conn);
> -
> -static int cpg_lib_exit_fn (void *conn);
> -
> -static void message_handler_req_exec_cpg_procjoin (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void message_handler_req_exec_cpg_procleave (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void message_handler_req_exec_cpg_joinlist (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void message_handler_req_exec_cpg_mcast (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void message_handler_req_exec_cpg_downlist_old (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void message_handler_req_exec_cpg_downlist (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void exec_cpg_procjoin_endian_convert (void *msg);
> -
> -static void exec_cpg_joinlist_endian_convert (void *msg);
> -
> -static void exec_cpg_mcast_endian_convert (void *msg);
> -
> -static void exec_cpg_downlist_endian_convert_old (void *msg);
> -
> -static void exec_cpg_downlist_endian_convert (void *msg);
> -
> -static void message_handler_req_lib_cpg_join (void *conn, const void *message);
> -
> -static void message_handler_req_lib_cpg_leave (void *conn, const void *message);
> -
> -static void message_handler_req_lib_cpg_finalize (void *conn, const void *message);
> -
> -static void message_handler_req_lib_cpg_mcast (void *conn, const void *message);
> -
> -static void message_handler_req_lib_cpg_membership (void *conn,
> -						    const void *message);
> -
> -static void message_handler_req_lib_cpg_local_get (void *conn,
> -						   const void *message);
> -
> -static void message_handler_req_lib_cpg_iteration_initialize (
> -	void *conn,
> -	const void *message);
> -
> -static void message_handler_req_lib_cpg_iteration_next (
> -	void *conn,
> -	const void *message);
> -
> -static void message_handler_req_lib_cpg_iteration_finalize (
> -	void *conn,
> -	const void *message);
> -
> -static void message_handler_req_lib_cpg_zc_alloc (
> -	void *conn,
> -	const void *message);
> -
> -static void message_handler_req_lib_cpg_zc_free (
> -	void *conn,
> -	const void *message);
> -
> -static void message_handler_req_lib_cpg_zc_execute (
> -	void *conn,
> -	const void *message);
> -
> -static int cpg_node_joinleave_send (unsigned int pid, const mar_cpg_name_t *group_name, int fn, int reason);
> -
> -static int cpg_exec_send_downlist(void);
> -
> -static int cpg_exec_send_joinlist(void);
> -
> -static void downlist_messages_delete (void);
> -
> -static void downlist_master_choose_and_send (void);
> -
> -static void cpg_sync_init_v2 (
> -	const unsigned int *trans_list,
> -	size_t trans_list_entries,
> -	const unsigned int *member_list,
> -	size_t member_list_entries,
> -	const struct memb_ring_id *ring_id);
> -
> -static int  cpg_sync_process (void);
> -
> -static void cpg_sync_activate (void);
> -
> -static void cpg_sync_abort (void);
> -
> -static int notify_lib_totem_membership (
> -	void *conn,
> -	int member_list_entries,
> -	const unsigned int *member_list);
> -
> -static inline int zcb_all_free (
> -	struct cpg_pd *cpd);
> -
> -/*
> - * Library Handler Definition
> - */
> -static struct corosync_lib_handler cpg_lib_engine[] =
> -{
> -	{ /* 0 - MESSAGE_REQ_CPG_JOIN */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_join,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 1 - MESSAGE_REQ_CPG_LEAVE */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_leave,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 2 - MESSAGE_REQ_CPG_MCAST */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_mcast,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 3 - MESSAGE_REQ_CPG_MEMBERSHIP */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_membership,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 4 - MESSAGE_REQ_CPG_LOCAL_GET */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_local_get,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 5 - MESSAGE_REQ_CPG_ITERATIONINITIALIZE */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_iteration_initialize,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 6 - MESSAGE_REQ_CPG_ITERATIONNEXT */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_iteration_next,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 7 - MESSAGE_REQ_CPG_ITERATIONFINALIZE */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_iteration_finalize,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 8 - MESSAGE_REQ_CPG_FINALIZE */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_finalize,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 9 */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_zc_alloc,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 10 */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_zc_free,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 11 */
> -		.lib_handler_fn				= message_handler_req_lib_cpg_zc_execute,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -
> -
> -};
> -
> -static struct corosync_exec_handler cpg_exec_engine[] =
> -{
> -	{ /* 0 - MESSAGE_REQ_EXEC_CPG_PROCJOIN */
> -		.exec_handler_fn	= message_handler_req_exec_cpg_procjoin,
> -		.exec_endian_convert_fn	= exec_cpg_procjoin_endian_convert
> -	},
> -	{ /* 1 - MESSAGE_REQ_EXEC_CPG_PROCLEAVE */
> -		.exec_handler_fn	= message_handler_req_exec_cpg_procleave,
> -		.exec_endian_convert_fn	= exec_cpg_procjoin_endian_convert
> -	},
> -	{ /* 2 - MESSAGE_REQ_EXEC_CPG_JOINLIST */
> -		.exec_handler_fn	= message_handler_req_exec_cpg_joinlist,
> -		.exec_endian_convert_fn	= exec_cpg_joinlist_endian_convert
> -	},
> -	{ /* 3 - MESSAGE_REQ_EXEC_CPG_MCAST */
> -		.exec_handler_fn	= message_handler_req_exec_cpg_mcast,
> -		.exec_endian_convert_fn	= exec_cpg_mcast_endian_convert
> -	},
> -	{ /* 4 - MESSAGE_REQ_EXEC_CPG_DOWNLIST_OLD */
> -		.exec_handler_fn	= message_handler_req_exec_cpg_downlist_old,
> -		.exec_endian_convert_fn	= exec_cpg_downlist_endian_convert_old
> -	},
> -	{ /* 5 - MESSAGE_REQ_EXEC_CPG_DOWNLIST */
> -		.exec_handler_fn	= message_handler_req_exec_cpg_downlist,
> -		.exec_endian_convert_fn	= exec_cpg_downlist_endian_convert
> -	},
> -};
> -
> -struct corosync_service_engine cpg_service_engine = {
> -	.name				        = "corosync cluster closed process group service v1.01",
> -	.id					= CPG_SERVICE,
> -	.priority				= 1,
> -	.private_data_size			= sizeof (struct cpg_pd),
> -	.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED,
> -	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> -	.lib_init_fn				= cpg_lib_init_fn,
> -	.lib_exit_fn				= cpg_lib_exit_fn,
> -	.lib_engine				= cpg_lib_engine,
> -	.lib_engine_count			= sizeof (cpg_lib_engine) / sizeof (struct corosync_lib_handler),
> -	.exec_init_fn				= cpg_exec_init_fn,
> -	.exec_dump_fn				= NULL,
> -	.exec_engine				= cpg_exec_engine,
> -	.exec_engine_count		        = sizeof (cpg_exec_engine) / sizeof (struct corosync_exec_handler),
> -	.sync_mode				= CS_SYNC_V1_APIV2,
> -	.sync_init                              = (sync_init_v1_fn_t)cpg_sync_init_v2,
> -	.sync_process                           = cpg_sync_process,
> -	.sync_activate                          = cpg_sync_activate,
> -	.sync_abort                             = cpg_sync_abort
> -};
> -
> -/*
> - * Dynamic loader definition
> - */
> -static struct corosync_service_engine *cpg_get_service_engine_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 cpg_service_engine_iface = {
> -	.corosync_get_service_engine_ver0		= cpg_get_service_engine_ver0
> -};
> -
> -static struct lcr_iface corosync_cpg_ver0[1] = {
> -	{
> -		.name				= "corosync_cpg",
> -		.version			= 0,
> -		.versions_replace		= 0,
> -		.versions_replace_count         = 0,
> -		.dependencies			= 0,
> -		.dependency_count		= 0,
> -		.constructor			= NULL,
> -		.destructor			= NULL,
> -		.interfaces			= NULL
> -	}
> -};
> -
> -static struct lcr_comp cpg_comp_ver0 = {
> -	.iface_count			= 1,
> -	.ifaces			        = corosync_cpg_ver0
> -};
> -
> -
> -static struct corosync_service_engine *cpg_get_service_engine_ver0 (void)
> -{
> -	return (&cpg_service_engine);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -        lcr_interfaces_set (&corosync_cpg_ver0[0], &cpg_service_engine_iface);
> -
> -	lcr_component_register (&cpg_comp_ver0);
> -}
> -
> -struct req_exec_cpg_procjoin {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -	mar_cpg_name_t group_name __attribute__((aligned(8)));
> -	mar_uint32_t pid __attribute__((aligned(8)));
> -	mar_uint32_t reason __attribute__((aligned(8)));
> -};
> -
> -struct req_exec_cpg_mcast {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -	mar_cpg_name_t group_name __attribute__((aligned(8)));
> -	mar_uint32_t msglen __attribute__((aligned(8)));
> -	mar_uint32_t pid __attribute__((aligned(8)));
> -	mar_message_source_t source __attribute__((aligned(8)));
> -	mar_uint8_t message[] __attribute__((aligned(8)));
> -};
> -
> -struct req_exec_cpg_downlist_old {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -	mar_uint32_t left_nodes __attribute__((aligned(8)));
> -	mar_uint32_t nodeids[PROCESSOR_COUNT_MAX]  __attribute__((aligned(8)));
> -};
> -
> -struct req_exec_cpg_downlist {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -	/* merge decisions */
> -	mar_uint32_t old_members __attribute__((aligned(8)));
> -	/* downlist below */
> -	mar_uint32_t left_nodes __attribute__((aligned(8)));
> -	mar_uint32_t nodeids[PROCESSOR_COUNT_MAX]  __attribute__((aligned(8)));
> -};
> -
> -struct downlist_msg {
> -	mar_uint32_t sender_nodeid;
> -	mar_uint32_t old_members __attribute__((aligned(8)));
> -	mar_uint32_t left_nodes __attribute__((aligned(8)));
> -	mar_uint32_t nodeids[PROCESSOR_COUNT_MAX]  __attribute__((aligned(8)));
> -	struct list_head list;
> -};
> -
> -static struct req_exec_cpg_downlist g_req_exec_cpg_downlist;
> -
> -static void cpg_sync_init_v2 (
> -	const unsigned int *trans_list,
> -	size_t trans_list_entries,
> -	const unsigned int *member_list,
> -	size_t member_list_entries,
> -	const struct memb_ring_id *ring_id)
> -{
> -	int entries;
> -	int i, j;
> -	int found;
> -
> -	my_sync_state = CPGSYNC_DOWNLIST;
> -
> -	memcpy (my_member_list, member_list, member_list_entries *
> -		sizeof (unsigned int));
> -	my_member_list_entries = member_list_entries;
> -
> -	last_sync_ring_id.nodeid = ring_id->rep.nodeid;
> -	last_sync_ring_id.seq = ring_id->seq;
> -
> -	downlist_state = CPG_DOWNLIST_WAITING_FOR_MESSAGES;
> -
> -	entries = 0;
> -	/*
> -	 * Determine list of nodeids for downlist message
> -	 */
> -	for (i = 0; i < my_old_member_list_entries; i++) {
> -		found = 0;
> -		for (j = 0; j < trans_list_entries; j++) {
> -			if (my_old_member_list[i] == trans_list[j]) {
> -				found = 1;
> -				break;
> -			}
> -		}
> -		if (found == 0) {
> -			g_req_exec_cpg_downlist.nodeids[entries++] =
> -				my_old_member_list[i];
> -		}
> -	}
> -	g_req_exec_cpg_downlist.left_nodes = entries;
> -}
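
The loop above is a plain set difference: every node present in the old membership but absent from the transitional list ends up in the downlist. A standalone sketch of the same computation:

#include <stdio.h>

static unsigned int downlist_build(const unsigned int *old_members, unsigned int old_n,
				   const unsigned int *trans, unsigned int trans_n,
				   unsigned int *downlist)
{
	unsigned int i, j, entries = 0;

	for (i = 0; i < old_n; i++) {
		int found = 0;

		for (j = 0; j < trans_n; j++) {
			if (old_members[i] == trans[j]) {
				found = 1;
				break;
			}
		}
		if (!found)
			downlist[entries++] = old_members[i];	/* node left the ring */
	}
	return entries;
}

int main(void)
{
	unsigned int old_members[] = { 1, 2, 3 };
	unsigned int trans[] = { 1, 3 };
	unsigned int down[3];
	unsigned int n = downlist_build(old_members, 3, trans, 2, down);

	printf("%u node(s) in downlist, first is %u\n", n, down[0]);
	return 0;
}
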
> -
> -static int cpg_sync_process (void)
> -{
> -	int res = -1;
> -
> -	if (my_sync_state == CPGSYNC_DOWNLIST) {
> -		res = cpg_exec_send_downlist();
> -		if (res == -1) {
> -			return (-1);
> -		}
> -		my_sync_state = CPGSYNC_JOINLIST;
> -	}
> -	if (my_sync_state == CPGSYNC_JOINLIST) {
> -		res = cpg_exec_send_joinlist();
> -	}
> -	return (res);
> -}
> -
> -static void cpg_sync_activate (void)
> -{
> -	memcpy (my_old_member_list, my_member_list,
> -		my_member_list_entries * sizeof (unsigned int));
> -	my_old_member_list_entries = my_member_list_entries;
> -
> -	if (downlist_state == CPG_DOWNLIST_WAITING_FOR_MESSAGES) {
> -		downlist_master_choose_and_send ();
> -	}
> -
> -	downlist_messages_delete ();
> -	downlist_state = CPG_DOWNLIST_NONE;
> -
> -	notify_lib_totem_membership (NULL, my_member_list_entries, my_member_list);
> -}
> -
> -static void cpg_sync_abort (void)
> -{
> -	downlist_state = CPG_DOWNLIST_NONE;
> -	downlist_messages_delete ();
> -}
> -
> -static int notify_lib_totem_membership (
> -	void *conn,
> -	int member_list_entries,
> -	const unsigned int *member_list)
> -{
> -	struct list_head *iter;
> -	char *buf;
> -	int size;
> -	struct res_lib_cpg_totem_confchg_callback *res;
> -
> -	size = sizeof(struct res_lib_cpg_totem_confchg_callback) +
> -		sizeof(mar_uint32_t) * (member_list_entries);
> -	buf = alloca(size);
> -	if (!buf)
> -		return CS_ERR_LIBRARY;
> -
> -	res = (struct res_lib_cpg_totem_confchg_callback *)buf;
> -	res->member_list_entries = member_list_entries;
> -	res->header.size = size;
> -	res->header.id = MESSAGE_RES_CPG_TOTEM_CONFCHG_CALLBACK;
> -	res->header.error = CS_OK;
> -
> -	memcpy (&res->ring_id, &last_sync_ring_id, sizeof (mar_cpg_ring_id_t));
> -	memcpy (res->member_list, member_list, res->member_list_entries * sizeof (mar_uint32_t));
> -
> -	if (conn == NULL) {
> -		for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> -			struct cpg_pd *cpg_pd = list_entry (iter, struct cpg_pd, list);
> -			api->ipc_dispatch_send (cpg_pd->conn, buf, size);
> -		}
> -	} else {
> -		api->ipc_dispatch_send (conn, buf, size);
> -	}
> -
> -	return CS_OK;
> -}
> -
> -static int notify_lib_joinlist(
> -	const mar_cpg_name_t *group_name,
> -	void *conn,
> -	int joined_list_entries,
> -	mar_cpg_address_t *joined_list,
> -	int left_list_entries,
> -	mar_cpg_address_t *left_list,
> -	int id)
> -{
> -	int size;
> -	char *buf;
> -	struct list_head *iter;
> -	int count;
> -	struct res_lib_cpg_confchg_callback *res;
> -	mar_cpg_address_t *retgi;
> -
> -	count = 0;
> -
> -	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> -		struct process_info *pi = list_entry (iter, struct process_info, list);
> -		if (mar_name_compare (&pi->group, group_name) == 0) {
> -			int i;
> -			int founded = 0;
> -
> -			for (i = 0; i < left_list_entries; i++) {
> -				if (left_list[i].nodeid == pi->nodeid && left_list[i].pid == pi->pid) {
> -					founded++;
> -				}
> -			}
> -
> -			if (!founded)
> -				count++;
> -		}
> -	}
> -
> -	size = sizeof(struct res_lib_cpg_confchg_callback) +
> -		sizeof(mar_cpg_address_t) * (count + left_list_entries + joined_list_entries);
> -	buf = alloca(size);
> -	if (!buf)
> -		return CS_ERR_LIBRARY;
> -
> -	res = (struct res_lib_cpg_confchg_callback *)buf;
> -	res->joined_list_entries = joined_list_entries;
> -	res->left_list_entries = left_list_entries;
> -	res->member_list_entries = count;
> -	retgi = res->member_list;
> -	res->header.size = size;
> -	res->header.id = id;
> -	res->header.error = CS_OK;
> -	memcpy(&res->group_name, group_name, sizeof(mar_cpg_name_t));
> -
> -	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> -		struct process_info *pi=list_entry (iter, struct process_info, list);
> -
> -		if (mar_name_compare (&pi->group, group_name) == 0) {
> -			int i;
> -			int founded = 0;
> -
> -			for (i = 0;i < left_list_entries; i++) {
> -				if (left_list[i].nodeid == pi->nodeid && left_list[i].pid == pi->pid) {
> -					founded++;
> -				}
> -			}
> -
> -			if (!founded) {
> -				retgi->nodeid = pi->nodeid;
> -				retgi->pid = pi->pid;
> -				retgi++;
> -			}
> -		}
> -	}
> -
> -	if (left_list_entries) {
> -		memcpy (retgi, left_list, left_list_entries * sizeof(mar_cpg_address_t));
> -		retgi += left_list_entries;
> -	}
> -
> -	if (joined_list_entries) {
> -		memcpy (retgi, joined_list, joined_list_entries * sizeof(mar_cpg_address_t));
> -		retgi += joined_list_entries;
> -	}
> -
> -	if (conn) {
> -		api->ipc_dispatch_send (conn, buf, size);
> -	} else {
> -		for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> -			struct cpg_pd *cpd = list_entry (iter, struct cpg_pd, list);
> -			if (mar_name_compare (&cpd->group_name, group_name) == 0) {
> -				assert (joined_list_entries <= 1);
> -				if (joined_list_entries) {
> -					if (joined_list[0].pid == cpd->pid &&
> -						joined_list[0].nodeid == api->totem_nodeid_get()) {
> -						cpd->cpd_state = CPD_STATE_JOIN_COMPLETED;
> -					}
> -				}
> -				if (cpd->cpd_state == CPD_STATE_JOIN_COMPLETED ||
> -					cpd->cpd_state == CPD_STATE_LEAVE_STARTED) {
> -
> -					api->ipc_dispatch_send (cpd->conn, buf, size);
> -				}
> -				if (left_list_entries) {
> -					if (left_list[0].pid == cpd->pid &&
> -						left_list[0].nodeid == api->totem_nodeid_get() &&
> -						left_list[0].reason == CONFCHG_CPG_REASON_LEAVE) {
> -
> -						cpd->pid = 0;
> -						memset (&cpd->group_name, 0, sizeof(cpd->group_name));
> -						cpd->cpd_state = CPD_STATE_UNJOINED;
> -					}
> -				}
> -			}
> -		}
> -	}
> -
> -
> -	/*
> -	 * Traverse thru cpds and send totem membership for cpd, where it is not send yet
> -	 */
> -	for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> -		struct cpg_pd *cpd = list_entry (iter, struct cpg_pd, list);
> -
> -		if ((cpd->flags & CPG_MODEL_V1_DELIVER_INITIAL_TOTEM_CONF) && (cpd->initial_totem_conf_sent == 0)) {
> -			cpd->initial_totem_conf_sent = 1;
> -
> -			notify_lib_totem_membership (cpd->conn, my_old_member_list_entries, my_old_member_list);
> -		}
> -	}
> -
> -	return CS_OK;
> -}
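
Side note for readers of the above: the single response buffer packs three variable-length lists back to back, the surviving members first (the counted member_list), then the left list, then the joined list appended after it. A simplified stand-in showing that same layout, with made-up field and type names, just for illustration and not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* simplified stand-ins for the marshalled types used above */
typedef struct { uint32_t nodeid, pid, reason; } addr_t;

struct confchg {                     /* members first, then left, then joined */
	uint32_t member_entries, left_entries, joined_entries;
	addr_t   list[];
};

static struct confchg *build(const addr_t *members, uint32_t nm,
			     const addr_t *left,    uint32_t nl,
			     const addr_t *joined,  uint32_t nj)
{
	struct confchg *c = malloc(sizeof(*c) + sizeof(addr_t) * (nm + nl + nj));

	if (c == NULL)
		return NULL;
	c->member_entries = nm;
	c->left_entries   = nl;
	c->joined_entries = nj;
	if (nm) memcpy(c->list,           members, nm * sizeof(addr_t));
	if (nl) memcpy(c->list + nm,      left,    nl * sizeof(addr_t));
	if (nj) memcpy(c->list + nm + nl, joined,  nj * sizeof(addr_t));
	return c;
}

int main(void)
{
	addr_t m[2] = { { 1, 100, 0 }, { 2, 200, 0 } };
	addr_t l[1] = { { 3, 300, 0 } };
	struct confchg *c = build(m, 2, l, 1, NULL, 0);

	if (c == NULL)
		return 1;
	printf("members=%u left=%u joined=%u\n",
	       (unsigned)c->member_entries, (unsigned)c->left_entries,
	       (unsigned)c->joined_entries);
	free(c);
	return 0;
}
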
> -
> -static void downlist_log(const char *msg, struct downlist_msg* dl)
> -{
> -	log_printf (LOG_DEBUG,
> -		    "%s: sender %s; members(old:%d left:%d)",
> -		    msg,
> -		    api->totem_ifaces_print(dl->sender_nodeid),
> -		    dl->old_members,
> -		    dl->left_nodes);
> -}
> -
> -static struct downlist_msg* downlist_master_choose (void)
> -{
> -	struct downlist_msg *cmp;
> -	struct downlist_msg *best = NULL;
> -	struct list_head *iter;
> -	uint32_t cmp_members;
> -	uint32_t best_members;
> -
> -	for (iter = downlist_messages_head.next;
> -		iter != &downlist_messages_head;
> -		iter = iter->next) {
> -
> -		cmp = list_entry(iter, struct downlist_msg, list);
> -		downlist_log("comparing", cmp);
> -		if (best == NULL) {
> -			best = cmp;
> -			continue;
> -		}
> -		best_members = best->old_members - best->left_nodes;
> -		cmp_members = cmp->old_members - cmp->left_nodes;
> -
> -		if (cmp_members < best_members) {
> -			continue;
> -		}
> -		else if (cmp_members > best_members) {
> -			best = cmp;
> -		}
> -		else if (cmp->sender_nodeid < best->sender_nodeid) {
> -			best = cmp;
> -		}
> -
> -	}
> -	return best;
> -}
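
The rule buried in that loop: prefer the downlist whose sender still saw the most surviving members (old_members - left_nodes), and break ties with the lowest sender nodeid, presumably so every node settles on the same downlist without an extra agreement step. A minimal standalone sketch of just that comparator, with the three fields it reads copied into a stand-in struct (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

struct dl {                      /* mirrors the fields the chooser reads */
	uint32_t sender_nodeid;
	uint32_t old_members;
	uint32_t left_nodes;
};

/* Nonzero when "cmp" should replace "best": most surviving members wins,
 * ties go to the lowest sender nodeid. */
static int downlist_better(const struct dl *cmp, const struct dl *best)
{
	uint32_t cmp_members  = cmp->old_members  - cmp->left_nodes;
	uint32_t best_members = best->old_members - best->left_nodes;

	if (cmp_members != best_members)
		return cmp_members > best_members;
	return cmp->sender_nodeid < best->sender_nodeid;
}

int main(void)
{
	struct dl a = { .sender_nodeid = 2, .old_members = 5, .left_nodes = 2 };
	struct dl b = { .sender_nodeid = 1, .old_members = 5, .left_nodes = 2 };

	/* equal survivor counts (3 each), so the lower nodeid wins */
	printf("replace a with b? %d\n", downlist_better(&b, &a));
	return 0;
}
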
> -
> -static void downlist_master_choose_and_send (void)
> -{
> -	struct downlist_msg *stored_msg;
> -	struct list_head *iter;
> -	struct process_info *left_pi;
> -	qb_map_t *group_map;
> -	struct cpg_name cpg_group;
> -	mar_cpg_name_t group;
> -	struct confchg_data{
> -		struct cpg_name cpg_group;
> -		mar_cpg_address_t left_list[CPG_MEMBERS_MAX];
> -		int left_list_entries;
> -		struct list_head  list;
> -	} *pcd;
> -	qb_map_iter_t *miter;
> -	int i, size;
> -
> -	downlist_state = CPG_DOWNLIST_APPLYING;
> -
> -	stored_msg = downlist_master_choose ();
> -	if (!stored_msg) {
> -		log_printf (LOGSYS_LEVEL_DEBUG, "NO chosen downlist");
> -		return;
> -	}
> -	downlist_log("chosen downlist", stored_msg);
> -
> -	group_map = qb_skiplist_create();
> -
> -	/*
> -	 * only the cpg groups included in left nodes should receive
> -	 * confchg event, so we will collect these cpg groups and
> -	 * relative left_lists here.
> -	 */
> -	for (iter = process_info_list_head.next; iter != &process_info_list_head; ) {
> -		struct process_info *pi = list_entry(iter, struct process_info, list);
> -		iter = iter->next;
> -
> -		left_pi = NULL;
> -		for (i = 0; i < stored_msg->left_nodes; i++) {
> -
> -			if (pi->nodeid == stored_msg->nodeids[i]) {
> -				left_pi = pi;
> -				break;
> -			}
> -		}
> -
> -		if (left_pi) {
> -			marshall_from_mar_cpg_name_t(&cpg_group, &left_pi->group);
> -			cpg_group.value[cpg_group.length] = 0;
> -
> -			pcd = (struct confchg_data *)qb_map_get(group_map, cpg_group.value);
> -			if (pcd == NULL) {
> -				pcd = (struct confchg_data *)calloc(1, sizeof(struct confchg_data));
> -				memcpy(&pcd->cpg_group, &cpg_group, sizeof(struct cpg_name));
> -				qb_map_put(group_map, pcd->cpg_group.value, pcd);
> -			}
> -			size = pcd->left_list_entries;
> -			pcd->left_list[size].nodeid = left_pi->nodeid;
> -			pcd->left_list[size].pid = left_pi->pid;
> -			pcd->left_list[size].reason = CONFCHG_CPG_REASON_NODEDOWN;
> -			pcd->left_list_entries++;
> -			list_del (&left_pi->list);
> -			free (left_pi);
> -		}
> -	}
> -
> -	/* send only one confchg event per cpg group */
> -	miter = qb_map_iter_create(group_map);
> -	while (qb_map_iter_next(miter, (void **)&pcd)) {
> -		marshall_to_mar_cpg_name_t(&group, &pcd->cpg_group);
> -
> -		log_printf (LOG_DEBUG, "left_list_entries:%d", pcd->left_list_entries);
> -		for (i=0; i<pcd->left_list_entries; i++) {
> -			log_printf (LOG_DEBUG, "left_list[%d] group:%d, ip:%s, pid:%d",
> -				i, pcd->cpg_group.value,
> -				(char*)api->totem_ifaces_print(pcd->left_list[i].nodeid),
> -				pcd->left_list[i].pid);
> -		}
> -
> -		/* send confchg event */
> -		notify_lib_joinlist(&group, NULL,
> -			0, NULL,
> -			pcd->left_list_entries,
> -			pcd->left_list,
> -			MESSAGE_RES_CPG_CONFCHG_CALLBACK);
> -
> -		free(pcd);
> -	}
> -	qb_map_iter_free(miter);
> -	qb_map_destroy(group_map);
> -}
> -
> -static void downlist_messages_delete (void)
> -{
> -	struct downlist_msg *stored_msg;
> -	struct list_head *iter, *iter_next;
> -
> -	for (iter = downlist_messages_head.next;
> -		iter != &downlist_messages_head;
> -		iter = iter_next) {
> -
> -		iter_next = iter->next;
> -
> -		stored_msg = list_entry(iter, struct downlist_msg, list);
> -		list_del (&stored_msg->list);
> -		free (stored_msg);
> -	}
> -}
> -
> -
> -static int cpg_exec_init_fn (struct corosync_api_v1 *corosync_api)
> -{
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -	list_init (&downlist_messages_head);
> -	api = corosync_api;
> -	return (0);
> -}
> -
> -static void cpg_iteration_instance_finalize (struct cpg_iteration_instance *cpg_iteration_instance)
> -{
> -	struct list_head *iter, *iter_next;
> -	struct process_info *pi;
> -
> -	for (iter = cpg_iteration_instance->items_list_head.next;
> -		iter != &cpg_iteration_instance->items_list_head;
> -		iter = iter_next) {
> -
> -		iter_next = iter->next;
> -
> -		pi = list_entry (iter, struct process_info, list);
> -		list_del (&pi->list);
> -		free (pi);
> -	}
> -
> -	list_del (&cpg_iteration_instance->list);
> -	hdb_handle_destroy (&cpg_iteration_handle_t_db, cpg_iteration_instance->handle);
> -}
> -
> -static void cpg_pd_finalize (struct cpg_pd *cpd)
> -{
> -	struct list_head *iter, *iter_next;
> -	struct cpg_iteration_instance *cpii;
> -
> -	zcb_all_free(cpd);
> -	for (iter = cpd->iteration_instance_list_head.next;
> -		iter != &cpd->iteration_instance_list_head;
> -		iter = iter_next) {
> -
> -		iter_next = iter->next;
> -
> -		cpii = list_entry (iter, struct cpg_iteration_instance, list);
> -
> -		cpg_iteration_instance_finalize (cpii);
> -	}
> -
> -	list_del (&cpd->list);
> -}
> -
> -static int cpg_lib_exit_fn (void *conn)
> -{
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "exit_fn for conn=%p\n", conn);
> -
> -	if (cpd->group_name.length > 0) {
> -		cpg_node_joinleave_send (cpd->pid, &cpd->group_name,
> -				MESSAGE_REQ_EXEC_CPG_PROCLEAVE, CONFCHG_CPG_REASON_PROCDOWN);
> -	}
> -
> -	cpg_pd_finalize (cpd);
> -
> -	api->ipc_refcnt_dec (conn);
> -	return (0);
> -}
> -
> -static int cpg_node_joinleave_send (unsigned int pid, const mar_cpg_name_t *group_name, int fn, int reason)
> -{
> -	struct req_exec_cpg_procjoin req_exec_cpg_procjoin;
> -	struct iovec req_exec_cpg_iovec;
> -	int result;
> -
> -	memcpy(&req_exec_cpg_procjoin.group_name, group_name, sizeof(mar_cpg_name_t));
> -	req_exec_cpg_procjoin.pid = pid;
> -	req_exec_cpg_procjoin.reason = reason;
> -
> -	req_exec_cpg_procjoin.header.size = sizeof(req_exec_cpg_procjoin);
> -	req_exec_cpg_procjoin.header.id = SERVICE_ID_MAKE(CPG_SERVICE, fn);
> -
> -	req_exec_cpg_iovec.iov_base = (char *)&req_exec_cpg_procjoin;
> -	req_exec_cpg_iovec.iov_len = sizeof(req_exec_cpg_procjoin);
> -
> -	result = api->totem_mcast (&req_exec_cpg_iovec, 1, TOTEM_AGREED);
> -
> -	return (result);
> -}
> -
> -/* Can byteswap join & leave messages */
> -static void exec_cpg_procjoin_endian_convert (void *msg)
> -{
> -	struct req_exec_cpg_procjoin *req_exec_cpg_procjoin = msg;
> -
> -	req_exec_cpg_procjoin->pid = swab32(req_exec_cpg_procjoin->pid);
> -	swab_mar_cpg_name_t (&req_exec_cpg_procjoin->group_name);
> -	req_exec_cpg_procjoin->reason = swab32(req_exec_cpg_procjoin->reason);
> -}
> -
> -static void exec_cpg_joinlist_endian_convert (void *msg_v)
> -{
> -	char *msg = msg_v;
> -	struct qb_ipc_response_header *res = (struct qb_ipc_response_header *)msg;
> -	struct join_list_entry *jle = (struct join_list_entry *)(msg + sizeof(struct qb_ipc_response_header));
> -
> -	swab_mar_int32_t (&res->size);
> -
> -	while ((const char*)jle < msg + res->size) {
> -		jle->pid = swab32(jle->pid);
> -		swab_mar_cpg_name_t (&jle->group_name);
> -		jle++;
> -	}
> -}
> -
> -static void exec_cpg_downlist_endian_convert_old (void *msg)
> -{
> -}
> -
> -static void exec_cpg_downlist_endian_convert (void *msg)
> -{
> -	struct req_exec_cpg_downlist *req_exec_cpg_downlist = msg;
> -	unsigned int i;
> -
> -	req_exec_cpg_downlist->left_nodes = swab32(req_exec_cpg_downlist->left_nodes);
> -	req_exec_cpg_downlist->old_members = swab32(req_exec_cpg_downlist->old_members);
> -
> -	for (i = 0; i < req_exec_cpg_downlist->left_nodes; i++) {
> -		req_exec_cpg_downlist->nodeids[i] = swab32(req_exec_cpg_downlist->nodeids[i]);
> -	}
> -}
> -
> -
> -static void exec_cpg_mcast_endian_convert (void *msg)
> -{
> -	struct req_exec_cpg_mcast *req_exec_cpg_mcast = msg;
> -
> -	swab_coroipc_request_header_t (&req_exec_cpg_mcast->header);
> -	swab_mar_cpg_name_t (&req_exec_cpg_mcast->group_name);
> -	req_exec_cpg_mcast->pid = swab32(req_exec_cpg_mcast->pid);
> -	req_exec_cpg_mcast->msglen = swab32(req_exec_cpg_mcast->msglen);
> -	swab_mar_message_source_t (&req_exec_cpg_mcast->source);
> -}
> -
> -static struct process_info *process_info_find(const mar_cpg_name_t *group_name, uint32_t pid, unsigned int nodeid) {
> -	struct list_head *iter;
> -
> -	for (iter = process_info_list_head.next; iter != &process_info_list_head; ) {
> -		struct process_info *pi = list_entry (iter, struct process_info, list);
> -		iter = iter->next;
> -
> -		if (pi->pid == pid && pi->nodeid == nodeid &&
> -			mar_name_compare (&pi->group, group_name) == 0) {
> -				return pi;
> -		}
> -	}
> -
> -	return NULL;
> -}
> -
> -static void do_proc_join(
> -	const mar_cpg_name_t *name,
> -	uint32_t pid,
> -	unsigned int nodeid,
> -	int reason)
> -{
> -	struct process_info *pi;
> -	struct process_info *pi_entry;
> -	mar_cpg_address_t notify_info;
> -	struct list_head *list;
> -	struct list_head *list_to_add = NULL;
> -
> -	if (process_info_find (name, pid, nodeid) != NULL) {
> -		return ;
> - 	}
> -	pi = malloc (sizeof (struct process_info));
> -	if (!pi) {
> -		log_printf(LOGSYS_LEVEL_WARNING, "Unable to allocate process_info struct");
> -		return;
> -	}
> -	pi->nodeid = nodeid;
> -	pi->pid = pid;
> -	memcpy(&pi->group, name, sizeof(*name));
> -	list_init(&pi->list);
> -
> -	/*
> -	 * Insert new process in sorted order so synchronization works properly
> -	 */
> -	list_to_add = &process_info_list_head;
> -	for (list = process_info_list_head.next; list != &process_info_list_head; list = list->next) {
> -
> -		pi_entry = list_entry(list, struct process_info, list);
> -		if (pi_entry->nodeid > pi->nodeid ||
> -			(pi_entry->nodeid == pi->nodeid && pi_entry->pid > pi->pid)) {
> -
> -			break;
> -		}
> -		list_to_add = list;
> -	}
> -	list_add (&pi->list, list_to_add);
> -
> -	notify_info.pid = pi->pid;
> -	notify_info.nodeid = nodeid;
> -	notify_info.reason = reason;
> -
> -	notify_lib_joinlist(&pi->group, NULL,
> -			    1, &notify_info,
> -			    0, NULL,
> -			    MESSAGE_RES_CPG_CONFCHG_CALLBACK);
> -}
> -
> -static void message_handler_req_exec_cpg_downlist_old (
> -	const void *message,
> -	unsigned int nodeid)
> -{
> -	log_printf (LOGSYS_LEVEL_WARNING, "downlist OLD from node %d",
> -		nodeid);
> -}
> -
> -static void message_handler_req_exec_cpg_downlist(
> -	const void *message,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_cpg_downlist *req_exec_cpg_downlist = message;
> -	int i;
> -	struct list_head *iter;
> -	struct downlist_msg *stored_msg;
> -	int found;
> -
> -	if (downlist_state != CPG_DOWNLIST_WAITING_FOR_MESSAGES) {
> -		log_printf (LOGSYS_LEVEL_WARNING, "downlist left_list: %d received in state %d",
> -			req_exec_cpg_downlist->left_nodes, downlist_state);
> -		return;
> -	}
> -
> -	stored_msg = malloc (sizeof (struct downlist_msg));
> -	stored_msg->sender_nodeid = nodeid;
> -	stored_msg->old_members = req_exec_cpg_downlist->old_members;
> -	stored_msg->left_nodes = req_exec_cpg_downlist->left_nodes;
> -	memcpy (stored_msg->nodeids, req_exec_cpg_downlist->nodeids,
> -		req_exec_cpg_downlist->left_nodes * sizeof (mar_uint32_t));
> -	list_init (&stored_msg->list);
> -	list_add (&stored_msg->list, &downlist_messages_head);
> -
> -	for (i = 0; i < my_member_list_entries; i++) {
> -		found = 0;
> -		for (iter = downlist_messages_head.next;
> -			iter != &downlist_messages_head;
> -			iter = iter->next) {
> -
> -			stored_msg = list_entry(iter, struct downlist_msg, list);
> -			if (my_member_list[i] == stored_msg->sender_nodeid) {
> -				found = 1;
> -			}
> -		}
> -		if (!found) {
> -			return;
> -		}
> -	}
> -
> -	downlist_master_choose_and_send ();
> -}
> -
> -
> -static void message_handler_req_exec_cpg_procjoin (
> -	const void *message,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_cpg_procjoin *req_exec_cpg_procjoin = message;
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got procjoin message from cluster node %d\n", nodeid);
> -
> -	do_proc_join (&req_exec_cpg_procjoin->group_name,
> -		req_exec_cpg_procjoin->pid, nodeid,
> -		CONFCHG_CPG_REASON_JOIN);
> -}
> -
> -static void message_handler_req_exec_cpg_procleave (
> -	const void *message,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_cpg_procjoin *req_exec_cpg_procjoin = message;
> -	struct process_info *pi;
> -	struct list_head *iter;
> -	mar_cpg_address_t notify_info;
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got procleave message from cluster node %d\n", nodeid);
> -
> -	notify_info.pid = req_exec_cpg_procjoin->pid;
> -	notify_info.nodeid = nodeid;
> -	notify_info.reason = req_exec_cpg_procjoin->reason;
> -
> -	notify_lib_joinlist(&req_exec_cpg_procjoin->group_name, NULL,
> -		0, NULL,
> -		1, &notify_info,
> -		MESSAGE_RES_CPG_CONFCHG_CALLBACK);
> -
> -	for (iter = process_info_list_head.next; iter != &process_info_list_head; ) {
> -		pi = list_entry(iter, struct process_info, list);
> -		iter = iter->next;
> -
> -		if (pi->pid == req_exec_cpg_procjoin->pid && pi->nodeid == nodeid &&
> -			mar_name_compare (&pi->group, &req_exec_cpg_procjoin->group_name)==0) {
> -			list_del (&pi->list);
> -			free (pi);
> -		}
> -	}
> -}
> -
> -
> -/* Got a proclist from another node */
> -static void message_handler_req_exec_cpg_joinlist (
> -	const void *message_v,
> -	unsigned int nodeid)
> -{
> -	const char *message = message_v;
> -	const struct qb_ipc_response_header *res = (const struct qb_ipc_response_header *)message;
> -	const struct join_list_entry *jle = (const struct join_list_entry *)(message + sizeof(struct qb_ipc_response_header));
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got joinlist message from node %x\n",
> -		nodeid);
> -
> -	/* Ignore our own messages */
> -	if (nodeid == api->totem_nodeid_get()) {
> -		return;
> -	}
> -
> -	while ((const char*)jle < message + res->size) {
> -		do_proc_join (&jle->group_name, jle->pid, nodeid,
> -			CONFCHG_CPG_REASON_NODEUP);
> -		jle++;
> -	}
> -}
> -
> -static void message_handler_req_exec_cpg_mcast (
> -	const void *message,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_cpg_mcast *req_exec_cpg_mcast = message;
> -	struct res_lib_cpg_deliver_callback res_lib_cpg_mcast;
> -	int msglen = req_exec_cpg_mcast->msglen;
> -	struct list_head *iter, *pi_iter;
> -	struct cpg_pd *cpd;
> -	struct iovec iovec[2];
> -	int known_node = 0;
> -
> -	res_lib_cpg_mcast.header.id = MESSAGE_RES_CPG_DELIVER_CALLBACK;
> -	res_lib_cpg_mcast.header.size = sizeof(res_lib_cpg_mcast) + msglen;
> -	res_lib_cpg_mcast.msglen = msglen;
> -	res_lib_cpg_mcast.pid = req_exec_cpg_mcast->pid;
> -	res_lib_cpg_mcast.nodeid = nodeid;
> -
> -	memcpy(&res_lib_cpg_mcast.group_name, &req_exec_cpg_mcast->group_name,
> -		sizeof(mar_cpg_name_t));
> -	iovec[0].iov_base = (void *)&res_lib_cpg_mcast;
> -	iovec[0].iov_len = sizeof (res_lib_cpg_mcast);
> -
> -	iovec[1].iov_base = (char*)message+sizeof(*req_exec_cpg_mcast);
> -	iovec[1].iov_len = msglen;
> -
> -	for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; ) {
> -		cpd = list_entry(iter, struct cpg_pd, list);
> -		iter = iter->next;
> -
> -		if ((cpd->cpd_state == CPD_STATE_LEAVE_STARTED || cpd->cpd_state == CPD_STATE_JOIN_COMPLETED)
> -			&& (mar_name_compare (&cpd->group_name, &req_exec_cpg_mcast->group_name) == 0)) {
> -
> -			if (!known_node) {
> -				/* Try to find, if we know the node */
> -				for (pi_iter = process_info_list_head.next;
> -					pi_iter != &process_info_list_head; pi_iter = pi_iter->next) {
> -
> -					struct process_info *pi = list_entry (pi_iter, struct process_info, list);
> -
> -					if (pi->nodeid == nodeid &&
> -						mar_name_compare (&pi->group, &req_exec_cpg_mcast->group_name) == 0) {
> -						known_node = 1;
> -						break;
> -					}
> -				}
> -			}
> -
> -			if (!known_node) {
> -				log_printf(LOGSYS_LEVEL_WARNING, "Unknown node -> we will not deliver message");
> -				return ;
> -			}
> -
> -			api->ipc_dispatch_iov_send (cpd->conn, iovec, 2);
> -		}
> -	}
> -}
> -
> -
> -static int cpg_exec_send_downlist(void)
> -{
> -	struct iovec iov;
> -
> -	g_req_exec_cpg_downlist.header.id = SERVICE_ID_MAKE(CPG_SERVICE, MESSAGE_REQ_EXEC_CPG_DOWNLIST);
> -	g_req_exec_cpg_downlist.header.size = sizeof(struct req_exec_cpg_downlist);
> -
> -	g_req_exec_cpg_downlist.old_members = my_old_member_list_entries;
> -
> -	iov.iov_base = (void *)&g_req_exec_cpg_downlist;
> -	iov.iov_len = g_req_exec_cpg_downlist.header.size;
> -
> -	return (api->totem_mcast (&iov, 1, TOTEM_AGREED));
> -}
> -
> -static int cpg_exec_send_joinlist(void)
> -{
> -	int count = 0;
> -	struct list_head *iter;
> -	struct qb_ipc_response_header *res;
> - 	char *buf;
> -	struct join_list_entry *jle;
> -	struct iovec req_exec_cpg_iovec;
> -
> - 	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> - 		struct process_info *pi = list_entry (iter, struct process_info, list);
> -
> - 		if (pi->nodeid == api->totem_nodeid_get ()) {
> - 			count++;
> -		}
> -	}
> -
> -	/* Nothing to send */
> -	if (!count)
> -		return 0;
> -
> -	buf = alloca(sizeof(struct qb_ipc_response_header) + sizeof(struct join_list_entry) * count);
> -	if (!buf) {
> -		log_printf(LOGSYS_LEVEL_WARNING, "Unable to allocate joinlist buffer");
> -		return -1;
> -	}
> -
> -	jle = (struct join_list_entry *)(buf + sizeof(struct qb_ipc_response_header));
> -	res = (struct qb_ipc_response_header *)buf;
> -
> - 	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> - 		struct process_info *pi = list_entry (iter, struct process_info, list);
> -
> -		if (pi->nodeid == api->totem_nodeid_get ()) {
> -			memcpy (&jle->group_name, &pi->group, sizeof (mar_cpg_name_t));
> -			jle->pid = pi->pid;
> -			jle++;
> -		}
> -	}
> -
> -	res->id = SERVICE_ID_MAKE(CPG_SERVICE, MESSAGE_REQ_EXEC_CPG_JOINLIST);
> -	res->size = sizeof(struct qb_ipc_response_header)+sizeof(struct join_list_entry) * count;
> -
> -	req_exec_cpg_iovec.iov_base = buf;
> -	req_exec_cpg_iovec.iov_len = res->size;
> -
> -	return (api->totem_mcast (&req_exec_cpg_iovec, 1, TOTEM_AGREED));
> -}
> -
> -static int cpg_lib_init_fn (void *conn)
> -{
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -	memset (cpd, 0, sizeof(struct cpg_pd));
> -	cpd->conn = conn;
> -	list_add (&cpd->list, &cpg_pd_list_head);
> -
> -	list_init (&cpd->iteration_instance_list_head);
> -	list_init (&cpd->zcb_mapped_list_head);
> -
> -	api->ipc_refcnt_inc (conn);
> -	log_printf(LOGSYS_LEVEL_DEBUG, "lib_init_fn: conn=%p, cpd=%p\n", conn, cpd);
> -	return (0);
> -}
> -
> -/* Join message from the library */
> -static void message_handler_req_lib_cpg_join (void *conn, const void *message)
> -{
> -	const struct req_lib_cpg_join *req_lib_cpg_join = message;
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -	struct res_lib_cpg_join res_lib_cpg_join;
> -	cs_error_t error = CS_OK;
> -	struct list_head *iter;
> -
> -	/* Test, if we don't have same pid and group name joined */
> -	for (iter = cpg_pd_list_head.next; iter != &cpg_pd_list_head; iter = iter->next) {
> -		struct cpg_pd *cpd_item = list_entry (iter, struct cpg_pd, list);
> -
> -		if (cpd_item->pid == req_lib_cpg_join->pid &&
> -			mar_name_compare(&req_lib_cpg_join->group_name, &cpd_item->group_name) == 0) {
> -
> -			/* We have same pid and group name joined -> return error */
> -			error = CS_ERR_EXIST;
> -			goto response_send;
> -		}
> -	}
> -
> -	/*
> -	 * Same check must be done in process info list, because there may be not yet delivered
> -	 * leave of client.
> -	 */
> -	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> -		struct process_info *pi = list_entry (iter, struct process_info, list);
> -
> -		if (pi->nodeid == api->totem_nodeid_get () && pi->pid == req_lib_cpg_join->pid &&
> -		    mar_name_compare(&req_lib_cpg_join->group_name, &pi->group) == 0) {
> -			/* We have same pid and group name joined -> return error */
> -			error = CS_ERR_TRY_AGAIN;
> -			goto response_send;
> -		}
> -	}
> -
> -	switch (cpd->cpd_state) {
> -	case CPD_STATE_UNJOINED:
> -		error = CS_OK;
> -		cpd->cpd_state = CPD_STATE_JOIN_STARTED;
> -		cpd->pid = req_lib_cpg_join->pid;
> -		cpd->flags = req_lib_cpg_join->flags;
> -		memcpy (&cpd->group_name, &req_lib_cpg_join->group_name,
> -			sizeof (cpd->group_name));
> -
> -		cpg_node_joinleave_send (req_lib_cpg_join->pid,
> -			&req_lib_cpg_join->group_name,
> -			MESSAGE_REQ_EXEC_CPG_PROCJOIN, CONFCHG_CPG_REASON_JOIN);
> -		break;
> -	case CPD_STATE_LEAVE_STARTED:
> -		error = CS_ERR_BUSY;
> -		break;
> -	case CPD_STATE_JOIN_STARTED:
> -		error = CS_ERR_EXIST;
> -		break;
> -	case CPD_STATE_JOIN_COMPLETED:
> -		error = CS_ERR_EXIST;
> -		break;
> -	}
> -
> -response_send:
> -	res_lib_cpg_join.header.size = sizeof(res_lib_cpg_join);
> -        res_lib_cpg_join.header.id = MESSAGE_RES_CPG_JOIN;
> -        res_lib_cpg_join.header.error = error;
> -        api->ipc_response_send (conn, &res_lib_cpg_join, sizeof(res_lib_cpg_join));
> -}
> -
> -/* Leave message from the library */
> -static void message_handler_req_lib_cpg_leave (void *conn, const void *message)
> -{
> -	struct res_lib_cpg_leave res_lib_cpg_leave;
> -	cs_error_t error = CS_OK;
> -	struct req_lib_cpg_leave  *req_lib_cpg_leave = (struct req_lib_cpg_leave *)message;
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got leave request on %p\n", conn);
> -
> -	switch (cpd->cpd_state) {
> -	case CPD_STATE_UNJOINED:
> -		error = CS_ERR_NOT_EXIST;
> -		break;
> -	case CPD_STATE_LEAVE_STARTED:
> -		error = CS_ERR_NOT_EXIST;
> -		break;
> -	case CPD_STATE_JOIN_STARTED:
> -		error = CS_ERR_BUSY;
> -		break;
> -	case CPD_STATE_JOIN_COMPLETED:
> -		error = CS_OK;
> -		cpd->cpd_state = CPD_STATE_LEAVE_STARTED;
> -		cpg_node_joinleave_send (req_lib_cpg_leave->pid,
> -			&req_lib_cpg_leave->group_name,
> -			MESSAGE_REQ_EXEC_CPG_PROCLEAVE,
> -			CONFCHG_CPG_REASON_LEAVE);
> -		break;
> -	}
> -
> -	/* send return */
> -	res_lib_cpg_leave.header.size = sizeof(res_lib_cpg_leave);
> -	res_lib_cpg_leave.header.id = MESSAGE_RES_CPG_LEAVE;
> -	res_lib_cpg_leave.header.error = error;
> -	api->ipc_response_send(conn, &res_lib_cpg_leave, sizeof(res_lib_cpg_leave));
> -}
> -
> -/* Finalize message from library */
> -static void message_handler_req_lib_cpg_finalize (
> -	void *conn,
> -	const void *message)
> -{
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -	struct res_lib_cpg_finalize res_lib_cpg_finalize;
> -	cs_error_t error = CS_OK;
> -
> -	log_printf (LOGSYS_LEVEL_DEBUG, "cpg finalize for conn=%p\n", conn);
> -
> -	/*
> -	 * We will just remove cpd from list. After this call, connection will be
> -	 * closed on lib side, and cpg_lib_exit_fn will be called
> -	 */
> -	list_del (&cpd->list);
> -	list_init (&cpd->list);
> -
> -	res_lib_cpg_finalize.header.size = sizeof (res_lib_cpg_finalize);
> -	res_lib_cpg_finalize.header.id = MESSAGE_RES_CPG_FINALIZE;
> -	res_lib_cpg_finalize.header.error = error;
> -
> -	api->ipc_response_send (conn, &res_lib_cpg_finalize,
> -		sizeof (res_lib_cpg_finalize));
> -}
> -
> -static int
> -memory_map (
> -	const char *path,
> -	size_t bytes,
> -	void **buf)
> -{
> -	int32_t fd;
> -	void *addr_orig;
> -	void *addr;
> -	int32_t res;
> -
> -	fd = open (path, O_RDWR, 0600);
> -
> -	unlink (path);
> -
> -	if (fd == -1) {
> -		return (-1);
> -	}
> -
> -	res = ftruncate (fd, bytes);
> -	if (res == -1) {
> -		goto error_close_unlink;
> -	}
> -
> -	addr_orig = mmap (NULL, bytes, PROT_NONE,
> -		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
> -
> -	if (addr_orig == MAP_FAILED) {
> -		goto error_close_unlink;
> -	}
> -
> -	addr = mmap (addr_orig, bytes, PROT_READ | PROT_WRITE,
> -		MAP_FIXED | MAP_SHARED, fd, 0);
> -
> -	if (addr != addr_orig) {
> -		munmap(addr_orig, bytes);
> -		goto error_close_unlink;
> -	}
> -#ifdef COROSYNC_BSD
> -	madvise(addr, bytes, MADV_NOSYNC);
> -#endif
> -
> -	res = close (fd);
> -	if (res) {
> -		return (-1);
> -	}
> -	*buf = addr_orig;
> -	return (0);
> -
> -error_close_unlink:
> -	close (fd);
> -	unlink(path);
> -	return -1;
> -}
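
For anyone puzzled by the double mmap() above: the anonymous PROT_NONE mapping only reserves an address range, and the second MAP_FIXED call maps the file over exactly that range. A small standalone version of the same pattern, using a throwaway mkstemp() file instead of the path handed in by the library side (Linux/BSD, illustration only):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char path[] = "/tmp/zcb-demo-XXXXXX";
	size_t bytes = 4096;
	int fd = mkstemp(path);

	if (fd == -1 || ftruncate(fd, bytes) == -1)
		return 1;
	unlink(path);                         /* file lives on only via the fd */

	/* 1. reserve an address range without backing it yet */
	void *reserved = mmap(NULL, bytes, PROT_NONE,
			      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (reserved == MAP_FAILED)
		return 1;

	/* 2. map the file over exactly that range */
	void *addr = mmap(reserved, bytes, PROT_READ | PROT_WRITE,
			  MAP_FIXED | MAP_SHARED, fd, 0);
	if (addr != reserved)
		return 1;

	strcpy(addr, "hello");                /* backed by the (unlinked) file */
	printf("%s at %p\n", (char *)addr, addr);

	munmap(addr, bytes);
	close(fd);
	return 0;
}

As in the code above, unlinking right after open means the backing file disappears with the last mapping.
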
> -
> -static inline int zcb_alloc (
> -	struct cpg_pd *cpd,
> -	const char *path_to_file,
> -	size_t size,
> -	void **addr)
> -{
> -	struct zcb_mapped *zcb_mapped;
> -	unsigned int res;
> -
> -	zcb_mapped = malloc (sizeof (struct zcb_mapped));
> -	if (zcb_mapped == NULL) {
> -		return (-1);
> -	}
> -
> -	res = memory_map (
> -		path_to_file,
> -		size,
> -		addr);
> -	if (res == -1) {
> -		free (zcb_mapped);
> -		return (-1);
> -	}
> -
> -	list_init (&zcb_mapped->list);
> -	zcb_mapped->addr = *addr;
> -	zcb_mapped->size = size;
> -	list_add_tail (&zcb_mapped->list, &cpd->zcb_mapped_list_head);
> -	return (0);
> -}
> -
> -
> -static inline int zcb_free (struct zcb_mapped *zcb_mapped)
> -{
> -	unsigned int res;
> -
> -	res = munmap (zcb_mapped->addr, zcb_mapped->size);
> -	list_del (&zcb_mapped->list);
> -	free (zcb_mapped);
> -	return (res);
> -}
> -
> -static inline int zcb_by_addr_free (struct cpg_pd *cpd, void *addr)
> -{
> -	struct list_head *list;
> -	struct zcb_mapped *zcb_mapped;
> -	unsigned int res = 0;
> -
> -	for (list = cpd->zcb_mapped_list_head.next;
> -		list != &cpd->zcb_mapped_list_head; list = list->next) {
> -
> -		zcb_mapped = list_entry (list, struct zcb_mapped, list);
> -
> -		if (zcb_mapped->addr == addr) {
> -			res = zcb_free (zcb_mapped);
> -			break;
> -		}
> -
> -	}
> -	return (res);
> -}
> -
> -static inline int zcb_all_free (
> -	struct cpg_pd *cpd)
> -{
> -	struct list_head *list;
> -	struct zcb_mapped *zcb_mapped;
> -
> -	for (list = cpd->zcb_mapped_list_head.next;
> -		list != &cpd->zcb_mapped_list_head;) {
> -
> -		zcb_mapped = list_entry (list, struct zcb_mapped, list);
> -
> -		list = list->next;
> -
> -		zcb_free (zcb_mapped);
> -	}
> -	return (0);
> -}
> -
> -union u {
> -	uint64_t server_addr;
> -	void *server_ptr;
> -};
> -
> -static uint64_t void2serveraddr (void *server_ptr)
> -{
> -	union u u;
> -
> -	u.server_ptr = server_ptr;
> -	return (u.server_addr);
> -}
> -
> -static void *serveraddr2void (uint64_t server_addr)
> -{
> -	union u u;
> -
> -	u.server_addr = server_addr;
> -	return (u.server_ptr);
> -};
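
The union here is just a way to ferry a local pointer through the fixed-width server_address field of the marshalled request and get the identical pointer back in the same process. A tiny standalone round-trip check (assumes pointers fit in 64 bits, which the union itself also assumes; illustration only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union u {
	uint64_t server_addr;
	void *server_ptr;
};

int main(void)
{
	int x = 42;
	union u a = { .server_ptr = &x };              /* pointer -> handle  */
	union u b = { .server_addr = a.server_addr };  /* handle  -> pointer */

	assert(b.server_ptr == &x);
	printf("round-tripped value: %d\n", *(int *)b.server_ptr);
	return 0;
}
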
> -
> -static void message_handler_req_lib_cpg_zc_alloc (
> -	void *conn,
> -	const void *message)
> -{
> -	mar_req_coroipcc_zc_alloc_t *hdr = (mar_req_coroipcc_zc_alloc_t *)message;
> -	struct qb_ipc_response_header res_header;
> -	void *addr = NULL;
> -	struct coroipcs_zc_header *zc_header;
> -	unsigned int res;
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "path: %s", hdr->path_to_file);
> -
> -	res = zcb_alloc (cpd, hdr->path_to_file, hdr->map_size,
> -		&addr);
> -	assert(res == 0);
> -
> -	zc_header = (struct coroipcs_zc_header *)addr;
> -	zc_header->server_address = void2serveraddr(addr);
> -
> -	res_header.size = sizeof (struct qb_ipc_response_header);
> -	res_header.id = 0;
> -	api->ipc_response_send (conn,
> -		&res_header,
> -		res_header.size);
> -}
> -
> -static void message_handler_req_lib_cpg_zc_free (
> -	void *conn,
> -	const void *message)
> -{
> -	mar_req_coroipcc_zc_free_t *hdr = (mar_req_coroipcc_zc_free_t *)message;
> -	struct qb_ipc_response_header res_header;
> -	void *addr = NULL;
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, " free'ing");
> -
> -	addr = serveraddr2void (hdr->server_address);
> -
> -	zcb_by_addr_free (cpd, addr);
> -
> -	res_header.size = sizeof (struct qb_ipc_response_header);
> -	res_header.id = 0;
> -	api->ipc_response_send (
> -		conn, &res_header,
> -		res_header.size);
> -}
> -
> -/* Mcast message from the library */
> -static void message_handler_req_lib_cpg_mcast (void *conn, const void *message)
> -{
> -	const struct req_lib_cpg_mcast *req_lib_cpg_mcast = message;
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -	mar_cpg_name_t group_name = cpd->group_name;
> -
> -	struct iovec req_exec_cpg_iovec[2];
> -	struct req_exec_cpg_mcast req_exec_cpg_mcast;
> -	int msglen = req_lib_cpg_mcast->msglen;
> -	int result;
> -	cs_error_t error = CS_ERR_NOT_EXIST;
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got mcast request on %p\n", conn);
> -
> -	switch (cpd->cpd_state) {
> -	case CPD_STATE_UNJOINED:
> -		error = CS_ERR_NOT_EXIST;
> -		break;
> -	case CPD_STATE_LEAVE_STARTED:
> -		error = CS_ERR_NOT_EXIST;
> -		break;
> -	case CPD_STATE_JOIN_STARTED:
> -		error = CS_OK;
> -		break;
> -	case CPD_STATE_JOIN_COMPLETED:
> -		error = CS_OK;
> -		break;
> -	}
> -
> -	if (error == CS_OK) {
> -		req_exec_cpg_mcast.header.size = sizeof(req_exec_cpg_mcast) + msglen;
> -		req_exec_cpg_mcast.header.id = SERVICE_ID_MAKE(CPG_SERVICE,
> -			MESSAGE_REQ_EXEC_CPG_MCAST);
> -		req_exec_cpg_mcast.pid = cpd->pid;
> -		req_exec_cpg_mcast.msglen = msglen;
> -		api->ipc_source_set (&req_exec_cpg_mcast.source, conn);
> -		memcpy(&req_exec_cpg_mcast.group_name, &group_name,
> -			sizeof(mar_cpg_name_t));
> -
> -		req_exec_cpg_iovec[0].iov_base = (char *)&req_exec_cpg_mcast;
> -		req_exec_cpg_iovec[0].iov_len = sizeof(req_exec_cpg_mcast);
> -		req_exec_cpg_iovec[1].iov_base = (char *)&req_lib_cpg_mcast->message;
> -		req_exec_cpg_iovec[1].iov_len = msglen;
> -
> -		result = api->totem_mcast (req_exec_cpg_iovec, 2, TOTEM_AGREED);
> -		assert(result == 0);
> -	} else {
> -		log_printf(LOGSYS_LEVEL_ERROR, "*** %p can't mcast to group %s state:%d, error:%d\n",
> -			conn, group_name.value, cpd->cpd_state, error);
> -	}
> -}
> -
> -static void message_handler_req_lib_cpg_zc_execute (
> -	void *conn,
> -	const void *message)
> -{
> -	mar_req_coroipcc_zc_execute_t *hdr = (mar_req_coroipcc_zc_execute_t *)message;
> -	struct qb_ipc_request_header *header;
> -	struct res_lib_cpg_mcast res_lib_cpg_mcast;
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -	struct iovec req_exec_cpg_iovec[2];
> -	struct req_exec_cpg_mcast req_exec_cpg_mcast;
> -	struct req_lib_cpg_mcast *req_lib_cpg_mcast;
> -	int result;
> -	cs_error_t error = CS_ERR_NOT_EXIST;
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got ZC mcast request on %p\n", conn);
> -
> -	header = (struct qb_ipc_request_header *)(((char *)serveraddr2void(hdr->server_address) + sizeof (struct coroipcs_zc_header)));
> -	req_lib_cpg_mcast = (struct req_lib_cpg_mcast *)header;
> -
> -	switch (cpd->cpd_state) {
> -	case CPD_STATE_UNJOINED:
> -		error = CS_ERR_NOT_EXIST;
> -		break;
> -	case CPD_STATE_LEAVE_STARTED:
> -		error = CS_ERR_NOT_EXIST;
> -		break;
> -	case CPD_STATE_JOIN_STARTED:
> -		error = CS_OK;
> -		break;
> -	case CPD_STATE_JOIN_COMPLETED:
> -		error = CS_OK;
> -		break;
> -	}
> -
> -	res_lib_cpg_mcast.header.size = sizeof(res_lib_cpg_mcast);
> -	res_lib_cpg_mcast.header.id = MESSAGE_RES_CPG_MCAST;
> -	if (error == CS_OK) {
> -		req_exec_cpg_mcast.header.size = sizeof(req_exec_cpg_mcast) + req_lib_cpg_mcast->msglen;
> -		req_exec_cpg_mcast.header.id = SERVICE_ID_MAKE(CPG_SERVICE,
> -			MESSAGE_REQ_EXEC_CPG_MCAST);
> -		req_exec_cpg_mcast.pid = cpd->pid;
> -		req_exec_cpg_mcast.msglen = req_lib_cpg_mcast->msglen;
> -		api->ipc_source_set (&req_exec_cpg_mcast.source, conn);
> -		memcpy(&req_exec_cpg_mcast.group_name, &cpd->group_name,
> -			sizeof(mar_cpg_name_t));
> -
> -		req_exec_cpg_iovec[0].iov_base = (char *)&req_exec_cpg_mcast;
> -		req_exec_cpg_iovec[0].iov_len = sizeof(req_exec_cpg_mcast);
> -		req_exec_cpg_iovec[1].iov_base = (char *)header + sizeof(struct req_lib_cpg_mcast);
> -		req_exec_cpg_iovec[1].iov_len = req_exec_cpg_mcast.msglen;
> -
> -		result = api->totem_mcast (req_exec_cpg_iovec, 2, TOTEM_AGREED);
> -		if (result == 0) {
> -			res_lib_cpg_mcast.header.error = CS_OK;
> -		} else {
> -			res_lib_cpg_mcast.header.error = CS_ERR_TRY_AGAIN;
> -		}
> -	} else {
> -		res_lib_cpg_mcast.header.error = error;
> -	}
> -
> -	api->ipc_response_send (conn, &res_lib_cpg_mcast,
> -		sizeof (res_lib_cpg_mcast));
> -
> -}
> -
> -static void message_handler_req_lib_cpg_membership (void *conn,
> -						    const void *message)
> -{
> -	struct req_lib_cpg_membership_get *req_lib_cpg_membership_get =
> -		(struct req_lib_cpg_membership_get *)message;
> -	struct res_lib_cpg_membership_get res_lib_cpg_membership_get;
> -	struct list_head *iter;
> -	int member_count = 0;
> -
> -	res_lib_cpg_membership_get.header.id = MESSAGE_RES_CPG_MEMBERSHIP;
> -	res_lib_cpg_membership_get.header.error = CS_OK;
> -	res_lib_cpg_membership_get.header.size =
> -		sizeof (struct req_lib_cpg_membership_get);
> -
> -	for (iter = process_info_list_head.next;
> -		iter != &process_info_list_head; iter = iter->next) {
> -
> -		struct process_info *pi = list_entry (iter, struct process_info, list);
> -		if (mar_name_compare (&pi->group, &req_lib_cpg_membership_get->group_name) == 0) {
> -			res_lib_cpg_membership_get.member_list[member_count].nodeid = pi->nodeid;
> -			res_lib_cpg_membership_get.member_list[member_count].pid = pi->pid;
> -			member_count += 1;
> -		}
> -	}
> -	res_lib_cpg_membership_get.member_count = member_count;
> -
> -	api->ipc_response_send (conn, &res_lib_cpg_membership_get,
> -		sizeof (res_lib_cpg_membership_get));
> -}
> -
> -static void message_handler_req_lib_cpg_local_get (void *conn,
> -						   const void *message)
> -{
> -	struct res_lib_cpg_local_get res_lib_cpg_local_get;
> -
> -	res_lib_cpg_local_get.header.size = sizeof (res_lib_cpg_local_get);
> -	res_lib_cpg_local_get.header.id = MESSAGE_RES_CPG_LOCAL_GET;
> -	res_lib_cpg_local_get.header.error = CS_OK;
> -	res_lib_cpg_local_get.local_nodeid = api->totem_nodeid_get ();
> -
> -	api->ipc_response_send (conn, &res_lib_cpg_local_get,
> -		sizeof (res_lib_cpg_local_get));
> -}
> -
> -static void message_handler_req_lib_cpg_iteration_initialize (
> -	void *conn,
> -	const void *message)
> -{
> -	const struct req_lib_cpg_iterationinitialize *req_lib_cpg_iterationinitialize = message;
> -	struct cpg_pd *cpd = (struct cpg_pd *)api->ipc_private_data_get (conn);
> -	hdb_handle_t cpg_iteration_handle = 0;
> -	struct res_lib_cpg_iterationinitialize res_lib_cpg_iterationinitialize;
> -	struct list_head *iter, *iter2;
> -	struct cpg_iteration_instance *cpg_iteration_instance;
> -	cs_error_t error = CS_OK;
> -	int res;
> -
> -	log_printf (LOGSYS_LEVEL_DEBUG, "cpg iteration initialize\n");
> -
> -	/* Because between calling this function and *next can be some operations which will
> -	 * change list, we must do full copy.
> -	 */
> -
> -	/*
> -	 * Create new iteration instance
> -	 */
> -	res = hdb_handle_create (&cpg_iteration_handle_t_db, sizeof (struct cpg_iteration_instance),
> -			&cpg_iteration_handle);
> -
> -	if (res != 0) {
> -		error = CS_ERR_NO_MEMORY;
> -		goto response_send;
> -	}
> -
> -	res = hdb_handle_get (&cpg_iteration_handle_t_db, cpg_iteration_handle, (void *)&cpg_iteration_instance);
> -
> -	if (res != 0) {
> -		error = CS_ERR_BAD_HANDLE;
> -		goto error_destroy;
> -	}
> -
> -	list_init (&cpg_iteration_instance->items_list_head);
> -	cpg_iteration_instance->handle = cpg_iteration_handle;
> -
> -	/*
> -	 * Create copy of process_info list "grouped by" group name
> -	 */
> -	for (iter = process_info_list_head.next; iter != &process_info_list_head; iter = iter->next) {
> -		struct process_info *pi = list_entry (iter, struct process_info, list);
> -		struct process_info *new_pi;
> -
> -		if (req_lib_cpg_iterationinitialize->iteration_type == CPG_ITERATION_NAME_ONLY) {
> -			/*
> -			 * Try to find processed group name in our list new list
> -			 */
> -			int found = 0;
> -
> -			for (iter2 = cpg_iteration_instance->items_list_head.next;
> -			     iter2 != &cpg_iteration_instance->items_list_head;
> -			     iter2 = iter2->next) {
> -				 struct process_info *pi2 = list_entry (iter2, struct process_info, list);
> -
> -				 if (mar_name_compare (&pi2->group, &pi->group) == 0) {
> -					found = 1;
> -					break;
> -				 }
> -			}
> -
> -			if (found) {
> -				/*
> -				 * We have this name in list -> don't add
> -				 */
> -				continue ;
> -			}
> -		} else if (req_lib_cpg_iterationinitialize->iteration_type == CPG_ITERATION_ONE_GROUP) {
> -			/*
> -			 * Test pi group name with request
> -			 */
> -			if (mar_name_compare (&pi->group, &req_lib_cpg_iterationinitialize->group_name) != 0)
> -				/*
> -				 * Not same -> don't add
> -				 */
> -				continue ;
> -		}
> -
> -		new_pi = malloc (sizeof (struct process_info));
> -		if (!new_pi) {
> -			log_printf(LOGSYS_LEVEL_WARNING, "Unable to allocate process_info struct");
> -
> -			error = CS_ERR_NO_MEMORY;
> -
> -			goto error_put_destroy;
> -		}
> -
> -		memcpy (new_pi, pi, sizeof (struct process_info));
> -		list_init (&new_pi->list);
> -
> -		if (req_lib_cpg_iterationinitialize->iteration_type == CPG_ITERATION_NAME_ONLY) {
> -			/*
> -			 * pid and nodeid -> undefined
> -			 */
> -			new_pi->pid = new_pi->nodeid = 0;
> -		}
> -
> -		/*
> -		 * We will return list "grouped" by "group name", so try to find right place to add
> -		 */
> -		for (iter2 = cpg_iteration_instance->items_list_head.next;
> -		     iter2 != &cpg_iteration_instance->items_list_head;
> -		     iter2 = iter2->next) {
> -			 struct process_info *pi2 = list_entry (iter2, struct process_info, list);
> -
> -			 if (mar_name_compare (&pi2->group, &pi->group) == 0) {
> -				break;
> -			 }
> -		}
> -
> -		list_add (&new_pi->list, iter2);
> -	}
> -
> -	/*
> -	 * Now we have a full "grouped by" copy of process_info list
> -	 */
> -
> -	/*
> -	 * Add instance to current cpd list
> -	 */
> -	list_init (&cpg_iteration_instance->list);
> -	list_add (&cpg_iteration_instance->list, &cpd->iteration_instance_list_head);
> -
> -	cpg_iteration_instance->current_pointer = &cpg_iteration_instance->items_list_head;
> -
> -error_put_destroy:
> -	hdb_handle_put (&cpg_iteration_handle_t_db, cpg_iteration_handle);
> -error_destroy:
> -	if (error != CS_OK) {
> -		hdb_handle_destroy (&cpg_iteration_handle_t_db, cpg_iteration_handle);
> -	}
> -
> -response_send:
> -	res_lib_cpg_iterationinitialize.header.size = sizeof (res_lib_cpg_iterationinitialize);
> -	res_lib_cpg_iterationinitialize.header.id = MESSAGE_RES_CPG_ITERATIONINITIALIZE;
> -	res_lib_cpg_iterationinitialize.header.error = error;
> -	res_lib_cpg_iterationinitialize.iteration_handle = cpg_iteration_handle;
> -
> -	api->ipc_response_send (conn, &res_lib_cpg_iterationinitialize,
> -		sizeof (res_lib_cpg_iterationinitialize));
> -}
> -
> -static void message_handler_req_lib_cpg_iteration_next (
> -	void *conn,
> -	const void *message)
> -{
> -	const struct req_lib_cpg_iterationnext *req_lib_cpg_iterationnext = message;
> -	struct res_lib_cpg_iterationnext res_lib_cpg_iterationnext;
> -	struct cpg_iteration_instance *cpg_iteration_instance;
> -	cs_error_t error = CS_OK;
> -	int res;
> -	struct process_info *pi;
> -
> -	log_printf (LOGSYS_LEVEL_DEBUG, "cpg iteration next\n");
> -
> -	res = hdb_handle_get (&cpg_iteration_handle_t_db,
> -			req_lib_cpg_iterationnext->iteration_handle,
> -			(void *)&cpg_iteration_instance);
> -
> -	if (res != 0) {
> -		error = CS_ERR_LIBRARY;
> -		goto error_exit;
> -	}
> -
> -	assert (cpg_iteration_instance);
> -
> -	cpg_iteration_instance->current_pointer = cpg_iteration_instance->current_pointer->next;
> -
> -	if (cpg_iteration_instance->current_pointer == &cpg_iteration_instance->items_list_head) {
> -		error = CS_ERR_NO_SECTIONS;
> -		goto error_put;
> -	}
> -
> -	pi = list_entry (cpg_iteration_instance->current_pointer, struct process_info, list);
> -
> -	/*
> -	 * Copy iteration data
> -	 */
> -	res_lib_cpg_iterationnext.description.nodeid = pi->nodeid;
> -	res_lib_cpg_iterationnext.description.pid = pi->pid;
> -	memcpy (&res_lib_cpg_iterationnext.description.group,
> -			&pi->group,
> -			sizeof (mar_cpg_name_t));
> -
> -error_put:
> -	hdb_handle_put (&cpg_iteration_handle_t_db, req_lib_cpg_iterationnext->iteration_handle);
> -error_exit:
> -	res_lib_cpg_iterationnext.header.size = sizeof (res_lib_cpg_iterationnext);
> -	res_lib_cpg_iterationnext.header.id = MESSAGE_RES_CPG_ITERATIONNEXT;
> -	res_lib_cpg_iterationnext.header.error = error;
> -
> -	api->ipc_response_send (conn, &res_lib_cpg_iterationnext,
> -		sizeof (res_lib_cpg_iterationnext));
> -}
> -
> -static void message_handler_req_lib_cpg_iteration_finalize (
> -	void *conn,
> -	const void *message)
> -{
> -	const struct req_lib_cpg_iterationfinalize *req_lib_cpg_iterationfinalize = message;
> -	struct res_lib_cpg_iterationfinalize res_lib_cpg_iterationfinalize;
> -	struct cpg_iteration_instance *cpg_iteration_instance;
> -	cs_error_t error = CS_OK;
> -	int res;
> -
> -	log_printf (LOGSYS_LEVEL_DEBUG, "cpg iteration finalize\n");
> -
> -	res = hdb_handle_get (&cpg_iteration_handle_t_db,
> -			req_lib_cpg_iterationfinalize->iteration_handle,
> -			(void *)&cpg_iteration_instance);
> -
> -	if (res != 0) {
> -		error = CS_ERR_LIBRARY;
> -		goto error_exit;
> -	}
> -
> -	assert (cpg_iteration_instance);
> -
> -	cpg_iteration_instance_finalize (cpg_iteration_instance);
> -	hdb_handle_put (&cpg_iteration_handle_t_db, cpg_iteration_instance->handle);
> -
> -error_exit:
> -	res_lib_cpg_iterationfinalize.header.size = sizeof (res_lib_cpg_iterationfinalize);
> -	res_lib_cpg_iterationfinalize.header.id = MESSAGE_RES_CPG_ITERATIONFINALIZE;
> -	res_lib_cpg_iterationfinalize.header.error = error;
> -
> -	api->ipc_response_send (conn, &res_lib_cpg_iterationfinalize,
> -		sizeof (res_lib_cpg_iterationfinalize));
> -}
> diff --git a/services/evs.c b/services/evs.c
> deleted file mode 100644
> index bad8154..0000000
> --- a/services/evs.c
> +++ /dev/null
> @@ -1,531 +0,0 @@
> -/*
> - * Copyright (c) 2004-2006 MontaVista Software, Inc.
> - * Copyright (c) 2006-2009 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Steven Dake (sdake@xxxxxxxxxx)
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the MontaVista Software, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <sys/types.h>
> -#include <sys/socket.h>
> -#include <sys/un.h>
> -#include <sys/ioctl.h>
> -#include <netinet/in.h>
> -#include <sys/uio.h>
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <stdlib.h>
> -#include <stdio.h>
> -#include <errno.h>
> -#include <assert.h>
> -#include <time.h>
> -#include <netinet/in.h>
> -#include <arpa/inet.h>
> -
> -#include <corosync/swab.h>
> -#include <corosync/corotypes.h>
> -#include <qb/qbipc_common.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/mar_gen.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/coroapi.h>
> -#include <corosync/logsys.h>
> -#include <corosync/list.h>
> -
> -#include <corosync/evs.h>
> -#include <corosync/ipc_evs.h>
> -
> -LOGSYS_DECLARE_SUBSYS ("EVS");
> -
> -enum evs_exec_message_req_types {
> -	MESSAGE_REQ_EXEC_EVS_MCAST = 0
> -};
> -
> -/*
> - * Service Interfaces required by service_message_handler struct
> - */
> -static int evs_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api);
> -
> -static void evs_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id);
> -
> -static void message_handler_req_exec_mcast (const void *msg, unsigned int nodeid);
> -
> -static void req_exec_mcast_endian_convert (void *msg);
> -
> -static void message_handler_req_evs_join (void *conn, const void *msg);
> -static void message_handler_req_evs_leave (void *conn, const void *msg);
> -static void message_handler_req_evs_mcast_joined (void *conn, const void *msg);
> -static void message_handler_req_evs_mcast_groups (void *conn, const void *msg);
> -static void message_handler_req_evs_membership_get (void *conn, const void *msg);
> -
> -static int evs_lib_init_fn (void *conn);
> -static int evs_lib_exit_fn (void *conn);
> -
> -struct evs_pd {
> -	struct evs_group *groups;
> -	int group_entries;
> -	struct list_head list;
> -	void *conn;
> -};
> -
> -static struct corosync_api_v1 *api;
> -
> -static struct corosync_lib_handler evs_lib_engine[] =
> -{
> -	{ /* 0 */
> -		.lib_handler_fn				= message_handler_req_evs_join,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 1 */
> -		.lib_handler_fn				= message_handler_req_evs_leave,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 2 */
> -		.lib_handler_fn				= message_handler_req_evs_mcast_joined,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 3 */
> -		.lib_handler_fn				= message_handler_req_evs_mcast_groups,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_REQUIRED
> -	},
> -	{ /* 4 */
> -		.lib_handler_fn				= message_handler_req_evs_membership_get,
> -		.flow_control				= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	}
> -};
> -
> -static struct corosync_exec_handler evs_exec_engine[] =
> -{
> -	{
> -		.exec_handler_fn 	= message_handler_req_exec_mcast,
> -		.exec_endian_convert_fn	= req_exec_mcast_endian_convert
> -	}
> -};
> -
> -struct corosync_service_engine evs_service_engine = {
> -	.name			= "corosync extended virtual synchrony service",
> -	.id			= EVS_SERVICE,
> -	.priority		= 1,
> -	.private_data_size	= sizeof (struct evs_pd),
> -	.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED,
> -	.lib_init_fn		= evs_lib_init_fn,
> -	.lib_exit_fn		= evs_lib_exit_fn,
> -	.lib_engine		= evs_lib_engine,
> -	.lib_engine_count	= sizeof (evs_lib_engine) / sizeof (struct corosync_lib_handler),
> -	.exec_engine		= evs_exec_engine,
> -	.exec_engine_count	= sizeof (evs_exec_engine) / sizeof (struct corosync_exec_handler),
> -	.confchg_fn		= evs_confchg_fn,
> -	.exec_init_fn		= evs_exec_init_fn,
> -	.exec_dump_fn		= NULL,
> -	.sync_mode		= CS_SYNC_V1
> -};
> -
> -static DECLARE_LIST_INIT (confchg_notify);
> -
> -/*
> - * Dynamic loading descriptor
> - */
> -
> -static struct corosync_service_engine *evs_get_service_engine_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 evs_service_engine_iface = {
> -	.corosync_get_service_engine_ver0	= evs_get_service_engine_ver0
> -};
> -
> -static struct lcr_iface corosync_evs_ver0[1] = {
> -	{
> -		.name			= "corosync_evs",
> -		.version		= 0,
> -		.versions_replace	= 0,
> -		.versions_replace_count = 0,
> -		.dependencies		= 0,
> -		.dependency_count	= 0,
> -		.constructor		= NULL,
> -		.destructor		= NULL,
> -		.interfaces		= NULL,
> -	}
> -};
> -
> -static struct lcr_comp evs_comp_ver0 = {
> -	.iface_count	= 1,
> -	.ifaces		= corosync_evs_ver0
> -};
> -
> -static struct corosync_service_engine *evs_get_service_engine_ver0 (void)
> -{
> -	return (&evs_service_engine);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_interfaces_set (&corosync_evs_ver0[0], &evs_service_engine_iface);
> -
> -	lcr_component_register (&evs_comp_ver0);
> -}
> -
> -static int evs_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api)
> -{
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -
> -	api = corosync_api;
> -
> -	return 0;
> -}
> -
> -struct res_evs_confchg_callback res_evs_confchg_callback;
> -
> -static void evs_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id)
> -{
> -	struct list_head *list;
> -	struct evs_pd *evs_pd;
> -
> -	/*
> -	 * Build configuration change message
> -	 */
> -	res_evs_confchg_callback.header.size = sizeof (struct res_evs_confchg_callback);
> -	res_evs_confchg_callback.header.id = MESSAGE_RES_EVS_CONFCHG_CALLBACK;
> -	res_evs_confchg_callback.header.error = CS_OK;
> -
> -	memcpy (res_evs_confchg_callback.member_list,
> -		member_list, member_list_entries * sizeof(*member_list));
> -	res_evs_confchg_callback.member_list_entries = member_list_entries;
> -
> -	memcpy (res_evs_confchg_callback.left_list,
> -		left_list, left_list_entries * sizeof(*left_list));
> -	res_evs_confchg_callback.left_list_entries = left_list_entries;
> -
> -	memcpy (res_evs_confchg_callback.joined_list,
> -		joined_list, joined_list_entries * sizeof(*joined_list));
> -	res_evs_confchg_callback.joined_list_entries = joined_list_entries;
> -
> -	/*
> -	 * Send configuration change message to every EVS library user
> -	 */
> -	for (list = confchg_notify.next; list != &confchg_notify; list = list->next) {
> -		evs_pd = list_entry (list, struct evs_pd, list);
> -		api->ipc_dispatch_send (evs_pd->conn,
> -			&res_evs_confchg_callback,
> -			sizeof (res_evs_confchg_callback));
> -	}
> -}
> -
> -static int evs_lib_init_fn (void *conn)
> -{
> -	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> -
> -	log_printf (LOGSYS_LEVEL_DEBUG, "Got request to initalize evs service.\n");
> -
> -	evs_pd->groups = NULL;
> -	evs_pd->group_entries = 0;
> -	evs_pd->conn = conn;
> -	list_init (&evs_pd->list);
> -	list_add (&evs_pd->list, &confchg_notify);
> -
> -	api->ipc_dispatch_send (conn, &res_evs_confchg_callback,
> -		sizeof (res_evs_confchg_callback));
> -
> -	return (0);
> -}
> -
> -static int evs_lib_exit_fn (void *conn)
> -{
> -    struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> -
> -	list_del (&evs_pd->list);
> -	return (0);
> -}
> -
> -static void message_handler_req_evs_join (void *conn, const void *msg)
> -{
> -	cs_error_t error = CS_OK;
> -	const struct req_lib_evs_join *req_lib_evs_join = msg;
> -	struct res_lib_evs_join res_lib_evs_join;
> -	void *addr;
> -	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> -
> -	if (req_lib_evs_join->group_entries > 50) {
> -		error = CS_ERR_TOO_MANY_GROUPS;
> -		goto exit_error;
> -	}
> -
> -	addr = realloc (evs_pd->groups, sizeof (struct evs_group) *
> -		(evs_pd->group_entries + req_lib_evs_join->group_entries));
> -	if (addr == NULL) {
> -		error = CS_ERR_NO_MEMORY;
> -		goto exit_error;
> -	}
> -	evs_pd->groups = addr;
> -
> -	memcpy (&evs_pd->groups[evs_pd->group_entries],
> -		req_lib_evs_join->groups,
> -		sizeof (struct evs_group) * req_lib_evs_join->group_entries);
> -
> -	evs_pd->group_entries += req_lib_evs_join->group_entries;
> -
> -exit_error:
> -	res_lib_evs_join.header.size = sizeof (struct res_lib_evs_join);
> -	res_lib_evs_join.header.id = MESSAGE_RES_EVS_JOIN;
> -	res_lib_evs_join.header.error = error;
> -
> -	api->ipc_response_send (conn, &res_lib_evs_join,
> -		sizeof (struct res_lib_evs_join));
> -}
> -
> -static void message_handler_req_evs_leave (void *conn, const void *msg)
> -{
> -	const struct req_lib_evs_leave *req_lib_evs_leave = msg;
> -	struct res_lib_evs_leave res_lib_evs_leave;
> -	cs_error_t error = CS_OK;
> -	int i, j;
> -	int found;
> -	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> -
> -	for (i = 0; i < req_lib_evs_leave->group_entries; i++) {
> -		found = 0;
> -		for (j = 0; j < evs_pd->group_entries;) {
> -
> -			if (memcmp (&req_lib_evs_leave->groups[i],
> -				&evs_pd->groups[j], sizeof (struct evs_group)) == 0) {
> -
> -				/*
> -				 * Delete entry
> -				 */
> -				memmove (&evs_pd->groups[j], &evs_pd->groups[j + 1],
> -					(evs_pd->group_entries - j - 1) * sizeof (struct evs_group));
> -
> -				evs_pd->group_entries -= 1;
> -
> -				found = 1;
> -				break;
> -			} else {
> -				j++;
> -			}
> -		}
> -		if (found == 0) {
> -			error = CS_ERR_NOT_EXIST;
> -			break;
> -		}
> -	}
> -
> -	res_lib_evs_leave.header.size = sizeof (struct res_lib_evs_leave);
> -	res_lib_evs_leave.header.id = MESSAGE_RES_EVS_LEAVE;
> -	res_lib_evs_leave.header.error = error;
> -
> -	api->ipc_response_send (conn, &res_lib_evs_leave,
> -		sizeof (struct res_lib_evs_leave));
> -}
> -
> -static void message_handler_req_evs_mcast_joined (void *conn, const void *msg)
> -{
> -	cs_error_t error = CS_ERR_TRY_AGAIN;
> -	const struct req_lib_evs_mcast_joined *req_lib_evs_mcast_joined = msg;
> -	struct res_lib_evs_mcast_joined res_lib_evs_mcast_joined;
> -	struct iovec req_exec_evs_mcast_iovec[3];
> -	struct req_exec_evs_mcast req_exec_evs_mcast;
> -	int res;
> -	struct evs_pd *evs_pd = (struct evs_pd *)api->ipc_private_data_get (conn);
> -
> -	req_exec_evs_mcast.header.size = sizeof (struct req_exec_evs_mcast) +
> -		evs_pd->group_entries * sizeof (struct evs_group) +
> -		req_lib_evs_mcast_joined->msg_len;
> -
> -	req_exec_evs_mcast.header.id =
> -		SERVICE_ID_MAKE (EVS_SERVICE, MESSAGE_REQ_EXEC_EVS_MCAST);
> -	req_exec_evs_mcast.msg_len = req_lib_evs_mcast_joined->msg_len;
> -	req_exec_evs_mcast.group_entries = evs_pd->group_entries;
> -
> -	req_exec_evs_mcast_iovec[0].iov_base = (char *)&req_exec_evs_mcast;
> -	req_exec_evs_mcast_iovec[0].iov_len = sizeof (req_exec_evs_mcast);
> -	req_exec_evs_mcast_iovec[1].iov_base = (char *)evs_pd->groups;
> -	req_exec_evs_mcast_iovec[1].iov_len = evs_pd->group_entries * sizeof (struct evs_group);
> -	req_exec_evs_mcast_iovec[2].iov_base = (char *)&req_lib_evs_mcast_joined->msg;
> -	req_exec_evs_mcast_iovec[2].iov_len = req_lib_evs_mcast_joined->msg_len;
> -
> -	res = api->totem_mcast (req_exec_evs_mcast_iovec, 3, TOTEM_AGREED);
> -		// TODO
> -	if (res == 0) {
> -		error = CS_OK;
> -	}
> -
> -	res_lib_evs_mcast_joined.header.size = sizeof (struct res_lib_evs_mcast_joined);
> -	res_lib_evs_mcast_joined.header.id = MESSAGE_RES_EVS_MCAST_JOINED;
> -	res_lib_evs_mcast_joined.header.error = error;
> -
> -	api->ipc_response_send (conn, &res_lib_evs_mcast_joined,
> -		sizeof (struct res_lib_evs_mcast_joined));
> -}
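
The joined-mcast path avoids copying the payload into one contiguous buffer:
the exec header, the connection's current group list and the caller's message
go to totem as three iovecs, with header.size set to the sum of the three
parts. Also note the stray "// TODO" next to the totem_mcast return check and
that CS_ERR_TRY_AGAIN is the default error, i.e. any non-zero return is
reported to the library as "try again". A minimal standalone illustration of
the scatter-gather assembly (hdr_t and send_iov are hypothetical stand-ins,
not corosync APIs):

	/* Sketch only: building a 3-part message as an iovec array. */
	#include <sys/uio.h>

	typedef struct { unsigned int id; unsigned int size; } hdr_t;

	static int send_three_parts(int (*send_iov)(const struct iovec *, unsigned int),
				    hdr_t *hdr, const void *groups, size_t groups_len,
				    const void *payload, size_t payload_len)
	{
		struct iovec iov[3];

		/* total on-wire size: header + group list + payload */
		hdr->size = (unsigned int)(sizeof(*hdr) + groups_len + payload_len);

		iov[0].iov_base = hdr;             iov[0].iov_len = sizeof(*hdr);
		iov[1].iov_base = (void *)groups;  iov[1].iov_len = groups_len;  /* discard const */
		iov[2].iov_base = (void *)payload; iov[2].iov_len = payload_len; /* discard const */

		return send_iov(iov, 3);
	}
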
> -
> -static void message_handler_req_evs_mcast_groups (void *conn, const void *msg)
> -{
> -	cs_error_t error = CS_ERR_TRY_AGAIN;
> -	const struct req_lib_evs_mcast_groups *req_lib_evs_mcast_groups = msg;
> -	struct res_lib_evs_mcast_groups res_lib_evs_mcast_groups;
> -	struct iovec req_exec_evs_mcast_iovec[3];
> -	struct req_exec_evs_mcast req_exec_evs_mcast;
> -	const char *msg_addr;
> -	int res;
> -
> -	req_exec_evs_mcast.header.size = sizeof (struct req_exec_evs_mcast) +
> -		sizeof (struct evs_group) * req_lib_evs_mcast_groups->group_entries +
> -		req_lib_evs_mcast_groups->msg_len;
> -
> -	req_exec_evs_mcast.header.id =
> -		SERVICE_ID_MAKE (EVS_SERVICE, MESSAGE_REQ_EXEC_EVS_MCAST);
> -	req_exec_evs_mcast.msg_len = req_lib_evs_mcast_groups->msg_len;
> -	req_exec_evs_mcast.group_entries = req_lib_evs_mcast_groups->group_entries;
> -
> -	msg_addr = (const char *)req_lib_evs_mcast_groups +
> -		sizeof (struct req_lib_evs_mcast_groups) +
> -		(sizeof (struct evs_group) * req_lib_evs_mcast_groups->group_entries);
> -
> -	req_exec_evs_mcast_iovec[0].iov_base = (char *)&req_exec_evs_mcast;
> -	req_exec_evs_mcast_iovec[0].iov_len = sizeof (req_exec_evs_mcast);
> -	req_exec_evs_mcast_iovec[1].iov_base = (char *)&req_lib_evs_mcast_groups->groups;
> -	req_exec_evs_mcast_iovec[1].iov_len = sizeof (struct evs_group) * req_lib_evs_mcast_groups->group_entries;
> -	req_exec_evs_mcast_iovec[2].iov_base = (void *) msg_addr; /* discard const */
> -	req_exec_evs_mcast_iovec[2].iov_len = req_lib_evs_mcast_groups->msg_len;
> -
> -	res = api->totem_mcast (req_exec_evs_mcast_iovec, 3, TOTEM_AGREED);
> -	if (res == 0) {
> -		error = CS_OK;
> -	}
> -
> -	res_lib_evs_mcast_groups.header.size = sizeof (struct res_lib_evs_mcast_groups);
> -	res_lib_evs_mcast_groups.header.id = MESSAGE_RES_EVS_MCAST_GROUPS;
> -	res_lib_evs_mcast_groups.header.error = error;
> -
> -	api->ipc_response_send (conn, &res_lib_evs_mcast_groups,
> -		sizeof (struct res_lib_evs_mcast_groups));
> -}
> -
> -static void message_handler_req_evs_membership_get (void *conn, const void *msg)
> -{
> -	struct res_lib_evs_membership_get res_lib_evs_membership_get;
> -
> -	res_lib_evs_membership_get.header.size = sizeof (struct res_lib_evs_membership_get);
> -	res_lib_evs_membership_get.header.id = MESSAGE_RES_EVS_MEMBERSHIP_GET;
> -	res_lib_evs_membership_get.header.error = CS_OK;
> -	res_lib_evs_membership_get.local_nodeid = api->totem_nodeid_get ();
> -	memcpy (&res_lib_evs_membership_get.member_list,
> -		&res_evs_confchg_callback.member_list,
> -		sizeof (res_lib_evs_membership_get.member_list));
> -
> -	res_lib_evs_membership_get.member_list_entries =
> -		res_evs_confchg_callback.member_list_entries;
> -
> -	api->ipc_response_send (conn, &res_lib_evs_membership_get,
> -		sizeof (struct res_lib_evs_membership_get));
> -}
> -
> -static void req_exec_mcast_endian_convert (void *msg)
> -{
> -	struct req_exec_evs_mcast *req_exec_evs_mcast =
> -		(struct req_exec_evs_mcast *)msg;
> -	req_exec_evs_mcast->group_entries =
> -		swab32 (req_exec_evs_mcast->group_entries);
> -	req_exec_evs_mcast->msg_len = swab32 (req_exec_evs_mcast->msg_len);
> -}
> -
> -static void message_handler_req_exec_mcast (
> -	const void *msg,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_evs_mcast *req_exec_evs_mcast = msg;
> -	struct res_evs_deliver_callback res_evs_deliver_callback;
> -	const char *msg_addr;
> -	struct list_head *list;
> -	int found = 0;
> -	int i, j;
> -	struct evs_pd *evs_pd;
> -	struct iovec iov[2];
> -
> -	res_evs_deliver_callback.header.size = sizeof (struct res_evs_deliver_callback) +
> -		req_exec_evs_mcast->msg_len;
> -	res_evs_deliver_callback.header.id = MESSAGE_RES_EVS_DELIVER_CALLBACK;
> -	res_evs_deliver_callback.header.error = CS_OK;
> -	res_evs_deliver_callback.msglen = req_exec_evs_mcast->msg_len;
> -
> -	msg_addr = (const char *)req_exec_evs_mcast + sizeof (struct req_exec_evs_mcast) +
> -		(sizeof (struct evs_group) * req_exec_evs_mcast->group_entries);
> -
> -	for (list = confchg_notify.next; list != &confchg_notify; list = list->next) {
> -		found = 0;
> -		evs_pd = list_entry (list, struct evs_pd, list);
> -
> -		for (i = 0; i < evs_pd->group_entries; i++) {
> -			for (j = 0; j < req_exec_evs_mcast->group_entries; j++) {
> -
> -				if (memcmp (&evs_pd->groups[i], &req_exec_evs_mcast->groups[j],
> -					sizeof (struct evs_group)) == 0) {
> -
> -					found = 1;
> -					break;
> -				}
> -			}
> -			if (found) {
> -				break;
> -			}
> -		}
> -
> -		if (found) {
> -			res_evs_deliver_callback.local_nodeid = nodeid;
> -			iov[0].iov_base = (void *)&res_evs_deliver_callback;
> -			iov[0].iov_len = sizeof (struct res_evs_deliver_callback);
> -			iov[1].iov_base = (void *) msg_addr; /* discard const */
> -			iov[1].iov_len = req_exec_evs_mcast->msg_len;
> -
> -			api->ipc_dispatch_iov_send (
> -				evs_pd->conn,
> -				iov,
> -				2);
> -		}
> -	}
> -}
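
On delivery the payload is located by skipping the fixed header plus the
variable-length group array that precedes it, and the message is dispatched to
every local connection whose joined groups intersect the sender's list (an
O(local groups x message groups) scan per connection, which is fine for small
group counts). The offset arithmetic is the part worth double-checking; a
compact standalone version, with msg_t/group_t as illustrative stand-ins for
the patch's types:

	/* Sketch: locating a trailing payload behind a fixed header plus a
	 * variable-length array, as in the quoted handler. */
	#include <stddef.h>

	typedef struct { char name[32]; } group_t;
	typedef struct {
		unsigned int group_entries;
		unsigned int msg_len;
		/* on the wire: group_entries group_t records, then msg_len bytes */
	} msg_t;

	static const char *payload_of(const msg_t *m)
	{
		return (const char *)m + sizeof(msg_t) +
			(size_t)m->group_entries * sizeof(group_t);
	}
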
> diff --git a/services/mon.c b/services/mon.c
> deleted file mode 100644
> index 42735b0..0000000
> --- a/services/mon.c
> +++ /dev/null
> @@ -1,548 +0,0 @@
> -/*
> - * Copyright (c) 2010 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Angus Salkeld <asalkeld@xxxxxxxxxx>
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the MontaVista Software, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <unistd.h>
> -#if defined(HAVE_LIBSTATGRAB)
> -#include <statgrab.h>
> -#endif
> -
> -#include <corosync/corotypes.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/coroapi.h>
> -#include <corosync/list.h>
> -#include <corosync/logsys.h>
> -#include <corosync/icmap.h>
> -#include "../exec/fsm.h"
> -
> -
> -LOGSYS_DECLARE_SUBSYS ("MON");
> -
> -/*
> - * Service Interfaces required by service_message_handler struct
> - */
> -static int mon_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api);
> -
> -static struct corosync_api_v1 *api;
> -#define MON_DEFAULT_PERIOD 3000
> -#define MON_MIN_PERIOD 500
> -#define MON_MAX_PERIOD (120 * CS_TIME_MS_IN_SEC)
> -
> -struct corosync_service_engine mon_service_engine = {
> -	.name			= "corosync resource monitoring service",
> -	.id			= MON_SERVICE,
> -	.priority		= 1,
> -	.private_data_size	= 0,
> -	.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> -	.lib_init_fn		= NULL,
> -	.lib_exit_fn		= NULL,
> -	.lib_engine		= NULL,
> -	.lib_engine_count	= 0,
> -	.exec_engine		= NULL,
> -	.exec_engine_count	= 0,
> -	.confchg_fn		= NULL,
> -	.exec_init_fn		= mon_exec_init_fn,
> -	.exec_dump_fn		= NULL,
> -	.sync_mode		= CS_SYNC_V2
> -};
> -
> -static DECLARE_LIST_INIT (confchg_notify);
> -
> -
> -struct resource_instance {
> -	const char *icmap_path;
> -	const char *name;
> -	corosync_timer_handle_t timer_handle;
> -	void (*update_stats_fn) (void *data);
> -	struct cs_fsm fsm;
> -	uint64_t period;
> -	icmap_value_types_t max_type;
> -	union {
> -		int32_t int32;
> -		double dbl;
> -	} max;
> -};
> -
> -static void mem_update_stats_fn (void *data);
> -static void load_update_stats_fn (void *data);
> -
> -static struct resource_instance memory_used_inst = {
> -	.name = "memory_used",
> -	.icmap_path = "resources.system.memory_used.",
> -	.update_stats_fn = mem_update_stats_fn,
> -	.max_type = ICMAP_VALUETYPE_INT32,
> -	.max.int32 = INT32_MAX,
> -	.period = MON_DEFAULT_PERIOD,
> -};
> -
> -static struct resource_instance load_15min_inst = {
> -	.name = "load_15min",
> -	.icmap_path = "resources.system.load_15min.",
> -	.update_stats_fn = load_update_stats_fn,
> -	.max_type = ICMAP_VALUETYPE_DOUBLE,
> -	.max.dbl = INT32_MAX,
> -	.period = MON_DEFAULT_PERIOD,
> -};
> -
> -
> -/*
> - * F S M
> - */
> -static void mon_config_changed (struct cs_fsm* fsm, int32_t event, void * data);
> -static void mon_resource_failed (struct cs_fsm* fsm, int32_t event, void * data);
> -
> -const char * mon_running_str = "running";
> -const char * mon_failed_str = "failed";
> -const char * mon_failure_str = "failure";
> -const char * mon_stopped_str = "stopped";
> -const char * mon_config_changed_str = "config_changed";
> -
> -enum mon_resource_state {
> -	MON_S_STOPPED,
> -	MON_S_RUNNING,
> -	MON_S_FAILED
> -};
> -enum mon_resource_event {
> -	MON_E_CONFIG_CHANGED,
> -	MON_E_FAILURE
> -};
> -
> -struct cs_fsm_entry mon_fsm_table[] = {
> -	{ MON_S_STOPPED, MON_E_CONFIG_CHANGED,	mon_config_changed,	{MON_S_STOPPED, MON_S_RUNNING, -1} },
> -	{ MON_S_STOPPED, MON_E_FAILURE,		NULL,			{-1} },
> -	{ MON_S_RUNNING, MON_E_CONFIG_CHANGED,	mon_config_changed,	{MON_S_RUNNING, MON_S_STOPPED, -1} },
> -	{ MON_S_RUNNING, MON_E_FAILURE,		mon_resource_failed,	{MON_S_FAILED, -1} },
> -	{ MON_S_FAILED,  MON_E_CONFIG_CHANGED,	mon_config_changed,	{MON_S_RUNNING, MON_S_STOPPED, -1} },
> -	{ MON_S_FAILED,  MON_E_FAILURE,		NULL,			{-1} },
> -};
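
For readers new to this table: each row names the handler for a (state, event)
pair plus the set of states that handler is allowed to move to, terminated by
-1. The engine that walks it (cs_fsm_process, in exec/fsm.h) is not part of
this hunk, so the following is only a generic sketch of how a table-driven
dispatcher of this shape typically works, not the actual cs_fsm code:

	/* Generic sketch of table-driven FSM dispatch; NOT the cs_fsm code. */
	#include <stddef.h>

	struct fsm_entry {
		int curr_state;
		int event;
		void (*handler)(void *fsm, int event, void *data);
		/* allowed next states would follow, terminated by -1 */
	};

	static void fsm_dispatch(struct fsm_entry *table, int entries,
				 int curr_state, int event, void *fsm, void *data)
	{
		int i;

		for (i = 0; i < entries; i++) {
			if (table[i].curr_state == curr_state &&
			    table[i].event == event) {
				if (table[i].handler != NULL) {
					table[i].handler(fsm, event, data);
				}
				return;	/* (state, event) pairs are unique */
			}
		}
	}
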
> -
> -/*
> - * Dynamic loading descriptor
> - */
> -
> -static struct corosync_service_engine *mon_get_service_engine_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 mon_service_engine_iface = {
> -	.corosync_get_service_engine_ver0	= mon_get_service_engine_ver0
> -};
> -
> -static struct lcr_iface corosync_mon_ver0[1] = {
> -	{
> -		.name			= "corosync_mon",
> -		.version		= 0,
> -		.versions_replace	= 0,
> -		.versions_replace_count = 0,
> -		.dependencies		= 0,
> -		.dependency_count	= 0,
> -		.constructor		= NULL,
> -		.destructor		= NULL,
> -		.interfaces		= NULL,
> -	}
> -};
> -
> -static struct lcr_comp mon_comp_ver0 = {
> -	.iface_count	= 1,
> -	.ifaces		= corosync_mon_ver0
> -};
> -
> -static struct corosync_service_engine *mon_get_service_engine_ver0 (void)
> -{
> -	return (&mon_service_engine);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_interfaces_set (&corosync_mon_ver0[0], &mon_service_engine_iface);
> -
> -	lcr_component_register (&mon_comp_ver0);
> -}
> -
> -static const char * mon_res_state_to_str(struct cs_fsm* fsm,
> -	int32_t state)
> -{
> -	switch (state) {
> -	case MON_S_STOPPED:
> -		return mon_stopped_str;
> -		break;
> -	case MON_S_RUNNING:
> -		return mon_running_str;
> -		break;
> -	case MON_S_FAILED:
> -		return mon_failed_str;
> -		break;
> -	}
> -	return NULL;
> -}
> -
> -static const char * mon_res_event_to_str(struct cs_fsm* fsm,
> -	int32_t event)
> -{
> -	switch (event) {
> -	case MON_E_CONFIG_CHANGED:
> -		return mon_config_changed_str;
> -		break;
> -	case MON_E_FAILURE:
> -		return mon_failure_str;
> -		break;
> -	}
> -	return NULL;
> -}
> -
> -static void mon_fsm_state_set (struct cs_fsm* fsm,
> -	enum mon_resource_state next_state, struct resource_instance* inst)
> -{
> -	enum mon_resource_state prev_state = fsm->curr_state;
> -	const char *state_str;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -
> -	ENTER();
> -
> -	cs_fsm_state_set(fsm, next_state, inst);
> -
> -	if (prev_state == fsm->curr_state) {
> -		return;
> -	}
> -	state_str = mon_res_state_to_str(fsm, fsm->curr_state);
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "state");
> -	icmap_set_string(key_name, state_str);
> -}
> -
> -
> -static void mon_config_changed (struct cs_fsm* fsm, int32_t event, void * data)
> -{
> -	struct resource_instance * inst = (struct resource_instance *)data;
> -	uint64_t tmp_value;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -	int run_updater;
> -
> -	ENTER();
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "poll_period");
> -	if (icmap_get_uint64(key_name, &tmp_value) == CS_OK) {
> -		if (tmp_value >= MON_MIN_PERIOD && tmp_value <= MON_MAX_PERIOD) {
> -			log_printf (LOGSYS_LEVEL_DEBUG,
> -				"poll_period changing from:%"PRIu64" to %"PRIu64".",
> -				inst->period, tmp_value);
> -			inst->period = tmp_value;
> -		} else {
> -			log_printf (LOGSYS_LEVEL_WARNING,
> -				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> -				tmp_value, inst->name);
> -		}
> -	}
> -
> -	if (inst->timer_handle) {
> -		api->timer_delete(inst->timer_handle);
> -		inst->timer_handle = 0;
> -	}
> -
> -	run_updater = 0;
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "max");
> -	if (inst->max_type == ICMAP_VALUETYPE_INT32) {
> -		if (icmap_get_int32(key_name, &inst->max.int32) != CS_OK) {
> -			inst->max.int32 = INT32_MAX;
> -
> -			mon_fsm_state_set (fsm, MON_S_STOPPED, inst);
> -		} else {
> -			run_updater = 1;
> -		}
> -	}
> -	if (inst->max_type == ICMAP_VALUETYPE_DOUBLE) {
> -		if (icmap_get_double(key_name, &inst->max.dbl) != CS_OK) {
> -			inst->max.dbl = INT32_MAX;
> -
> -			mon_fsm_state_set (fsm, MON_S_STOPPED, inst);
> -		} else {
> -			run_updater = 1;
> -		}
> -	}
> -
> -	if (run_updater) {
> -		mon_fsm_state_set (fsm, MON_S_RUNNING, inst);
> -		/*
> -		 * run the updater, incase the period has shortened
> -		 * and to start the timer.
> -		 */
> -		inst->update_stats_fn (inst);
> -	}
> -}
> -
> -void mon_resource_failed (struct cs_fsm* fsm, int32_t event, void * data)
> -{
> -	struct resource_instance * inst = (struct resource_instance *)data;
> -	ENTER();
> -	mon_fsm_state_set (fsm, MON_S_FAILED, inst);
> -}
> -
> -static int32_t percent_mem_used_get(void)
> -{
> -#if defined(HAVE_LIBSTATGRAB)
> -	sg_mem_stats *mem_stats;
> -	sg_swap_stats *swap_stats;
> -	long long total, freemem;
> -
> -	mem_stats = sg_get_mem_stats();
> -	swap_stats = sg_get_swap_stats();
> -
> -	if (mem_stats == NULL || swap_stats != NULL) {
> -		log_printf (LOGSYS_LEVEL_ERROR, "Unable to get memory stats: %s\n",
> -			sg_str_error(sg_get_error()));
> -		return -1;
> -	}
> -	total = mem_stats->total + swap_stats->total;
> -	freemem = mem_stats->free + swap_stats->free;
> -	return ((total - freemem) * 100) / total;
> -#else
> -#if defined(COROSYNC_LINUX)
> -	char *line_ptr;
> -	char line[512];
> -	unsigned long long value;
> -	FILE *f;
> -	long long total = 0;
> -	long long freemem = 0;
> -
> -	if ((f = fopen("/proc/meminfo", "r")) == NULL) {
> -		return -1;
> -	}
> -
> -	while ((line_ptr = fgets(line, sizeof(line), f)) != NULL) {
> -		if (sscanf(line_ptr, "%*s %llu kB", &value) != 1) {
> -			continue;
> -		}
> -		value *= 1024;
> -
> -		if (strncmp(line_ptr, "MemTotal:", 9) == 0) {
> -			total += value;
> -		} else if (strncmp(line_ptr, "MemFree:", 8) == 0) {
> -			freemem += value;
> -		} else if (strncmp(line_ptr, "SwapTotal:", 10) == 0) {
> -			total += value;
> -		} else if (strncmp(line_ptr, "SwapFree:", 9) == 0) {
> -			freemem += value;
> -		}
> -	}
> -
> -	fclose(f);
> -	return ((total - freemem) * 100) / total;
> -#else
> -#error need libstatgrab or linux.
> -#endif /* COROSYNC_LINUX */
> -#endif /* HAVE_LIBSTATGRAB */
> -}
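
The libstatgrab branch above looks like it has its swap check inverted:
"swap_stats != NULL" makes the function log an error and return -1 exactly when
swap stats *are* available, and the only path that falls through is the one
where swap_stats is NULL and is then dereferenced for ->total. Presumably it
was meant to mirror the mem_stats test; a corrected excerpt of that guard,
assuming that reading:

	/* corrected guard (sketch): treat NULL from either call as failure */
	if (mem_stats == NULL || swap_stats == NULL) {
		log_printf (LOGSYS_LEVEL_ERROR, "Unable to get memory stats: %s\n",
			sg_str_error(sg_get_error()));
		return -1;
	}

Not something this removal patch needs to fix, but worth carrying over as a
follow-up once the file lands in exec/.
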
> -
> -
> -static void mem_update_stats_fn (void *data)
> -{
> -	struct resource_instance * inst = (struct resource_instance *)data;
> -	int32_t new_value;
> -	uint64_t timestamp;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -
> -	new_value = percent_mem_used_get();
> -	fprintf(stderr,"BLA = %u\n", new_value);
> -	if (new_value > 0) {
> -		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "current");
> -		icmap_set_uint32(key_name, new_value);
> -
> -		timestamp = cs_timestamp_get();
> -
> -		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "last_updated");
> -		icmap_set_uint64(key_name, timestamp);
> -
> -		if (new_value > inst->max.int32 && inst->fsm.curr_state != MON_S_FAILED) {
> -			cs_fsm_process (&inst->fsm, MON_E_FAILURE, inst);
> -		}
> -	}
> -	api->timer_add_duration(inst->period * MILLI_2_NANO_SECONDS,
> -		inst, inst->update_stats_fn, &inst->timer_handle);
> -}
> -
> -static double min15_loadavg_get(void)
> -{
> -#if defined(HAVE_LIBSTATGRAB)
> -	sg_load_stats *load_stats;
> -	load_stats = sg_get_load_stats ();
> -	if (load_stats == NULL) {
> -		log_printf (LOGSYS_LEVEL_ERROR, "Unable to get load stats: %s\n",
> -			sg_str_error (sg_get_error()));
> -		return -1;
> -	}
> -	return load_stats->min15;
> -#else
> -#if defined(COROSYNC_LINUX)
> -	double loadav[3];
> -	if (getloadavg(loadav,3) < 0) {
> -		return -1;
> -	}
> -	return loadav[2];
> -#else
> -#error need libstatgrab or linux.
> -#endif /* COROSYNC_LINUX */
> -#endif /* HAVE_LIBSTATGRAB */
> -}
> -
> -static void load_update_stats_fn (void *data)
> -{
> -	struct resource_instance * inst = (struct resource_instance *)data;
> -	uint64_t timestamp;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -	double min15 = min15_loadavg_get();
> -
> -	if (min15 > 0) {
> -		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "current");
> -		icmap_set_double(key_name, min15);
> -
> -		timestamp = cs_timestamp_get();
> -
> -		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "last_updated");
> -		icmap_set_uint64(key_name, timestamp);
> -
> -		if (min15 > inst->max.dbl && inst->fsm.curr_state != MON_S_FAILED) {
> -			cs_fsm_process (&inst->fsm, MON_E_FAILURE, &inst);
> -		}
> -	}
> -
> -	api->timer_add_duration(inst->period * MILLI_2_NANO_SECONDS,
> -		inst, inst->update_stats_fn, &inst->timer_handle);
> -}
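
Two small things in the pair of updater functions above, flagged here rather
than silently: mem_update_stats_fn still carries a debug
fprintf(stderr, "BLA = %u\n", ...) (which also prints a signed int32_t with %u,
so the -1 error value comes out as 4294967295), and load_update_stats_fn passes
&inst to cs_fsm_process where the memory variant passes inst, so the failure
handler would receive a pointer-to-pointer. If these stay, the excerpt would
presumably want to read:

	/* sketch of the intended call, per the mem_update_stats_fn pattern */
	if (min15 > inst->max.dbl && inst->fsm.curr_state != MON_S_FAILED) {
		cs_fsm_process (&inst->fsm, MON_E_FAILURE, inst);	/* not &inst */
	}

The "new_value > 0" / "min15 > 0" guards also treat a legitimate reading of 0
the same as the -1 error return; ">= 0" would keep that distinction.
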
> -
> -static void mon_key_changed_cb (
> -	int32_t event,
> -	const char *key_name,
> -	struct icmap_notify_value new_value,
> -	struct icmap_notify_value old_value,
> -	void *user_data)
> -{
> -	struct resource_instance* inst = (struct resource_instance*)user_data;
> -	char *last_key_part;
> -
> -	if (event == ICMAP_TRACK_DELETE && inst) {
> -		log_printf (LOGSYS_LEVEL_WARNING,
> -			"resource \"%s\" deleted from cmap!",
> -			inst->name);
> -
> -		cs_fsm_process (&inst->fsm, MON_E_CONFIG_CHANGED, inst);
> -	}
> -
> -	if (event == ICMAP_TRACK_MODIFY) {
> -		last_key_part = strrchr(key_name, '.');
> -		if (last_key_part == NULL)
> -			return ;
> -
> -		last_key_part++;
> -		if (strcmp(last_key_part, "max") == 0 ||
> -		    strcmp(last_key_part, "poll_period") == 0) {
> -			ENTER();
> -			cs_fsm_process (&inst->fsm, MON_E_CONFIG_CHANGED, inst);
> -		}
> -	}
> -}
> -
> -static void mon_instance_init (struct resource_instance* inst)
> -{
> -	uint64_t tmp_value;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -	icmap_track_t icmap_track;
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "current");
> -	if (inst->max_type == ICMAP_VALUETYPE_INT32) {
> -		icmap_set_int32(key_name, 0);
> -	} else {
> -		icmap_set_double(key_name, 0);
> -	}
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "last_updated");
> -	icmap_set_uint64(key_name, 0);
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "state");
> -	icmap_set_string(key_name, mon_stopped_str);
> -
> -	inst->fsm.name = inst->name;
> -	inst->fsm.curr_entry = 0;
> -	inst->fsm.curr_state = MON_S_STOPPED;
> -	inst->fsm.table = mon_fsm_table;
> -	inst->fsm.entries = sizeof(mon_fsm_table) / sizeof(struct cs_fsm_entry);
> -	inst->fsm.state_to_str = mon_res_state_to_str;
> -	inst->fsm.event_to_str = mon_res_event_to_str;
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", inst->icmap_path, "poll_period");
> -	if (icmap_get_uint64(key_name, &tmp_value) != CS_OK) {
> -		icmap_set_uint64(key_name, inst->period);
> -	}
> -	else {
> -		if (tmp_value >= MON_MIN_PERIOD && tmp_value <= MON_MAX_PERIOD) {
> -			inst->period = tmp_value;
> -		} else {
> -			log_printf (LOGSYS_LEVEL_WARNING,
> -				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> -				tmp_value, inst->name);
> -		}
> -	}
> -	cs_fsm_process (&inst->fsm, MON_E_CONFIG_CHANGED, inst);
> -
> -	icmap_track_add(inst->icmap_path,
> -			ICMAP_TRACK_ADD | ICMAP_TRACK_MODIFY | ICMAP_TRACK_DELETE | ICMAP_TRACK_PREFIX,
> -			mon_key_changed_cb, inst, &icmap_track);
> -}
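
For reference, each resource publishes its values under its icmap prefix with
the leaf name appended per key: init seeds "current", "last_updated" and
"state", then either records the default poll_period or validates a
user-supplied one against the same MON_MIN_PERIOD/MON_MAX_PERIOD window used in
mon_config_changed. The key composition, shown standalone (derived from the
prefixes in this hunk, not from separate documentation):

	/* Sketch: how the leaf keys are composed from the instance prefix. */
	#include <stdio.h>

	int main(void)
	{
		const char *prefix = "resources.system.memory_used.";
		const char *leaves[] = { "current", "last_updated", "state",
					 "poll_period", "max" };
		char key[256];	/* ICMAP_KEYNAME_MAXLEN stand-in */
		unsigned int i;

		for (i = 0; i < sizeof(leaves) / sizeof(leaves[0]); i++) {
			snprintf(key, sizeof(key), "%s%s", prefix, leaves[i]);
			printf("%s\n", key);
		}
		return 0;
	}
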
> -
> -static int mon_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api)
> -{
> -
> -#ifdef HAVE_LIBSTATGRAB
> -	sg_init();
> -#endif /* HAVE_LIBSTATGRAB */
> -
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -	api = corosync_api;
> -
> -	mon_instance_init (&memory_used_inst);
> -	mon_instance_init (&load_15min_inst);
> -
> -	return 0;
> -}
> -
> -
> diff --git a/services/pload.c b/services/pload.c
> deleted file mode 100644
> index c8f64ce..0000000
> --- a/services/pload.c
> +++ /dev/null
> @@ -1,379 +0,0 @@
> -/*
> - * Copyright (c) 2008-2009 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Steven Dake (sdake@xxxxxxxxxx)
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the MontaVista Software, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <sys/types.h>
> -#include <sys/socket.h>
> -#include <sys/un.h>
> -#include <sys/time.h>
> -#include <sys/ioctl.h>
> -#include <netinet/in.h>
> -#include <sys/uio.h>
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <stdlib.h>
> -#include <stdio.h>
> -#include <errno.h>
> -#include <time.h>
> -#include <netinet/in.h>
> -#include <arpa/inet.h>
> -#include <string.h>
> -#include <assert.h>
> -
> -#include <qb/qblist.h>
> -#include <qb/qbutil.h>
> -#include <qb/qbipc_common.h>
> -
> -#include <corosync/swab.h>
> -#include <corosync/corotypes.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/mar_gen.h>
> -#include <corosync/coroapi.h>
> -#include <corosync/ipc_pload.h>
> -#include <corosync/list.h>
> -#include <corosync/logsys.h>
> -
> -LOGSYS_DECLARE_SUBSYS ("PLOAD");
> -
> -enum pload_exec_message_req_types {
> -	MESSAGE_REQ_EXEC_PLOAD_START = 0,
> -	MESSAGE_REQ_EXEC_PLOAD_MCAST = 1
> -};
> -
> -/*
> - * Service Interfaces required by service_message_handler struct
> - */
> -static int pload_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api);
> -
> -static void pload_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id);
> -
> -static void message_handler_req_exec_pload_start (const void *msg,
> -						  unsigned int nodeid);
> -
> -static void message_handler_req_exec_pload_mcast (const void *msg,
> -						  unsigned int nodeid);
> -
> -static void req_exec_pload_start_endian_convert (void *msg);
> -
> -static void req_exec_pload_mcast_endian_convert (void *msg);
> -
> -static void message_handler_req_pload_start (void *conn, const void *msg);
> -
> -static int pload_lib_init_fn (void *conn);
> -
> -static int pload_lib_exit_fn (void *conn);
> -
> -static char buffer[1000000];
> -
> -static unsigned int msgs_delivered = 0;
> -
> -static unsigned int msgs_wanted = 0;
> -
> -static unsigned int msg_size = 0;
> -
> -static unsigned int msg_code = 1;
> -
> -static unsigned int msgs_sent = 0;
> -
> -
> -static struct corosync_api_v1 *api;
> -
> -struct req_exec_pload_start {
> -	struct qb_ipc_request_header header;
> -	unsigned int msg_code;
> -	unsigned int msg_count;
> -	unsigned int msg_size;
> -	unsigned int time_interval;
> -};
> -
> -struct req_exec_pload_mcast {
> -	struct qb_ipc_request_header header;
> -	unsigned int msg_code;
> -};
> -
> -static struct corosync_lib_handler pload_lib_engine[] =
> -{
> -	{ /* 0 */
> -		.lib_handler_fn		= message_handler_req_pload_start,
> -		.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	}
> -};
> -
> -static struct corosync_exec_handler pload_exec_engine[] =
> -{
> -	{
> -		.exec_handler_fn 	= message_handler_req_exec_pload_start,
> -		.exec_endian_convert_fn	= req_exec_pload_start_endian_convert
> -	},
> -	{
> -		.exec_handler_fn 	= message_handler_req_exec_pload_mcast,
> -		.exec_endian_convert_fn	= req_exec_pload_mcast_endian_convert
> -	}
> -};
> -
> -struct corosync_service_engine pload_service_engine = {
> -	.name			= "corosync profile loading service",
> -	.id			= PLOAD_SERVICE,
> -	.priority		= 1,
> -	.private_data_size	= 0,
> -	.flow_control		= CS_LIB_FLOW_CONTROL_REQUIRED,
> -	.lib_init_fn		= pload_lib_init_fn,
> -	.lib_exit_fn		= pload_lib_exit_fn,
> -	.lib_engine		= pload_lib_engine,
> -	.lib_engine_count	= sizeof (pload_lib_engine) / sizeof (struct corosync_lib_handler),
> -	.exec_engine		= pload_exec_engine,
> -	.exec_engine_count	= sizeof (pload_exec_engine) / sizeof (struct corosync_exec_handler),
> -	.confchg_fn		= pload_confchg_fn,
> -	.exec_init_fn		= pload_exec_init_fn,
> -	.exec_dump_fn		= NULL,
> -	.sync_mode		= CS_SYNC_V2
> -};
> -
> -static DECLARE_LIST_INIT (confchg_notify);
> -
> -/*
> - * Dynamic loading descriptor
> - */
> -
> -static struct corosync_service_engine *pload_get_service_engine_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 pload_service_engine_iface = {
> -	.corosync_get_service_engine_ver0	= pload_get_service_engine_ver0
> -};
> -
> -static struct lcr_iface corosync_pload_ver0[1] = {
> -	{
> -		.name			= "corosync_pload",
> -		.version		= 0,
> -		.versions_replace	= 0,
> -		.versions_replace_count = 0,
> -		.dependencies		= 0,
> -		.dependency_count	= 0,
> -		.constructor		= NULL,
> -		.destructor		= NULL,
> -		.interfaces		= NULL,
> -	}
> -};
> -
> -static struct lcr_comp pload_comp_ver0 = {
> -	.iface_count	= 1,
> -	.ifaces		= corosync_pload_ver0
> -};
> -
> -static struct corosync_service_engine *pload_get_service_engine_ver0 (void)
> -{
> -	return (&pload_service_engine);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_interfaces_set (&corosync_pload_ver0[0], &pload_service_engine_iface);
> -
> -	lcr_component_register (&pload_comp_ver0);
> -}
> -
> -static int pload_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api)
> -{
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -	api = corosync_api;
> -
> -	return 0;
> -}
> -
> -static void pload_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id)
> -{
> -}
> -
> -static int pload_lib_init_fn (void *conn)
> -{
> -	return (0);
> -}
> -
> -static int pload_lib_exit_fn (void *conn)
> -{
> -	return (0);
> -}
> -
> -static void message_handler_req_pload_start (void *conn, const void *msg)
> -{
> -	const struct req_lib_pload_start *req_lib_pload_start = msg;
> -	struct req_exec_pload_start req_exec_pload_start;
> -	struct iovec iov;
> -
> -	req_exec_pload_start.header.id =
> -		SERVICE_ID_MAKE (PLOAD_SERVICE, MESSAGE_REQ_EXEC_PLOAD_START);
> -	req_exec_pload_start.msg_code = req_lib_pload_start->msg_code;
> -	req_exec_pload_start.msg_size = req_lib_pload_start->msg_size;
> -	req_exec_pload_start.msg_count = req_lib_pload_start->msg_count;
> -	req_exec_pload_start.time_interval = req_lib_pload_start->time_interval;
> -	iov.iov_base = (void *)&req_exec_pload_start;
> -	iov.iov_len = sizeof (struct req_exec_pload_start);
> -
> -	msgs_delivered = 0;
> -
> -	msgs_wanted = 0;
> -
> -	msgs_sent = 0;
> -
> -	api->totem_mcast (&iov, 1, TOTEM_AGREED);
> -}
> -
> -static void req_exec_pload_start_endian_convert (void *msg)
> -{
> -}
> -
> -static void req_exec_pload_mcast_endian_convert (void *msg)
> -{
> -}
> -
> -static int send_message (const void *arg)
> -{
> -	struct req_exec_pload_mcast req_exec_pload_mcast;
> -	struct iovec iov[2];
> -	unsigned int res;
> -	unsigned int iov_len = 1;
> -
> -	req_exec_pload_mcast.header.id =
> -		SERVICE_ID_MAKE (PLOAD_SERVICE, MESSAGE_REQ_EXEC_PLOAD_MCAST);
> -	req_exec_pload_mcast.header.size = sizeof (struct req_exec_pload_mcast) + msg_size;
> -
> -	iov[0].iov_base = (void *)&req_exec_pload_mcast;
> -	iov[0].iov_len = sizeof (struct req_exec_pload_mcast);
> -	if (msg_size > sizeof (req_exec_pload_mcast)) {
> -		iov[1].iov_base = buffer;
> -		iov[1].iov_len = msg_size - sizeof (req_exec_pload_mcast);
> -		iov_len = 2;
> -	}
> -
> -	do {
> -		res = api->totem_mcast (iov, iov_len, TOTEM_AGREED);
> -		if (res == -1) {
> -			break;
> -		} else {
> -			msgs_sent++;
> -			msg_code++;
> -		}
> -	} while (msgs_sent < msgs_wanted);
> -	if (msgs_sent == msgs_wanted) {
> -		return (0);
> -	} else {
> -		return (-1);
> -	}
> -}
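
send_message keeps multicasting until either the wanted count is reached or
totem_mcast reports backpressure (-1), and then returns -1 so the scheduled
work can run it again later; I'm reading the schedwrk contract as "non-zero
return means re-run", which isn't visible in this hunk, so treat that as an
assumption. The general shape, standalone:

	/* Sketch of the "send until backpressure, then yield" pattern.
	 * try_send() and the rescheduling contract are assumptions here. */
	static unsigned int sent, wanted;

	static int pump_messages(int (*try_send)(void))
	{
		while (sent < wanted) {
			if (try_send() == -1) {
				return -1;	/* queue full: ask to be rescheduled */
			}
			sent++;
		}
		return 0;			/* done: no further callbacks needed */
	}

One more observation while we're here: the local req_exec_pload_mcast.msg_code
is never assigned before the mcast (only the global counter is bumped), so
last_msg_no on the receiving side reads indeterminate bytes. Worth a look if
pload survives the move to exec/.
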
> -
> -hdb_handle_t start_mcasting_handle;
> -
> -static void start_mcasting (void)
> -{
> -	api->schedwrk_create (
> -		&start_mcasting_handle,
> -		send_message,
> -		&start_mcasting_handle);
> -}
> -
> -static void message_handler_req_exec_pload_start (
> -	const void *msg,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_pload_start *req_exec_pload_start = msg;
> -
> -	msgs_wanted = req_exec_pload_start->msg_count;
> -	msg_size = req_exec_pload_start->msg_size;
> -	msg_code = req_exec_pload_start->msg_code;
> -
> -	start_mcasting ();
> -}
> -#ifndef timersub
> -#define timersub(a, b, result)					\
> -do {								\
> -	(result)->tv_sec = (a)->tv_sec - (b)->tv_sec;		\
> -	(result)->tv_usec = (a)->tv_usec - (b)->tv_usec;	\
> -	if ((result)->tv_usec < 0) {				\
> -		--(result)->tv_sec;				\
> -		(result)->tv_usec += 1000000;			\
> -	}							\
> -} while (0)
> -#endif /* timersub */
> -
> -unsigned long long int tv1;
> -unsigned long long int tv2;
> -unsigned long long int tv_elapsed;
> -int last_msg_no = 0;
> -
> -static void message_handler_req_exec_pload_mcast (
> -	const void *msg,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_pload_mcast *pload_mcast = msg;
> -	char log_buffer[1024];
> -
> -	last_msg_no = pload_mcast->msg_code;
> -	if (msgs_delivered == 0) {
> -		tv1 = qb_util_nano_current_get ();
> -	}
> -	msgs_delivered += 1;
> -	if (msgs_delivered == msgs_wanted) {
> -		tv2 = qb_util_nano_current_get ();
> -		tv_elapsed = tv2 - tv1;
> -		sprintf (log_buffer, "%5d Writes %d bytes per write %7.3f seconds runtime, %9.3f TP/S, %9.3f MB/S.\n",
> -			msgs_delivered,
> -			msg_size,
> -			(tv_elapsed / 1000000000.0),
> -			((float)msgs_delivered) /  (tv_elapsed / 1000000000.0),
> -			(((float)msgs_delivered) * ((float)msg_size) /
> -				(tv_elapsed / 1000000000.0)) / (1024.0 * 1024.0));
> -		log_printf (LOGSYS_LEVEL_NOTICE, "%s", log_buffer);
> -	}
> -}
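
As a quick sanity check of the report formula above, with made-up round numbers
(purely illustrative, not measured figures):

	/* Worked example of the throughput report with hypothetical numbers. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int msgs = 10000;	/* msgs_delivered */
		unsigned int size = 10000;	/* msg_size in bytes */
		double elapsed_ns = 2.5e9;	/* tv_elapsed: 2.5 seconds */
		double secs = elapsed_ns / 1e9;

		printf("%.3f TP/S\n", msgs / secs);			/* 4000.000 */
		printf("%.3f MB/S\n", (msgs * (double)size / secs) /
		       (1024.0 * 1024.0));				/* ~38.147 */
		return 0;
	}
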
> diff --git a/services/testquorum.c b/services/testquorum.c
> deleted file mode 100644
> index 7f35159..0000000
> --- a/services/testquorum.c
> +++ /dev/null
> @@ -1,156 +0,0 @@
> -/*
> - * Copyright (c) 2008, 2009 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Christine Caulfield (ccaulfie@xxxxxxxxxx)
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of Red Hat, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <pwd.h>
> -#include <grp.h>
> -#include <sys/types.h>
> -#include <sys/poll.h>
> -#include <sys/uio.h>
> -#include <sys/mman.h>
> -#include <sys/socket.h>
> -#include <sys/un.h>
> -#include <sys/time.h>
> -#include <sys/resource.h>
> -#include <netinet/in.h>
> -#include <arpa/inet.h>
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <stdlib.h>
> -#include <stdio.h>
> -#include <errno.h>
> -#include <sched.h>
> -#include <time.h>
> -
> -#include <corosync/corotypes.h>
> -#include <qb/qbipc_common.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/logsys.h>
> -#include <corosync/icmap.h>
> -
> -#include <corosync/mar_gen.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/coroapi.h>
> -
> -#include <corosync/engine/quorum.h>
> -
> -LOGSYS_DECLARE_SUBSYS ("TEST");
> -
> -static void test_init(struct corosync_api_v1 *api, quorum_set_quorate_fn_t report);
> -
> -/*
> - * lcrso object definition
> - */
> -static struct quorum_services_api_ver1 test_quorum_iface_ver0 = {
> -	.init				= test_init
> -};
> -
> -static struct lcr_iface corosync_test_quorum_ver0[1] = {
> -	{
> -		.name			= "testquorum",
> -		.version		= 0,
> -		.versions_replace	= 0,
> -		.versions_replace_count	= 0,
> -		.dependencies		= 0,
> -		.dependency_count	= 0,
> -		.constructor		= NULL,
> -		.destructor		= NULL,
> -		.interfaces		= (void **)(void *)&test_quorum_iface_ver0,
> -	},
> -};
> -
> -static struct lcr_comp test_quorum_comp_ver0 = {
> -	.iface_count			= 1,
> -	.ifaces				= corosync_test_quorum_ver0
> -};
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -	logsys_subsys_init();
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_interfaces_set (&corosync_test_quorum_ver0[0], &test_quorum_iface_ver0);
> -	lcr_component_register (&test_quorum_comp_ver0);
> -}
> -
> -/* -------------------------------------------------- */
> -
> -static quorum_set_quorate_fn_t set_quorum;
> -
> -static void key_change_notify(
> -	int32_t event,
> -	const char *key_name,
> -	struct icmap_notify_value new_val,
> -	struct icmap_notify_value old_val,
> -	void *user_data)
> -{
> -	struct memb_ring_id ring_id;
> -	unsigned int members[1];
> -	uint8_t u8;
> -
> -	memset(&ring_id, 0, sizeof(ring_id));
> -	if (icmap_get_uint8(key_name, &u8) == CS_OK) {
> -		set_quorum(members, 0, u8, &ring_id);
> -	}
> -}
> -
> -static void quorum_callback(int quorate, void *context)
> -{
> -	log_printf(LOGSYS_LEVEL_DEBUG, "quorum callback: quorate = %d\n", quorate);
> -}
> -
> -static void test_init(struct corosync_api_v1 *api,
> -		      quorum_set_quorate_fn_t report)
> -{
> -
> -	icmap_track_t icmap_track;
> -
> -	set_quorum = report;
> -
> -	/*
> -	 * Register for icmap changes on quorum.quorate
> -	 */
> -	icmap_track_add("quorum.quorate",
> -		ICMAP_TRACK_ADD | ICMAP_TRACK_DELETE | ICMAP_TRACK_MODIFY,
> -		key_change_notify,
> -		NULL,
> -		&icmap_track);
> -
> -	/* Register for quorum changes too! */
> -	api->quorum_register_callback(quorum_callback, NULL);
> -}
> diff --git a/services/votequorum.c b/services/votequorum.c
> deleted file mode 100644
> index 8151cc3..0000000
> --- a/services/votequorum.c
> +++ /dev/null
> @@ -1,1639 +0,0 @@
> -/*
> - * Copyright (c) 2009-2011 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Authors: Christine Caulfield (ccaulfie@xxxxxxxxxx)
> - *          Fabio M. Di Nitto   (fdinitto@xxxxxxxxxx)
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the MontaVista Software, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <sys/types.h>
> -#ifdef HAVE_ALLOCA_H
> -#include <alloca.h>
> -#endif
> -#include <sys/types.h>
> -#include <sys/socket.h>
> -#include <sys/un.h>
> -#include <sys/time.h>
> -#include <sys/ioctl.h>
> -#include <netinet/in.h>
> -#include <sys/uio.h>
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <stdlib.h>
> -#include <stdio.h>
> -#include <errno.h>
> -#include <time.h>
> -#include <unistd.h>
> -#include <netinet/in.h>
> -#include <arpa/inet.h>
> -
> -#include <qb/qbipc_common.h>
> -#include <qb/qbdefs.h>
> -#include <qb/qbutil.h>
> -
> -#include <corosync/corotypes.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/cfg.h>
> -#include <corosync/list.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/logsys.h>
> -#include <corosync/mar_gen.h>
> -#include <corosync/coroapi.h>
> -#include <corosync/engine/quorum.h>
> -#include <corosync/icmap.h>
> -#include <corosync/ipc_votequorum.h>
> -
> -#define VOTEQUORUM_MAJOR_VERSION 7
> -#define VOTEQUORUM_MINOR_VERSION 0
> -#define VOTEQUORUM_PATCH_VERSION 0
> -
> -/*
> - * Silly default to prevent accidents!
> - */
> -#define DEFAULT_EXPECTED   1024
> -#define DEFAULT_QDEV_POLL 10000
> -#define DEFAULT_LEAVE_TMO 10000
> -#define DEFAULT_LMS_WIN   10000
> -
> -LOGSYS_DECLARE_SUBSYS ("VOTEQ");
> -
> -enum quorum_message_req_types {
> -	MESSAGE_REQ_EXEC_VOTEQUORUM_NODEINFO  = 0,
> -	MESSAGE_REQ_EXEC_VOTEQUORUM_RECONFIGURE = 1,
> -};
> -
> -#define NODE_FLAGS_BEENDOWN         1
> -#define NODE_FLAGS_QDISK            8
> -#define NODE_FLAGS_REMOVED         16
> -#define NODE_FLAGS_US              32
> -
> -#define NODEID_US 0
> -#define NODEID_QDEVICE -1
> -
> -typedef enum {
> -	NODESTATE_JOINING=1,
> -	NODESTATE_MEMBER,
> -	NODESTATE_DEAD,
> -	NODESTATE_LEAVING
> -} nodestate_t;
> -
> -struct cluster_node {
> -	int flags;
> -	int node_id;
> -	unsigned int expected_votes;
> -	unsigned int votes;
> -	time_t join_time;
> -	nodestate_t state;
> -	unsigned long long int last_hello; /* Only used for quorum devices */
> -	struct list_head list;
> -};
> -
> -static int quorum;
> -static int cluster_is_quorate;
> -static int first_trans = 1;
> -static unsigned int quorumdev_poll = DEFAULT_QDEV_POLL;
> -
> -static uint8_t two_node = 0;
> -static uint8_t wait_for_all = 0;
> -static uint8_t wait_for_all_status = 0;
> -static uint8_t auto_tie_breaker = 0;
> -static int lowest_node_id = -1;
> -static uint8_t last_man_standing = 0;
> -static uint32_t last_man_standing_window = DEFAULT_LMS_WIN;
> -static int last_man_standing_timer_set = 0;
> -static corosync_timer_handle_t last_man_standing_timer;
> -
> -static struct cluster_node *us;
> -static struct cluster_node *quorum_device = NULL;
> -static char quorum_device_name[VOTEQUORUM_MAX_QDISK_NAME_LEN];
> -static corosync_timer_handle_t quorum_device_timer;
> -static struct list_head cluster_members_list;
> -static struct corosync_api_v1 *corosync_api;
> -static struct list_head trackers_list;
> -static unsigned int quorum_members[PROCESSOR_COUNT_MAX+1];
> -static int quorum_members_entries = 0;
> -static struct memb_ring_id quorum_ringid;
> -
> -#define max(a,b) (((a) > (b)) ? (a) : (b))
> -static struct cluster_node *find_node_by_nodeid(int nodeid);
> -static struct cluster_node *allocate_node(int nodeid);
> -
> -#define list_iterate(v, head) \
> -	for (v = (head)->next; v != head; v = v->next)
> -
> -struct quorum_pd {
> -	unsigned char track_flags;
> -	int tracking_enabled;
> -	uint64_t tracking_context;
> -	struct list_head list;
> -	void *conn;
> -};
> -
> -/*
> - * Service Interfaces required by service_message_handler struct
> - */
> -
> -static void votequorum_init(struct corosync_api_v1 *api,
> -			    quorum_set_quorate_fn_t report);
> -
> -static void quorum_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id);
> -
> -static int votequorum_exec_init_fn (struct corosync_api_v1 *api);
> -
> -static int quorum_lib_init_fn (void *conn);
> -
> -static int quorum_lib_exit_fn (void *conn);
> -
> -static void message_handler_req_exec_votequorum_nodeinfo (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void message_handler_req_exec_votequorum_reconfigure (
> -	const void *message,
> -	unsigned int nodeid);
> -
> -static void message_handler_req_lib_votequorum_getinfo (void *conn,
> -							const void *message);
> -
> -static void message_handler_req_lib_votequorum_setexpected (void *conn,
> -							    const void *message);
> -
> -static void message_handler_req_lib_votequorum_setvotes (void *conn,
> -							 const void *message);
> -
> -static void message_handler_req_lib_votequorum_qdisk_register (void *conn,
> -							       const void *message);
> -
> -static void message_handler_req_lib_votequorum_qdisk_unregister (void *conn,
> -								 const void *message);
> -
> -static void message_handler_req_lib_votequorum_qdisk_poll (void *conn,
> -							   const void *message);
> -
> -static void message_handler_req_lib_votequorum_qdisk_getinfo (void *conn,
> -							      const void *message);
> -
> -static void message_handler_req_lib_votequorum_trackstart (void *conn,
> -							   const void *message);
> -static void message_handler_req_lib_votequorum_trackstop (void *conn,
> -							  const void *message);
> -
> -static int quorum_exec_send_nodeinfo(void);
> -static int quorum_exec_send_reconfigure(int param, int nodeid, int value);
> -
> -static void exec_votequorum_nodeinfo_endian_convert (void *message);
> -static void exec_votequorum_reconfigure_endian_convert (void *message);
> -
> -static void add_votequorum_config_notification(void);
> -
> -static void recalculate_quorum(int allow_decrease, int by_current_nodes);
> -
> -/*
> - * Library Handler Definition
> - */
> -static struct corosync_lib_handler quorum_lib_service[] =
> -{
> -	{ /* 0 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_getinfo,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 1 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_setexpected,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 2 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_setvotes,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 3 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_register,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 4 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_unregister,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 5 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_poll,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 6 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_qdisk_getinfo,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 7 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_trackstart,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	},
> -	{ /* 8 */
> -		.lib_handler_fn		= message_handler_req_lib_votequorum_trackstop,
> -		.flow_control		= COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED
> -	}
> -};
> -
> -static struct corosync_exec_handler votequorum_exec_engine[] =
> -{
> -	{ /* 0 */
> -		.exec_handler_fn	= message_handler_req_exec_votequorum_nodeinfo,
> -		.exec_endian_convert_fn	= exec_votequorum_nodeinfo_endian_convert
> -	},
> -	{ /* 1 */
> -		.exec_handler_fn	= message_handler_req_exec_votequorum_reconfigure,
> -		.exec_endian_convert_fn	= exec_votequorum_reconfigure_endian_convert
> -	},
> -};
> -
> -static quorum_set_quorate_fn_t set_quorum;
> -
> -/*
> - * lcrso object definition
> - */
> -static struct quorum_services_api_ver1 votequorum_iface_ver0 = {
> -	.init				= votequorum_init
> -};
> -
> -static struct corosync_service_engine quorum_service_handler = {
> -	.name					= "corosync votes quorum service v0.91",
> -	.id					= VOTEQUORUM_SERVICE,
> -	.private_data_size			= sizeof (struct quorum_pd),
> -	.allow_inquorate			= CS_LIB_ALLOW_INQUORATE,
> -	.flow_control				= COROSYNC_LIB_FLOW_CONTROL_REQUIRED,
> -	.lib_init_fn				= quorum_lib_init_fn,
> -	.lib_exit_fn				= quorum_lib_exit_fn,
> -	.lib_engine				= quorum_lib_service,
> -	.lib_engine_count			= sizeof (quorum_lib_service) / sizeof (struct corosync_lib_handler),
> -	.exec_init_fn				= votequorum_exec_init_fn,
> -	.exec_engine				= votequorum_exec_engine,
> -	.exec_engine_count			= sizeof (votequorum_exec_engine) / sizeof (struct corosync_exec_handler),
> -	.confchg_fn				= quorum_confchg_fn,
> -	.sync_mode				= CS_SYNC_V1
> -};
> -
> -/*
> - * Dynamic loader definition
> - */
> -static struct corosync_service_engine *quorum_get_service_handler_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 quorum_service_handler_iface = {
> -	.corosync_get_service_engine_ver0 = quorum_get_service_handler_ver0
> -};
> -
> -static struct lcr_iface corosync_quorum_ver0[2] = {
> -	{
> -		.name				= "corosync_votequorum",
> -		.version			= 0,
> -		.versions_replace		= 0,
> -		.versions_replace_count		= 0,
> -		.dependencies			= 0,
> -		.dependency_count		= 0,
> -		.constructor			= NULL,
> -		.destructor			= NULL,
> -		.interfaces			= (void **)(void *)&votequorum_iface_ver0
> -	},
> -	{
> -		.name				= "corosync_votequorum_iface",
> -		.version			= 0,
> -		.versions_replace		= 0,
> -		.versions_replace_count		= 0,
> -		.dependencies			= 0,
> -		.dependency_count		= 0,
> -		.constructor			= NULL,
> -		.destructor			= NULL,
> -		.interfaces			= NULL
> -	}
> -};
> -
> -static struct lcr_comp quorum_comp_ver0 = {
> -	.iface_count			= 2,
> -	.ifaces				= corosync_quorum_ver0
> -};
> -
> -
> -static struct corosync_service_engine *quorum_get_service_handler_ver0 (void)
> -{
> -	return (&quorum_service_handler);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_interfaces_set (&corosync_quorum_ver0[0], &votequorum_iface_ver0);
> -	lcr_interfaces_set (&corosync_quorum_ver0[1], &quorum_service_handler_iface);
> -	lcr_component_register (&quorum_comp_ver0);
> -}
> -
> -static void votequorum_init(struct corosync_api_v1 *api,
> -			    quorum_set_quorate_fn_t report)
> -{
> -	ENTER();
> -
> -	set_quorum = report;
> -
> -	icmap_get_uint8("quorum.wait_for_all", &wait_for_all);
> -	icmap_get_uint8("quorum.auto_tie_breaker", &auto_tie_breaker);
> -	icmap_get_uint8("quorum.last_man_standing", &last_man_standing);
> -	icmap_get_uint32("quorum.last_man_standing_window", &last_man_standing_window);
> -
> -	/*
> -	 * TODO: we need to know the lowest node-id in the cluster
> -	 * current lack of node list with node-id's requires us to see all nodes
> -	 * to determine which is the lowest.
> -	 */
> -	if (auto_tie_breaker) {
> -		wait_for_all = 1;
> -	}
> -
> -	if (wait_for_all) {
> -		wait_for_all_status = 1;
> -	}
> -
> -	/* Load the library-servicing part of this module */
> -	api->service_link_and_init(api, "corosync_votequorum_iface", 0);
> -
> -	LEAVE();
> -}
> -
> -struct req_exec_quorum_nodeinfo {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -	unsigned int first_trans;
> -	unsigned int votes;
> -	unsigned int expected_votes;
> -	unsigned int major_version;	/* Not backwards compatible */
> -	unsigned int minor_version;	/* Backwards compatible */
> -	unsigned int patch_version;	/* Backwards/forwards compatible */
> -	unsigned int config_version;
> -	unsigned int flags;
> -	unsigned int wait_for_all_status;
> -	unsigned int quorate;
> -} __attribute__((packed));
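
The nodeinfo message is declared packed with an 8-byte-aligned header,
presumably so the on-wire layout is identical across architectures, and the
version triple spells out the compatibility policy in the field comments. If
layout stability is the concern, a compile-time pin along these lines could
guard it; this is only a sketch of the idea with illustrative field names, not
something the patch contains:

	/* Sketch (C11): pinning an on-wire struct's size/offsets at compile time. */
	#include <stdint.h>
	#include <stddef.h>

	struct wire_nodeinfo {
		uint32_t id;
		uint32_t size;
		uint32_t first_trans;
		uint32_t votes;
		uint32_t expected_votes;
	};

	_Static_assert(offsetof(struct wire_nodeinfo, first_trans) == 8,
		       "peers expect first_trans at byte 8");
	_Static_assert(sizeof(struct wire_nodeinfo) == 20,
		       "unexpected wire size");
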
> -
> -/*
> - * Parameters for RECONFIG command
> - */
> -#define RECONFIG_PARAM_EXPECTED_VOTES 1
> -#define RECONFIG_PARAM_NODE_VOTES     2
> -
> -struct req_exec_quorum_reconfigure {
> -	struct qb_ipc_request_header header __attribute__((aligned(8)));
> -	unsigned int param;
> -	unsigned int nodeid;
> -	unsigned int value;
> -};
> -
> -static void read_quorum_config(void)
> -{
> -	int cluster_members = 0;
> -	struct list_head *tmp;
> -
> -	ENTER();
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "Reading configuration\n");
> -
> -	if (icmap_get_uint32("quorum.expected_votes", &us->expected_votes) != CS_OK) {
> -		us->expected_votes = DEFAULT_EXPECTED;
> -	}
> -
> -	if (icmap_get_uint32("quorum.votes", &us->votes) != CS_OK) {
> -		us->votes = 1;
> -	}
> -
> -	if (icmap_get_uint32("quorum.quorumdev_poll", &quorumdev_poll) != CS_OK) {
> -		quorumdev_poll = DEFAULT_QDEV_POLL;
> -	}
> -
> -	icmap_get_uint8("quorum.two_node", &two_node);
> -
> -	/*
> -	 * two_node mode is invalid if there are more than 2 nodes in the cluster!
> -	 */
> -	list_iterate(tmp, &cluster_members_list) {
> -		cluster_members++;
> -        }
> -
> -	if (two_node && cluster_members > 2) {
> -		log_printf(LOGSYS_LEVEL_WARNING, "quorum.two_node was set but there are more than 2 nodes in the cluster. It will be ignored.\n");
> -		two_node = 0;
> -	}
> -
> -	LEAVE();
> -}
> -
> -static int votequorum_exec_init_fn (struct corosync_api_v1 *api)
> -{
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -
> -	ENTER();
> -
> -	corosync_api = api;
> -
> -	list_init(&cluster_members_list);
> -	list_init(&trackers_list);
> -
> -	/*
> -	 * Allocate a cluster_node for us
> -	 */
> -	us = allocate_node(corosync_api->totem_nodeid_get());
> -	if (!us) {
> -		LEAVE();
> -		return (1);
> -	}
> -
> -	us->flags |= NODE_FLAGS_US;
> -	us->state = NODESTATE_MEMBER;
> -	us->expected_votes = DEFAULT_EXPECTED;
> -	us->votes = 1;
> -	time(&us->join_time);
> -
> -	read_quorum_config();
> -	recalculate_quorum(0, 0);
> -
> -	/*
> -	 * Listen for changes
> -	 */
> -	add_votequorum_config_notification();
> -
> -	/*
> -	 * Start us off with one node
> -	 */
> -	quorum_exec_send_nodeinfo();
> -
> -	LEAVE();
> -
> -	return (0);
> -}
> -
> -static int quorum_lib_exit_fn (void *conn)
> -{
> -	struct quorum_pd *quorum_pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> -
> -	ENTER();
> -
> -	if (quorum_pd->tracking_enabled) {
> -		list_del (&quorum_pd->list);
> -		list_init (&quorum_pd->list);
> -	}
> -
> -	LEAVE();
> -
> -	return (0);
> -}
> -
> -
> -static int send_quorum_notification(void *conn, uint64_t context)
> -{
> -	struct res_lib_votequorum_notification *res_lib_votequorum_notification;
> -	struct list_head *tmp;
> -	struct cluster_node *node;
> -	int cluster_members = 0;
> -	int i = 0;
> -	int size;
> -	char *buf;
> -
> -	ENTER();
> -
> -	list_iterate(tmp, &cluster_members_list) {
> -		node = list_entry(tmp, struct cluster_node, list);
> -		cluster_members++;
> -        }
> -	if (quorum_device) {
> -		cluster_members++;
> -	}
> -
> -	size = sizeof(struct res_lib_votequorum_notification) + sizeof(struct votequorum_node) * cluster_members;
> -	buf = alloca(size);
> -	if (!buf) {
> -		LEAVE();
> -		return -1;
> -	}
> -
> -	res_lib_votequorum_notification = (struct res_lib_votequorum_notification *)buf;
> -	res_lib_votequorum_notification->quorate = cluster_is_quorate;
> -	res_lib_votequorum_notification->node_list_entries = cluster_members;
> -	res_lib_votequorum_notification->context = context;
> -	list_iterate(tmp, &cluster_members_list) {
> -		node = list_entry(tmp, struct cluster_node, list);
> -		res_lib_votequorum_notification->node_list[i].nodeid = node->node_id;
> -		res_lib_votequorum_notification->node_list[i++].state = node->state;
> -        }
> -	if (quorum_device) {
> -		res_lib_votequorum_notification->node_list[i].nodeid = 0;
> -		res_lib_votequorum_notification->node_list[i++].state = quorum_device->state | 0x80;
> -	}
> -	res_lib_votequorum_notification->header.id = MESSAGE_RES_VOTEQUORUM_NOTIFICATION;
> -	res_lib_votequorum_notification->header.size = size;
> -	res_lib_votequorum_notification->header.error = CS_OK;
> -
> -	/* Send it to all interested parties */
> -	if (conn) {
> -		int ret = corosync_api->ipc_dispatch_send(conn, buf, size);
> -		LEAVE();
> -		return ret;
> -	} else {
> -		struct quorum_pd *qpd;
> -
> -		list_iterate(tmp, &trackers_list) {
> -			qpd = list_entry(tmp, struct quorum_pd, list);
> -			res_lib_votequorum_notification->context = qpd->tracking_context;
> -			corosync_api->ipc_dispatch_send(qpd->conn, buf, size);
> -		}
> -	}
> -
> -	LEAVE();
> -
> -	return 0;
> -}
> -
> -static void send_expectedvotes_notification(void)
> -{
> -	struct res_lib_votequorum_expectedvotes_notification res_lib_votequorum_expectedvotes_notification;
> -	struct quorum_pd *qpd;
> -	struct list_head *tmp;
> -
> -	ENTER();
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "Sending expected votes callback\n");
> -
> -	res_lib_votequorum_expectedvotes_notification.header.id = MESSAGE_RES_VOTEQUORUM_EXPECTEDVOTES_NOTIFICATION;
> -	res_lib_votequorum_expectedvotes_notification.header.size = sizeof(res_lib_votequorum_expectedvotes_notification);
> -	res_lib_votequorum_expectedvotes_notification.header.error = CS_OK;
> -	res_lib_votequorum_expectedvotes_notification.expected_votes = us->expected_votes;
> -
> -	list_iterate(tmp, &trackers_list) {
> -		qpd = list_entry(tmp, struct quorum_pd, list);
> -		res_lib_votequorum_expectedvotes_notification.context = qpd->tracking_context;
> -		corosync_api->ipc_dispatch_send(qpd->conn, &res_lib_votequorum_expectedvotes_notification,
> -						sizeof(struct res_lib_votequorum_expectedvotes_notification));
> -	}
> -
> -	LEAVE();
> -}
> -
> -static void get_lowest_node_id(void)
> -{
> -	struct cluster_node *node = NULL;
> -	struct list_head *tmp;
> -
> -	ENTER();
> -
> -	lowest_node_id = us->node_id;
> -
> -	list_iterate(tmp, &cluster_members_list) {
> -		node = list_entry(tmp, struct cluster_node, list);
> -		if (node->node_id < lowest_node_id) {
> -			lowest_node_id = node->node_id;
> -		}
> -	}
> -	log_printf(LOGSYS_LEVEL_DEBUG, "lowest node id: %d us: %d\n", lowest_node_id, us->node_id);
> -
> -	LEAVE();
> -}
> -
> -static int check_low_node_id_partition(void)
> -{
> -	struct cluster_node *node = NULL;
> -	struct list_head *tmp;
> -	int found = 0;
> -
> -	ENTER();
> -
> -	list_iterate(tmp, &cluster_members_list) {
> -		node = list_entry(tmp, struct cluster_node, list);
> -		if (node->state == NODESTATE_MEMBER) {
> -			if (node->node_id == lowest_node_id) {
> -				found = 1;
> -			}
> -		}
> -	}
> -
> -	LEAVE();
> -	return found;
> -}
> -
> -static void set_quorate(int total_votes)
> -{
> -	int quorate;
> -	int quorum_change = 0;
> -
> -	ENTER();
> -
> -	/*
> -	 * wait for all nodes to show up before granting quorum
> -	 */
> -
> -	if ((wait_for_all) && (wait_for_all_status)) {
> -		if (total_votes != us->expected_votes) {
> -			log_printf(LOGSYS_LEVEL_NOTICE,
> -				   "Waiting for all cluster members. "
> -				   "Current votes: %d expected_votes: %d\n",
> -				   total_votes, us->expected_votes);
> -			cluster_is_quorate = 0;
> -			return;
> -		}
> -		wait_for_all_status = 0;
> -		get_lowest_node_id();
> -	}
> -
> -	if (quorum > total_votes) {
> -		quorate = 0;
> -	} else {
> -		quorate = 1;
> -	}
> -
> -	if ((auto_tie_breaker) &&
> -	    (total_votes == (us->expected_votes / 2)) &&
> -	    (check_low_node_id_partition() == 1)) {
> -		quorate = 1;
> -	}
> -
> -	if (cluster_is_quorate && !quorate) {
> -		quorum_change = 1;
> -		log_printf(LOGSYS_LEVEL_DEBUG, "quorum lost, blocking activity\n");
> -	}
> -	if (!cluster_is_quorate && quorate) {
> -		quorum_change = 1;
> -		log_printf(LOGSYS_LEVEL_DEBUG, "quorum regained, resuming activity\n");
> -	}
> -
> -	cluster_is_quorate = quorate;
> -
> -	if (wait_for_all) {
> -		if (quorate) {
> -			wait_for_all_status = 0;
> -		} else {
> -			wait_for_all_status = 1;
> -		}
> -	}
> -
> -	if (quorum_change) {
> -		set_quorum(quorum_members, quorum_members_entries,
> -			   cluster_is_quorate, &quorum_ringid);
> -	}
> -
> -	LEAVE();
> -}
> -
> -static int calculate_quorum(int allow_decrease, int max_expected, unsigned int *ret_total_votes)
> -{
> -	struct list_head *nodelist;
> -	struct cluster_node *node;
> -	unsigned int total_votes = 0;
> -	unsigned int highest_expected = 0;
> -	unsigned int newquorum, q1, q2;
> -	unsigned int total_nodes = 0;
> -
> -	ENTER();
> -
> -	list_iterate(nodelist, &cluster_members_list) {
> -		node = list_entry(nodelist, struct cluster_node, list);
> -
> -		log_printf(LOGSYS_LEVEL_DEBUG, "node %x state=%d, votes=%d, expected=%d\n",
> -			   node->node_id, node->state, node->votes, node->expected_votes);
> -
> -		if (node->state == NODESTATE_MEMBER) {
> -			if (max_expected) {
> -				node->expected_votes = max_expected;
> -			} else {
> -				highest_expected = max(highest_expected, node->expected_votes);
> -			}
> -			total_votes += node->votes;
> -			total_nodes++;
> -		}
> -	}
> -
> -	if (quorum_device && quorum_device->state == NODESTATE_MEMBER) {
> -		total_votes += quorum_device->votes;
> -	}
> -
> -	if (max_expected > 0) {
> -		highest_expected = max_expected;
> -	}
> -
> -	/*
> -	 * This quorum calculation is taken from the OpenVMS Cluster Systems
> -	 * manual, but, then, you guessed that didn't you
> -	 */
> -	q1 = (highest_expected + 2) / 2;
> -	q2 = (total_votes + 2) / 2;
> -	newquorum = max(q1, q2);
> -
> -	/*
> -	 * Normally quorum never decreases but the system administrator can
> -	 * force it down by setting expected votes to a maximum value
> -	 */
> -	if (!allow_decrease) {
> -		newquorum = max(quorum, newquorum);
> -	}
> -
> -	/*
> -	 * The special two_node mode allows each of the two nodes to retain
> -	 * quorum if the other fails.  Only one of the two should live past
> -	 * fencing (as both nodes try to fence each other in split-brain.)
> -	 * Also: if there are more than two nodes, force us inquorate to avoid
> -	 * any damage or confusion.
> -	 */
> -	if (two_node && total_nodes <= 2) {
> -		newquorum = 1;
> -	}
> -
> -	if (ret_total_votes) {
> -		*ret_total_votes = total_votes;
> -	}
> -
> -	LEAVE();
> -	return newquorum;
> -}
> -
> -/* Recalculate cluster quorum, set quorate and notify changes */
> -static void recalculate_quorum(int allow_decrease, int by_current_nodes)
> -{
> -	unsigned int total_votes = 0;
> -	int cluster_members = 0;
> -	struct list_head *nodelist;
> -	struct cluster_node *node;
> -
> -	ENTER();
> -
> -	list_iterate(nodelist, &cluster_members_list) {
> -		node = list_entry(nodelist, struct cluster_node, list);
> -		if (node->state == NODESTATE_MEMBER) {
> -			if (by_current_nodes) {
> -				cluster_members++;
> -			}
> -			total_votes += node->votes;
> -		}
> -	}
> -
> -	/*
> -	 * Keep expected_votes at the highest number of votes in the cluster
> -	 */
> -	log_printf(LOGSYS_LEVEL_DEBUG, "total_votes=%d, expected_votes=%d\n", total_votes, us->expected_votes);
> -	if (total_votes > us->expected_votes) {
> -		us->expected_votes = total_votes;
> -		send_expectedvotes_notification();
> -	}
> -
> -	quorum = calculate_quorum(allow_decrease, cluster_members, &total_votes);
> -	set_quorate(total_votes);
> -
> -	send_quorum_notification(NULL, 0L);
> -
> -	LEAVE();
> -}
> -
> -static void node_add_ordered(struct cluster_node *newnode)
> -{
> -	struct cluster_node *node = NULL;
> -	struct list_head *tmp;
> -	struct list_head *newlist = &newnode->list;
> -
> -	ENTER();
> -
> -	list_iterate(tmp, &cluster_members_list) {
> -		node = list_entry(tmp, struct cluster_node, list);
> -		if (newnode->node_id < node->node_id) {
> -			break;
> -		}
> -	}
> -
> -	if (!node) {
> -		list_add(&newnode->list, &cluster_members_list);
> -	} else {
> -		newlist->prev = tmp->prev;
> -		newlist->next = tmp;
> -		tmp->prev->next = newlist;
> -		tmp->prev = newlist;
> -	}
> -
> -	LEAVE();
> -}
> -
> -static struct cluster_node *allocate_node(int nodeid)
> -{
> -	struct cluster_node *cl;
> -
> -	ENTER();
> -
> -	cl = malloc(sizeof(struct cluster_node));
> -	if (cl) {
> -		memset(cl, 0, sizeof(struct cluster_node));
> -		cl->node_id = nodeid;
> -		if (nodeid) {
> -			node_add_ordered(cl);
> -		}
> -	}
> -
> -	LEAVE();
> -
> -	return cl;
> -}
> -
> -static struct cluster_node *find_node_by_nodeid(int nodeid)
> -{
> -	struct cluster_node *node;
> -	struct list_head *tmp;
> -
> -	ENTER();
> -
> -	if (nodeid == NODEID_US) {
> -		LEAVE();
> -		return us;
> -	}
> -
> -	if (nodeid == NODEID_QDEVICE) {
> -		LEAVE();
> -		return quorum_device;
> -	}
> -
> -	list_iterate(tmp, &cluster_members_list) {
> -		node = list_entry(tmp, struct cluster_node, list);
> -		if (node->node_id == nodeid) {
> -			LEAVE();
> -			return node;
> -		}
> -	}
> -
> -	LEAVE();
> -	return NULL;
> -}
> -
> -
> -static int quorum_exec_send_nodeinfo()
> -{
> -	struct req_exec_quorum_nodeinfo req_exec_quorum_nodeinfo;
> -	struct iovec iov[1];
> -	int ret;
> -
> -	ENTER();
> -
> -	req_exec_quorum_nodeinfo.expected_votes = us->expected_votes;
> -	req_exec_quorum_nodeinfo.votes = us->votes;
> -	req_exec_quorum_nodeinfo.major_version = VOTEQUORUM_MAJOR_VERSION;
> -	req_exec_quorum_nodeinfo.minor_version = VOTEQUORUM_MINOR_VERSION;
> -	req_exec_quorum_nodeinfo.patch_version = VOTEQUORUM_PATCH_VERSION;
> -	req_exec_quorum_nodeinfo.flags = us->flags;
> -	req_exec_quorum_nodeinfo.first_trans = first_trans;
> -	req_exec_quorum_nodeinfo.wait_for_all_status = wait_for_all_status;
> -	req_exec_quorum_nodeinfo.quorate = cluster_is_quorate;
> -
> -	req_exec_quorum_nodeinfo.header.id = SERVICE_ID_MAKE(VOTEQUORUM_SERVICE, MESSAGE_REQ_EXEC_VOTEQUORUM_NODEINFO);
> -	req_exec_quorum_nodeinfo.header.size = sizeof(req_exec_quorum_nodeinfo);
> -
> -	iov[0].iov_base = (void *)&req_exec_quorum_nodeinfo;
> -	iov[0].iov_len = sizeof(req_exec_quorum_nodeinfo);
> -
> -	ret = corosync_api->totem_mcast (iov, 1, TOTEM_AGREED);
> -
> -	LEAVE();
> -	return ret;
> -}
> -
> -
> -static int quorum_exec_send_reconfigure(int param, int nodeid, int value)
> -{
> -	struct req_exec_quorum_reconfigure req_exec_quorum_reconfigure;
> -	struct iovec iov[1];
> -	int ret;
> -
> -	ENTER();
> -
> -	req_exec_quorum_reconfigure.param = param;
> -	req_exec_quorum_reconfigure.nodeid = nodeid;
> -	req_exec_quorum_reconfigure.value = value;
> -
> -	req_exec_quorum_reconfigure.header.id = SERVICE_ID_MAKE(VOTEQUORUM_SERVICE, MESSAGE_REQ_EXEC_VOTEQUORUM_RECONFIGURE);
> -	req_exec_quorum_reconfigure.header.size = sizeof(req_exec_quorum_reconfigure);
> -
> -	iov[0].iov_base = (void *)&req_exec_quorum_reconfigure;
> -	iov[0].iov_len = sizeof(req_exec_quorum_reconfigure);
> -
> -	ret = corosync_api->totem_mcast (iov, 1, TOTEM_AGREED);
> -
> -	LEAVE();
> -	return ret;
> -}
> -
> -static void lms_timer_fn(void *arg)
> -{
> -	ENTER();
> -
> -	last_man_standing_timer_set = 0;
> -	if (cluster_is_quorate) {
> -		recalculate_quorum(1,1);
> -	}
> -
> -	LEAVE();
> -}
> -
> -static void quorum_confchg_fn (
> -	enum totem_configuration_type configuration_type,
> -	const unsigned int *member_list, size_t member_list_entries,
> -	const unsigned int *left_list, size_t left_list_entries,
> -	const unsigned int *joined_list, size_t joined_list_entries,
> -	const struct memb_ring_id *ring_id)
> -{
> -	int i;
> -	int leaving = 0;
> -	struct cluster_node *node;
> -
> -	ENTER();
> -
> -	if (member_list_entries > 1) {
> -		first_trans = 0;
> -	}
> -
> -	if (left_list_entries) {
> -		for (i = 0; i< left_list_entries; i++) {
> -			node = find_node_by_nodeid(left_list[i]);
> -			if (node) {
> -				if (node->state == NODESTATE_LEAVING) {
> -					leaving = 1;
> -				}
> -				node->state = NODESTATE_DEAD;
> -				node->flags |= NODE_FLAGS_BEENDOWN;
> -			}
> -		}
> -	}
> -
> -	if (last_man_standing) {
> -		if (((member_list_entries >= quorum) && (left_list_entries)) ||
> -		    ((member_list_entries <= quorum) && (auto_tie_breaker) && (check_low_node_id_partition() == 1))) {
> -			if (last_man_standing_timer_set) {
> -				corosync_api->timer_delete(last_man_standing_timer);
> -				last_man_standing_timer_set = 0;
> -			}
> -			corosync_api->timer_add_duration((unsigned long long)last_man_standing_window*1000000, NULL, lms_timer_fn, &last_man_standing_timer);
> -			last_man_standing_timer_set = 1;
> -		}
> -	}
> -
> -	if (member_list_entries) {
> -		memcpy(quorum_members, member_list, sizeof(unsigned int) * member_list_entries);
> -		quorum_members_entries = member_list_entries;
> -		if (quorum_device) {
> -			quorum_members[quorum_members_entries++] = 0;
> -		}
> -		quorum_exec_send_nodeinfo();
> -	}
> -
> -	if (left_list_entries) {
> -		recalculate_quorum(leaving, leaving);
> -	}
> -
> -	memcpy(&quorum_ringid, ring_id, sizeof(*ring_id));
> -
> -	if (configuration_type == TOTEM_CONFIGURATION_REGULAR) {
> -		set_quorum(quorum_members, quorum_members_entries,
> -			   cluster_is_quorate, &quorum_ringid);
> -	}
> -
> -	LEAVE();
> -}
> -
> -static void exec_votequorum_nodeinfo_endian_convert (void *message)
> -{
> -	struct req_exec_quorum_nodeinfo *nodeinfo = message;
> -
> -	ENTER();
> -
> -	nodeinfo->votes = swab32(nodeinfo->votes);
> -	nodeinfo->expected_votes = swab32(nodeinfo->expected_votes);
> -	nodeinfo->major_version = swab32(nodeinfo->major_version);
> -	nodeinfo->minor_version = swab32(nodeinfo->minor_version);
> -	nodeinfo->patch_version = swab32(nodeinfo->patch_version);
> -	nodeinfo->config_version = swab32(nodeinfo->config_version);
> -	nodeinfo->flags = swab32(nodeinfo->flags);
> -	nodeinfo->wait_for_all_status = swab32(nodeinfo->wait_for_all_status);
> -	nodeinfo->quorate = swab32(nodeinfo->quorate);
> -
> -	LEAVE();
> -}
> -
> -static void exec_votequorum_reconfigure_endian_convert (void *message)
> -{
> -	struct req_exec_quorum_reconfigure *reconfigure = message;
> -
> -	ENTER();
> -
> -	reconfigure->nodeid = swab32(reconfigure->nodeid);
> -	reconfigure->value = swab32(reconfigure->value);
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_exec_votequorum_nodeinfo (
> -	const void *message,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_quorum_nodeinfo *req_exec_quorum_nodeinfo = message;
> -	struct cluster_node *node;
> -	int old_votes;
> -	int old_expected;
> -	nodestate_t old_state;
> -	int new_node = 0;
> -
> -	ENTER();
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got nodeinfo message from cluster node %d\n", nodeid);
> -
> -	node = find_node_by_nodeid(nodeid);
> -	if (!node) {
> -		node = allocate_node(nodeid);
> -		new_node = 1;
> -	}
> -	if (!node) {
> -		corosync_api->error_memory_failure();
> -		LEAVE();
> -		return;
> -	}
> -
> -	old_votes = node->votes;
> -	old_expected = node->expected_votes;
> -	old_state = node->state;
> -
> -	/* Update node state */
> -	node->votes = req_exec_quorum_nodeinfo->votes;
> -	node->expected_votes = req_exec_quorum_nodeinfo->expected_votes;
> -	node->state = NODESTATE_MEMBER;
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "nodeinfo message: votes: %d, expected: %d wfa: %d quorate: %d\n",
> -					req_exec_quorum_nodeinfo->votes,
> -					req_exec_quorum_nodeinfo->expected_votes,
> -					req_exec_quorum_nodeinfo->wait_for_all_status,
> -					req_exec_quorum_nodeinfo->quorate);
> -
> -	if ((last_man_standing) && (req_exec_quorum_nodeinfo->votes > 1)) {
> -		log_printf(LOGSYS_LEVEL_WARNING, "Last Man Standing feature is supported only when all"
> -						 "cluster nodes votes are set to 1. Disabling LMS.");
> -		last_man_standing = 0;
> -		if (last_man_standing_timer_set) {
> -			corosync_api->timer_delete(last_man_standing_timer);
> -			last_man_standing_timer_set = 0;
> -		}
> -	}
> -
> -	node->flags &= ~NODE_FLAGS_BEENDOWN;
> -
> -	if (new_node ||
> -	    req_exec_quorum_nodeinfo->first_trans || 
> -	    old_votes != node->votes ||
> -	    old_expected != node->expected_votes ||
> -	    old_state != node->state) {
> -		recalculate_quorum(0, 0);
> -	}
> -
> -	if (!nodeid) {
> -		free(node);
> -	}
> -
> -	if ((wait_for_all) &&
> -	    (!req_exec_quorum_nodeinfo->wait_for_all_status) &&
> -	    (req_exec_quorum_nodeinfo->quorate)) {
> -		wait_for_all_status = 0;
> -	}
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_exec_votequorum_reconfigure (
> -	const void *message,
> -	unsigned int nodeid)
> -{
> -	const struct req_exec_quorum_reconfigure *req_exec_quorum_reconfigure = message;
> -	struct cluster_node *node;
> -	struct list_head *nodelist;
> -
> -	ENTER();
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got reconfigure message from cluster node %d\n", nodeid);
> -
> -	node = find_node_by_nodeid(req_exec_quorum_reconfigure->nodeid);
> -	if (!node) {
> -		LEAVE();
> -		return;
> -	}
> -
> -	switch(req_exec_quorum_reconfigure->param)
> -	{
> -	case RECONFIG_PARAM_EXPECTED_VOTES:
> -		list_iterate(nodelist, &cluster_members_list) {
> -			node = list_entry(nodelist, struct cluster_node, list);
> -			if (node->state == NODESTATE_MEMBER &&
> -			    node->expected_votes > req_exec_quorum_reconfigure->value) {
> -				node->expected_votes = req_exec_quorum_reconfigure->value;
> -			}
> -		}
> -		send_expectedvotes_notification();
> -		recalculate_quorum(1, 0);  /* Allow decrease */
> -		break;
> -
> -	case RECONFIG_PARAM_NODE_VOTES:
> -		node->votes = req_exec_quorum_reconfigure->value;
> -		recalculate_quorum(1, 0);  /* Allow decrease */
> -		break;
> -
> -	}
> -
> -	LEAVE();
> -}
> -
> -static int quorum_lib_init_fn (void *conn)
> -{
> -	struct quorum_pd *pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> -
> -	ENTER();
> -
> -	list_init (&pd->list);
> -	pd->conn = conn;
> -
> -	LEAVE();
> -	return (0);
> -}
> -
> -/*
> - * Message from the library
> - */
> -static void message_handler_req_lib_votequorum_getinfo (void *conn, const void *message)
> -{
> -	const struct req_lib_votequorum_getinfo *req_lib_votequorum_getinfo = message;
> -	struct res_lib_votequorum_getinfo res_lib_votequorum_getinfo;
> -	struct cluster_node *node;
> -	unsigned int highest_expected = 0;
> -	unsigned int total_votes = 0;
> -	cs_error_t error = CS_OK;
> -
> -	ENTER();
> -
> -	log_printf(LOGSYS_LEVEL_DEBUG, "got getinfo request on %p for node %d\n", conn, req_lib_votequorum_getinfo->nodeid);
> -
> -	node = find_node_by_nodeid(req_lib_votequorum_getinfo->nodeid);
> -	if (node) {
> -		struct cluster_node *iternode;
> -		struct list_head *nodelist;
> -
> -		list_iterate(nodelist, &cluster_members_list) {
> -			iternode = list_entry(nodelist, struct cluster_node, list);
> -
> -			if (iternode->state == NODESTATE_MEMBER) {
> -				highest_expected =
> -					max(highest_expected, iternode->expected_votes);
> -				total_votes += iternode->votes;
> -			}
> -		}
> -
> -		if (quorum_device && quorum_device->state == NODESTATE_MEMBER) {
> -			total_votes += quorum_device->votes;
> -		}
> -
> -		res_lib_votequorum_getinfo.votes = us->votes;
> -		res_lib_votequorum_getinfo.expected_votes = us->expected_votes;
> -		res_lib_votequorum_getinfo.highest_expected = highest_expected;
> -
> -		res_lib_votequorum_getinfo.quorum = quorum;
> -		res_lib_votequorum_getinfo.total_votes = total_votes;
> -		res_lib_votequorum_getinfo.flags = 0;
> -		res_lib_votequorum_getinfo.nodeid = node->node_id;
> -
> -		if (two_node) {
> -			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_FLAG_TWONODE;
> -		}
> -		if (cluster_is_quorate) {
> -			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_FLAG_QUORATE;
> -		}
> -		if (wait_for_all) {
> -			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_WAIT_FOR_ALL;
> -		}
> -		if (last_man_standing) {
> -			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_LAST_MAN_STANDING;
> -		}
> -		if (auto_tie_breaker) {
> -			res_lib_votequorum_getinfo.flags |= VOTEQUORUM_INFO_AUTO_TIE_BREAKER;
> -		}
> -	} else {
> -		error = CS_ERR_NOT_EXIST;
> -	}
> -
> -	res_lib_votequorum_getinfo.header.size = sizeof(res_lib_votequorum_getinfo);
> -	res_lib_votequorum_getinfo.header.id = MESSAGE_RES_VOTEQUORUM_GETINFO;
> -	res_lib_votequorum_getinfo.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_getinfo, sizeof(res_lib_votequorum_getinfo));
> -	log_printf(LOGSYS_LEVEL_DEBUG, "getinfo response error: %d\n", error);
> -
> -	LEAVE();
> -}
> -
> -/*
> - * Message from the library
> - */
> -static void message_handler_req_lib_votequorum_setexpected (void *conn, const void *message)
> -{
> -	const struct req_lib_votequorum_setexpected *req_lib_votequorum_setexpected = message;
> -	struct res_lib_votequorum_status res_lib_votequorum_status;
> -	cs_error_t error = CS_OK;
> -	unsigned int newquorum;
> -	unsigned int total_votes;
> -
> -	ENTER();
> -
> -	/*
> -	 * Validate new expected votes
> -	 */
> -	newquorum = calculate_quorum(1, req_lib_votequorum_setexpected->expected_votes, &total_votes);
> -	if (newquorum < total_votes / 2 ||
> -	    newquorum > total_votes) {
> -		error = CS_ERR_INVALID_PARAM;
> -		goto error_exit;
> -	}
> -
> -	quorum_exec_send_reconfigure(RECONFIG_PARAM_EXPECTED_VOTES, us->node_id,
> -				     req_lib_votequorum_setexpected->expected_votes);
> -
> -	/*
> -	 * send status
> -	 */
> -error_exit:
> -	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> -	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> -	res_lib_votequorum_status.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> -
> -	LEAVE();
> -}
> -
> -/*
> - * Message from the library
> - */
> -static void message_handler_req_lib_votequorum_setvotes (void *conn, const void *message)
> -{
> -	const struct req_lib_votequorum_setvotes *req_lib_votequorum_setvotes = message;
> -	struct res_lib_votequorum_status res_lib_votequorum_status;
> -	struct cluster_node *node;
> -	unsigned int newquorum;
> -	unsigned int total_votes;
> -	unsigned int saved_votes;
> -	cs_error_t error = CS_OK;
> -	unsigned int nodeid;
> -
> -	ENTER();
> -
> -	nodeid = req_lib_votequorum_setvotes->nodeid;
> -	node = find_node_by_nodeid(nodeid);
> -	if (!node) {
> -		error = CS_ERR_NAME_NOT_FOUND;
> -		goto error_exit;
> -	}
> -
> -	/*
> -	 * Check votes is valid
> -	 */
> -	saved_votes = node->votes;
> -	node->votes = req_lib_votequorum_setvotes->votes;
> -
> -	newquorum = calculate_quorum(1, 0, &total_votes);
> -
> -	if (newquorum < total_votes / 2 ||
> -	    newquorum > total_votes) {
> -		node->votes = saved_votes;
> -		error = CS_ERR_INVALID_PARAM;
> -		goto error_exit;
> -	}
> -
> -	if (!nodeid) {
> -		nodeid = corosync_api->totem_nodeid_get();
> -	}
> -
> -	quorum_exec_send_reconfigure(RECONFIG_PARAM_NODE_VOTES, nodeid,
> -				     req_lib_votequorum_setvotes->votes);
> -
> -	/*
> -	 * send status
> -	 */
> -error_exit:
> -	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> -	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> -	res_lib_votequorum_status.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> -
> -	LEAVE();
> -}
> -
> -static void quorum_device_timer_fn(void *arg)
> -{
> -	ENTER();
> -
> -	if (!quorum_device || quorum_device->state == NODESTATE_DEAD) {
> -		LEAVE();
> -		return;
> -	}
> -
> -	if ((quorum_device->last_hello / QB_TIME_NS_IN_SEC) + quorumdev_poll/1000 <
> -	    (qb_util_nano_current_get () / QB_TIME_NS_IN_SEC)) {
> -		quorum_device->state = NODESTATE_DEAD;
> -		log_printf(LOGSYS_LEVEL_INFO, "lost contact with quorum device\n");
> -		recalculate_quorum(0, 0);
> -	} else {
> -		corosync_api->timer_add_duration((unsigned long long)quorumdev_poll*1000000, quorum_device,
> -						 quorum_device_timer_fn, &quorum_device_timer);
> -	}
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_votequorum_qdisk_register (void *conn,
> -							       const void *message)
> -{
> -	const struct req_lib_votequorum_qdisk_register *req_lib_votequorum_qdisk_register = message;
> -	struct res_lib_votequorum_status res_lib_votequorum_status;
> -	cs_error_t error = CS_OK;
> -
> -	ENTER();
> -
> -	if (quorum_device) {
> -		error = CS_ERR_EXIST;
> -	} else {
> -		quorum_device = allocate_node(0);
> -		quorum_device->state = NODESTATE_DEAD;
> -		quorum_device->votes = req_lib_votequorum_qdisk_register->votes;
> -		strcpy(quorum_device_name, req_lib_votequorum_qdisk_register->name);
> -		list_add(&quorum_device->list, &cluster_members_list);
> -	}
> -
> -	/*
> -	 * send status
> -	 */
> -	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> -	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> -	res_lib_votequorum_status.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_votequorum_qdisk_unregister (void *conn,
> -								 const void *message)
> -{
> -	struct res_lib_votequorum_status res_lib_votequorum_status;
> -	cs_error_t error = CS_OK;
> -
> -	ENTER();
> -
> -	if (quorum_device) {
> -		struct cluster_node *node = quorum_device;
> -
> -		quorum_device = NULL;
> -		list_del(&node->list);
> -		free(node);
> -		recalculate_quorum(0, 0);
> -	} else {
> -		error = CS_ERR_NOT_EXIST;
> -	}
> -
> -	/*
> -	 * send status
> -	 */
> -	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> -	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> -	res_lib_votequorum_status.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_votequorum_qdisk_poll (void *conn,
> -							   const void *message)
> -{
> -	const struct req_lib_votequorum_qdisk_poll *req_lib_votequorum_qdisk_poll = message;
> -	struct res_lib_votequorum_status res_lib_votequorum_status;
> -	cs_error_t error = CS_OK;
> -
> -	ENTER();
> -
> -	if (quorum_device) {
> -		if (req_lib_votequorum_qdisk_poll->state) {
> -			quorum_device->last_hello = qb_util_nano_current_get ();
> -			if (quorum_device->state == NODESTATE_DEAD) {
> -				quorum_device->state = NODESTATE_MEMBER;
> -				recalculate_quorum(0, 0);
> -
> -				corosync_api->timer_add_duration((unsigned long long)quorumdev_poll*1000000, quorum_device,
> -								 quorum_device_timer_fn, &quorum_device_timer);
> -			}
> -		} else {
> -			if (quorum_device->state == NODESTATE_MEMBER) {
> -				quorum_device->state = NODESTATE_DEAD;
> -				recalculate_quorum(0, 0);
> -				corosync_api->timer_delete(quorum_device_timer);
> -			}
> -		}
> -	} else {
> -		error = CS_ERR_NOT_EXIST;
> -	}
> -
> -	/*
> -	 * send status
> -	 */
> -	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> -	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> -	res_lib_votequorum_status.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_votequorum_qdisk_getinfo (void *conn,
> -							      const void *message)
> -{
> -	struct res_lib_votequorum_qdisk_getinfo res_lib_votequorum_qdisk_getinfo;
> -	cs_error_t error = CS_OK;
> -
> -	ENTER();
> -
> -	if (quorum_device) {
> -		log_printf(LOGSYS_LEVEL_DEBUG, "got qdisk_getinfo state %d\n", quorum_device->state);
> -		res_lib_votequorum_qdisk_getinfo.votes = quorum_device->votes;
> -		if (quorum_device->state == NODESTATE_MEMBER) {
> -			res_lib_votequorum_qdisk_getinfo.state = 1;
> -		} else {
> -			res_lib_votequorum_qdisk_getinfo.state = 0;
> -		}
> -		strcpy(res_lib_votequorum_qdisk_getinfo.name, quorum_device_name);
> -	} else {
> -		error = CS_ERR_NOT_EXIST;
> -	}
> -
> -	/*
> -	 * send status
> -	 */
> -	res_lib_votequorum_qdisk_getinfo.header.size = sizeof(res_lib_votequorum_qdisk_getinfo);
> -	res_lib_votequorum_qdisk_getinfo.header.id = MESSAGE_RES_VOTEQUORUM_GETINFO;
> -	res_lib_votequorum_qdisk_getinfo.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_qdisk_getinfo, sizeof(res_lib_votequorum_qdisk_getinfo));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_votequorum_trackstart (void *conn,
> -							   const void *message)
> -{
> -	const struct req_lib_votequorum_trackstart *req_lib_votequorum_trackstart = message;
> -	struct res_lib_votequorum_status res_lib_votequorum_status;
> -	struct quorum_pd *quorum_pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> -
> -	ENTER();
> -	/*
> -	 * If an immediate listing of the current cluster membership
> -	 * is requested, generate membership list
> -	 */
> -	if (req_lib_votequorum_trackstart->track_flags & CS_TRACK_CURRENT ||
> -	    req_lib_votequorum_trackstart->track_flags & CS_TRACK_CHANGES) {
> -		log_printf(LOGSYS_LEVEL_DEBUG, "sending initial status to %p\n", conn);
> -		send_quorum_notification(conn, req_lib_votequorum_trackstart->context);
> -	}
> -
> -	/*
> -	 * Record requests for tracking
> -	 */
> -	if (req_lib_votequorum_trackstart->track_flags & CS_TRACK_CHANGES ||
> -	    req_lib_votequorum_trackstart->track_flags & CS_TRACK_CHANGES_ONLY) {
> -
> -		quorum_pd->track_flags = req_lib_votequorum_trackstart->track_flags;
> -		quorum_pd->tracking_enabled = 1;
> -		quorum_pd->tracking_context = req_lib_votequorum_trackstart->context;
> -
> -		list_add (&quorum_pd->list, &trackers_list);
> -	}
> -
> -	/*
> -	 * Send status
> -	 */
> -	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> -	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> -	res_lib_votequorum_status.header.error = CS_OK;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> -
> -	LEAVE();
> -}
> -
> -static void message_handler_req_lib_votequorum_trackstop (void *conn,
> -							  const void *message)
> -{
> -	struct res_lib_votequorum_status res_lib_votequorum_status;
> -	struct quorum_pd *quorum_pd = (struct quorum_pd *)corosync_api->ipc_private_data_get (conn);
> -	int error = CS_OK;
> -
> -	ENTER();
> -
> -	if (quorum_pd->tracking_enabled) {
> -		error = CS_OK;
> -		quorum_pd->tracking_enabled = 0;
> -		list_del (&quorum_pd->list);
> -		list_init (&quorum_pd->list);
> -	} else {
> -		error = CS_ERR_NOT_EXIST;
> -	}
> -
> -	/*
> -	 * send status
> -	 */
> -	res_lib_votequorum_status.header.size = sizeof(res_lib_votequorum_status);
> -	res_lib_votequorum_status.header.id = MESSAGE_RES_VOTEQUORUM_STATUS;
> -	res_lib_votequorum_status.header.error = error;
> -	corosync_api->ipc_response_send(conn, &res_lib_votequorum_status, sizeof(res_lib_votequorum_status));
> -
> -	LEAVE();
> -}
> -
> -static void reread_config(void)
> -{
> -	unsigned int old_votes;
> -	unsigned int old_expected;
> -
> -	ENTER();
> -
> -	old_votes = us->votes;
> -	old_expected = us->expected_votes;
> -
> -	/*
> -	 * Reload the configuration
> -	 */
> -	read_quorum_config();
> -
> -	/*
> -	 * Check for fundamental changes that we need to propogate
> -	 */
> -	if (old_votes != us->votes) {
> -		quorum_exec_send_reconfigure(RECONFIG_PARAM_NODE_VOTES, us->node_id, us->votes);
> -	}
> -	if (old_expected != us->expected_votes) {
> -		quorum_exec_send_reconfigure(RECONFIG_PARAM_EXPECTED_VOTES, us->node_id, us->expected_votes);
> -	}
> -
> -	LEAVE();
> -}
> -
> -static void key_change_quorum(
> -	int32_t event,
> -	const char *key_name,
> -	struct icmap_notify_value new_val,
> -	struct icmap_notify_value old_val,
> -	void *user_data)
> -{
> -	ENTER();
> -
> -	reread_config();
> -
> -	LEAVE();
> -}
> -
> -static void add_votequorum_config_notification(void)
> -{
> -	icmap_track_t icmap_track;
> -
> -	ENTER();
> -
> -	icmap_track_add("quorum.",
> -		ICMAP_TRACK_ADD | ICMAP_TRACK_DELETE | ICMAP_TRACK_MODIFY | ICMAP_TRACK_PREFIX,
> -		key_change_quorum,
> -		NULL,
> -		&icmap_track);
> -
> -	LEAVE();
> -}
> diff --git a/services/wd.c b/services/wd.c
> deleted file mode 100644
> index 9c45e32..0000000
> --- a/services/wd.c
> +++ /dev/null
> @@ -1,749 +0,0 @@
> -/*
> - * Copyright (c) 2010 Red Hat, Inc.
> - *
> - * All rights reserved.
> - *
> - * Author: Angus Salkeld <asalkeld@xxxxxxxxxx>
> - *
> - * This software licensed under BSD license, the text of which follows:
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions are met:
> - *
> - * - Redistributions of source code must retain the above copyright notice,
> - *   this list of conditions and the following disclaimer.
> - * - Redistributions in binary form must reproduce the above copyright notice,
> - *   this list of conditions and the following disclaimer in the documentation
> - *   and/or other materials provided with the distribution.
> - * - Neither the name of the MontaVista Software, Inc. nor the names of its
> - *   contributors may be used to endorse or promote products derived from this
> - *   software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
> - * THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -#include <config.h>
> -
> -#include <unistd.h>
> -#include <fcntl.h>
> -#include <sys/ioctl.h>
> -#include <linux/types.h>
> -#include <linux/watchdog.h>
> -#include <sys/reboot.h>
> -
> -#include <corosync/corotypes.h>
> -#include <corosync/corodefs.h>
> -#include <corosync/lcr/lcr_comp.h>
> -#include <corosync/coroapi.h>
> -#include <corosync/list.h>
> -#include <corosync/logsys.h>
> -#include <corosync/icmap.h>
> -#include "../exec/fsm.h"
> -
> -
> -typedef enum {
> -	WD_RESOURCE_GOOD,
> -	WD_RESOURCE_FAILED,
> -	WD_RESOURCE_STATE_UNKNOWN,
> -	WD_RESOURCE_NOT_MONITORED
> -} wd_resource_state_t;
> -
> -struct resource {
> -	char res_path[ICMAP_KEYNAME_MAXLEN];
> -	char *recovery;
> -	char name[CS_MAX_NAME_LENGTH];
> -	time_t last_updated;
> -	struct cs_fsm fsm;
> -
> -	corosync_timer_handle_t check_timer;
> -	uint64_t check_timeout;
> -	icmap_track_t icmap_track;
> -};
> -
> -LOGSYS_DECLARE_SUBSYS("WD");
> -
> -/*
> - * Service Interfaces required by service_message_handler struct
> - */
> -static int wd_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api);
> -static int wd_exec_exit_fn (void);
> -static void wd_resource_check_fn (void* resource_ref);
> -
> -static struct corosync_api_v1 *api;
> -#define WD_DEFAULT_TIMEOUT_SEC 6
> -#define WD_DEFAULT_TIMEOUT_MS (WD_DEFAULT_TIMEOUT_SEC * CS_TIME_MS_IN_SEC)
> -#define WD_MIN_TIMEOUT_MS 500
> -#define WD_MAX_TIMEOUT_MS (120 * CS_TIME_MS_IN_SEC)
> -static uint32_t watchdog_timeout = WD_DEFAULT_TIMEOUT_SEC;
> -static uint64_t tickle_timeout = (WD_DEFAULT_TIMEOUT_MS / 2);
> -static int dog = -1;
> -static corosync_timer_handle_t wd_timer;
> -static int watchdog_ok = 1;
> -
> -struct corosync_service_engine wd_service_engine = {
> -	.name			= "corosync watchdog service",
> -	.id			= WD_SERVICE,
> -	.priority		= 1,
> -	.private_data_size	= 0,
> -	.flow_control		= CS_LIB_FLOW_CONTROL_NOT_REQUIRED,
> -	.lib_init_fn		= NULL,
> -	.lib_exit_fn		= NULL,
> -	.lib_engine		= NULL,
> -	.lib_engine_count	= 0,
> -	.exec_engine		= NULL,
> -	.exec_engine_count	= 0,
> -	.confchg_fn		= NULL,
> -	.exec_init_fn		= wd_exec_init_fn,
> -	.exec_exit_fn		= wd_exec_exit_fn,
> -	.exec_dump_fn		= NULL,
> -	.sync_mode		= CS_SYNC_V2
> -};
> -
> -static DECLARE_LIST_INIT (confchg_notify);
> -
> -/*
> - * F S M
> - */
> -static void wd_config_changed (struct cs_fsm* fsm, int32_t event, void * data);
> -static void wd_resource_failed (struct cs_fsm* fsm, int32_t event, void * data);
> -
> -enum wd_resource_state {
> -	WD_S_RUNNING,
> -	WD_S_FAILED,
> -	WD_S_STOPPED
> -};
> -
> -enum wd_resource_event {
> -	WD_E_FAILURE,
> -	WD_E_CONFIG_CHANGED
> -};
> -
> -const char * wd_running_str		= "running";
> -const char * wd_failed_str		= "failed";
> -const char * wd_failure_str		= "failure";
> -const char * wd_stopped_str		= "stopped";
> -const char * wd_config_changed_str	= "config_changed";
> -
> -struct cs_fsm_entry wd_fsm_table[] = {
> -	{ WD_S_STOPPED,	WD_E_CONFIG_CHANGED,	wd_config_changed,	{WD_S_STOPPED, WD_S_RUNNING, -1} },
> -	{ WD_S_STOPPED,	WD_E_FAILURE,		NULL,			{-1} },
> -	{ WD_S_RUNNING,	WD_E_CONFIG_CHANGED,	wd_config_changed,	{WD_S_RUNNING, WD_S_STOPPED, -1} },
> -	{ WD_S_RUNNING,	WD_E_FAILURE,		wd_resource_failed,	{WD_S_FAILED, -1} },
> -	{ WD_S_FAILED,	WD_E_CONFIG_CHANGED,	wd_config_changed,	{WD_S_RUNNING, WD_S_STOPPED, -1} },
> -	{ WD_S_FAILED,	WD_E_FAILURE,		NULL,			{-1} },
> -};
> -
> -/*
> - * Dynamic loading descriptor
> - */
> -
> -static struct corosync_service_engine *wd_get_service_engine_ver0 (void);
> -
> -static struct corosync_service_engine_iface_ver0 wd_service_engine_iface = {
> -	.corosync_get_service_engine_ver0	= wd_get_service_engine_ver0
> -};
> -
> -static struct lcr_iface corosync_wd_ver0[1] = {
> -	{
> -		.name			= "corosync_wd",
> -		.version		= 0,
> -		.versions_replace	= 0,
> -		.versions_replace_count = 0,
> -		.dependencies		= 0,
> -		.dependency_count	= 0,
> -		.constructor		= NULL,
> -		.destructor		= NULL,
> -		.interfaces		= NULL,
> -	}
> -};
> -
> -static struct lcr_comp wd_comp_ver0 = {
> -	.iface_count	= 1,
> -	.ifaces		= corosync_wd_ver0
> -};
> -
> -static struct corosync_service_engine *wd_get_service_engine_ver0 (void)
> -{
> -	return (&wd_service_engine);
> -}
> -
> -#ifdef COROSYNC_SOLARIS
> -void corosync_lcr_component_register (void);
> -
> -void corosync_lcr_component_register (void) {
> -#else
> -__attribute__ ((constructor)) static void corosync_lcr_component_register (void) {
> -#endif
> -	lcr_interfaces_set (&corosync_wd_ver0[0], &wd_service_engine_iface);
> -
> -	lcr_component_register (&wd_comp_ver0);
> -}
> -
> -static const char * wd_res_state_to_str(struct cs_fsm* fsm,
> -	int32_t state)
> -{
> -	switch (state) {
> -	case WD_S_STOPPED:
> -		return wd_stopped_str;
> -		break;
> -	case WD_S_RUNNING:
> -		return wd_running_str;
> -		break;
> -	case WD_S_FAILED:
> -		return wd_failed_str;
> -		break;
> -	}
> -	return NULL;
> -}
> -
> -static const char * wd_res_event_to_str(struct cs_fsm* fsm,
> -	int32_t event)
> -{
> -	switch (event) {
> -	case WD_E_CONFIG_CHANGED:
> -		return wd_config_changed_str;
> -		break;
> -	case WD_E_FAILURE:
> -		return wd_failure_str;
> -		break;
> -	}
> -	return NULL;
> -}
> -
> -/*
> - * returns (CS_TRUE == OK, CS_FALSE == failed)
> - */
> -static int32_t wd_resource_state_is_ok (struct resource *ref)
> -{
> -	char* state;
> -	uint64_t last_updated;
> -	uint64_t my_time;
> -	uint64_t allowed_period;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "last_updated");
> -	if (icmap_get_uint64(key_name, &last_updated) != CS_OK) {
> -		/* key does not exist.
> -		*/
> -		return CS_FALSE;
> -	}
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "state");
> -	if (icmap_get_string(key_name, &state) != CS_OK || strcmp(state, "disabled") == 0) {
> -		/* key does not exist.
> -		*/
> -		return CS_FALSE;
> -	}
> -
> -	if (last_updated == 0) {
> -		/* initial value */
> -		free(state);
> -		return CS_TRUE;
> -	}
> -
> -	my_time = cs_timestamp_get();
> -
> -	/*
> -	 * Here we check that the monitor has written a timestamp within the poll_period
> -	 * plus a grace factor of (0.5 * poll_period).
> -	 */
> -	allowed_period = (ref->check_timeout * MILLI_2_NANO_SECONDS * 3) / 2;
> -	if ((last_updated + allowed_period) < my_time) {
> -		log_printf (LOGSYS_LEVEL_ERROR,
> -			"last_updated %"PRIu64" ms too late, period:%"PRIu64".",
> -			(uint64_t)(my_time/MILLI_2_NANO_SECONDS - ((last_updated + allowed_period) / MILLI_2_NANO_SECONDS)),
> -			ref->check_timeout);
> -		return CS_FALSE;
> -	}
> -
> -	if (strcmp (state, wd_failed_str) == 0) {
> -		free(state);
> -		return CS_FALSE;
> -	}
> -
> -	free(state);
> -	return CS_TRUE;
> -}
> -
> -static void wd_config_changed (struct cs_fsm* fsm, int32_t event, void * data)
> -{
> -	char *state;
> -	uint64_t tmp_value;
> -	uint64_t next_timeout;
> -	struct resource *ref = (struct resource*)data;
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -
> -	next_timeout = ref->check_timeout;
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "poll_period");
> -	if (icmap_get_uint64(ref->res_path, &tmp_value) == CS_OK) {
> -		if (tmp_value >= WD_MIN_TIMEOUT_MS && tmp_value <= WD_MAX_TIMEOUT_MS) {
> -			log_printf (LOGSYS_LEVEL_DEBUG,
> -				"poll_period changing from:%"PRIu64" to %"PRIu64".",
> -				ref->check_timeout, tmp_value);
> -			/*
> -			 * To easy in the transition between poll_period's we are going
> -			 * to make the first timeout the bigger of the new and old value.
> -			 * This is to give the monitoring system time to adjust.
> -			 */
> -			next_timeout = CS_MAX(tmp_value, ref->check_timeout);
> -			ref->check_timeout = tmp_value;
> -		} else {
> -			log_printf (LOGSYS_LEVEL_WARNING,
> -				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> -				tmp_value, ref->name);
> -		}
> -	}
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "recovery");
> -	if (icmap_get_string(key_name, &ref->recovery) != CS_OK) {
> -		/* key does not exist.
> -		 */
> -		log_printf (LOGSYS_LEVEL_WARNING,
> -			"resource %s missing a recovery key.", ref->name);
> -		cs_fsm_state_set(&ref->fsm, WD_S_STOPPED, ref);
> -		return;
> -	}
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", ref->res_path, "state");
> -	if (icmap_get_string(key_name, &state) != CS_OK) {
> -		/* key does not exist.
> -		*/
> -		log_printf (LOGSYS_LEVEL_WARNING,
> -			"resource %s missing a state key.", ref->name);
> -		cs_fsm_state_set(&ref->fsm, WD_S_STOPPED, ref);
> -		return;
> -	}
> -	if (ref->check_timer) {
> -		api->timer_delete(ref->check_timer);
> -		ref->check_timer = 0;
> -	}
> -
> -	if (strcmp(wd_stopped_str, state) == 0) {
> -		cs_fsm_state_set(&ref->fsm, WD_S_STOPPED, ref);
> -	} else {
> -		api->timer_add_duration(next_timeout * MILLI_2_NANO_SECONDS,
> -			ref, wd_resource_check_fn, &ref->check_timer);
> -		cs_fsm_state_set(&ref->fsm, WD_S_RUNNING, ref);
> -	}
> -	free(state);
> -}
> -
> -static void wd_resource_failed (struct cs_fsm* fsm, int32_t event, void * data)
> -{
> -	struct resource* ref = (struct resource*)data;
> -
> -	if (ref->check_timer) {
> -		api->timer_delete(ref->check_timer);
> -		ref->check_timer = 0;
> -	}
> -
> -	log_printf (LOGSYS_LEVEL_CRIT, "%s resource \"%s\" failed!",
> -		ref->recovery, (char*)ref->name);
> -	if (strcmp (ref->recovery, "watchdog") == 0 ||
> -	    strcmp (ref->recovery, "quit") == 0) {
> -		watchdog_ok = 0;
> -	}
> -	else if (strcmp (ref->recovery, "reboot") == 0) {
> -		reboot(RB_AUTOBOOT);
> -	}
> -	else if (strcmp (ref->recovery, "shutdown") == 0) {
> -		reboot(RB_POWER_OFF);
> -	}
> -	cs_fsm_state_set(fsm, WD_S_FAILED, data);
> -}
> -
> -static void wd_key_changed(
> -	int32_t event,
> -	const char *key_name,
> -	struct icmap_notify_value new_val,
> -	struct icmap_notify_value old_val,
> -	void *user_data)
> -{
> -	struct resource* ref = (struct resource*)user_data;
> -	char *last_key_part;
> -
> -	if (ref == NULL) {
> -		return ;
> -	}
> -
> -	last_key_part = strrchr(key_name, '.');
> -	if (last_key_part == NULL) {
> -		return ;
> -	}
> -	last_key_part++;
> -
> -	if (event == ICMAP_TRACK_ADD || event == ICMAP_TRACK_MODIFY) {
> -		if (strcmp(last_key_part, "last_updated") == 0 ||
> -			strcmp(last_key_part, "current") == 0) {
> -			return;
> -		}
> -
> -		cs_fsm_process(&ref->fsm, WD_E_CONFIG_CHANGED, ref);
> -	}
> -
> -	if (event == ICMAP_TRACK_DELETE && ref != NULL) {
> -		if (strcmp(last_key_part, "state") != 0) {
> -			return ;
> -		}
> -
> -		log_printf (LOGSYS_LEVEL_WARNING,
> -			"resource \"%s\" deleted from cmap!",
> -			ref->name);
> -
> -		api->timer_delete(ref->check_timer);
> -		ref->check_timer = 0;
> -		icmap_track_delete(ref->icmap_track);
> -
> -		free(ref);
> -	}
> -}
> -
> -static void wd_resource_check_fn (void* resource_ref)
> -{
> -	struct resource* ref = (struct resource*)resource_ref;
> -
> -	if (wd_resource_state_is_ok (ref) == CS_FALSE) {
> -		cs_fsm_process(&ref->fsm, WD_E_FAILURE, ref);
> -		return;
> -	}
> -	api->timer_add_duration(ref->check_timeout*MILLI_2_NANO_SECONDS,
> -		ref, wd_resource_check_fn, &ref->check_timer);
> -}
> -
> -/*
> - * return 0   - fully configured
> - * return -1  - partially configured
> - */
> -static int32_t wd_resource_create (char *res_path, char *res_name)
> -{
> -	char *state;
> -	uint64_t tmp_value;
> -	struct resource *ref = malloc (sizeof (struct resource));
> -	char key_name[ICMAP_KEYNAME_MAXLEN];
> -
> -	strcpy(ref->res_path, res_path);
> -	ref->check_timeout = WD_DEFAULT_TIMEOUT_MS;
> -	ref->check_timer = 0;
> -
> -	strcpy(ref->name, res_name);
> -	ref->fsm.name = ref->name;
> -	ref->fsm.table = wd_fsm_table;
> -	ref->fsm.entries = sizeof(wd_fsm_table) / sizeof(struct cs_fsm_entry);
> -	ref->fsm.curr_entry = 0;
> -	ref->fsm.curr_state = WD_S_STOPPED;
> -	ref->fsm.state_to_str = wd_res_state_to_str;
> -	ref->fsm.event_to_str = wd_res_event_to_str;
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "poll_period");
> -	if (icmap_get_uint64(key_name, &tmp_value) != CS_OK) {
> -		icmap_set_uint64(key_name, ref->check_timeout);
> -	} else {
> -		if (tmp_value >= WD_MIN_TIMEOUT_MS && tmp_value <= WD_MAX_TIMEOUT_MS) {
> -			ref->check_timeout = tmp_value;
> -		} else {
> -			log_printf (LOGSYS_LEVEL_WARNING,
> -				"Could NOT use poll_period:%"PRIu64" ms for resource %s",
> -				tmp_value, ref->name);
> -		}
> -	}
> -
> -	icmap_track_add(res_path,
> -			ICMAP_TRACK_ADD | ICMAP_TRACK_MODIFY | ICMAP_TRACK_DELETE | ICMAP_TRACK_PREFIX,
> -			wd_key_changed,
> -			ref, &ref->icmap_track);
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "recovery");
> -	if (icmap_get_string(key_name, &ref->recovery) != CS_OK) {
> -		/* key does not exist.
> -		 */
> -		log_printf (LOGSYS_LEVEL_WARNING,
> -			"resource %s missing a recovery key.", ref->name);
> -		return -1;
> -	}
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "state");
> -	if (icmap_get_string(key_name, &state) != CS_OK) {
> -		/* key does not exist.
> -		*/
> -		log_printf (LOGSYS_LEVEL_WARNING,
> -			"resource %s missing a state key.", ref->name);
> -		return -1;
> -	}
> -
> -	snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "%s%s", res_path, "last_updated");
> -	if (icmap_get_uint64(key_name, &tmp_value) != CS_OK) {
> -		/* key does not exist.
> -		 */
> -		ref->last_updated = 0;
> -	} else {
> -		ref->last_updated = tmp_value;
> -	}
> -
> -	/*
> -	 * delay the first check to give the monitor time to start working.
> -	 */
> -	tmp_value = CS_MAX(ref->check_timeout * 2, WD_DEFAULT_TIMEOUT_MS);
> -	api->timer_add_duration(tmp_value * MILLI_2_NANO_SECONDS,
> -		ref,
> -		wd_resource_check_fn, &ref->check_timer);
> -
> -	cs_fsm_state_set(&ref->fsm, WD_S_RUNNING, ref);
> -	return 0;
> -}
> -
> -
> -static void wd_tickle_fn (void* arg)
> -{
> -	ENTER();
> -
> -	if (watchdog_ok) {
> -		if (dog > 0) {
> -			ioctl(dog, WDIOC_KEEPALIVE, &watchdog_ok);
> -		}
> -		api->timer_add_duration(tickle_timeout*MILLI_2_NANO_SECONDS, NULL,
> -			wd_tickle_fn, &wd_timer);
> -	}
> -	else {
> -		log_printf (LOGSYS_LEVEL_ALERT, "NOT tickling the watchdog!");
> -	}
> -
> -}
> -
> -static void wd_resource_created_cb(
> -	int32_t event,
> -	const char *key_name,
> -	struct icmap_notify_value new_val,
> -	struct icmap_notify_value old_val,
> -	void *user_data)
> -{
> -	char res_name[ICMAP_KEYNAME_MAXLEN];
> -	char res_type[ICMAP_KEYNAME_MAXLEN];
> -	char tmp_key[ICMAP_KEYNAME_MAXLEN];
> -	int res;
> -
> -	if (event != ICMAP_TRACK_ADD) {
> -		return ;
> -	}
> -
> -	res = sscanf(key_name, "resources.%[^.].%[^.].%[^.]", res_type, res_name, tmp_key);
> -	if (res != 3) {
> -		return ;
> -	}
> -
> -	if (strcmp(tmp_key, "state") != 0) {
> -		return ;
> -	}
> -
> -	snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "resources.%s.%s.", res_type, res_name);
> -	wd_resource_create (tmp_key, res_name);
> -}
> -
> -static void wd_scan_resources (void)
> -{
> -	int res_count = 0;
> -	icmap_track_t icmap_track;
> -	icmap_iter_t iter;
> -	const char *key_name;
> -	int res;
> -	char res_name[ICMAP_KEYNAME_MAXLEN];
> -	char res_type[ICMAP_KEYNAME_MAXLEN];
> -	char tmp_key[ICMAP_KEYNAME_MAXLEN];
> -
> -	ENTER();
> -
> -	iter = icmap_iter_init("resources.");
> -	while ((key_name = icmap_iter_next(iter, NULL, NULL)) != NULL) {
> -		res = sscanf(key_name, "resources.%[^.].%[^.].%[^.]", res_type, res_name, tmp_key);
> -		if (res != 3) {
> -			continue ;
> -		}
> -
> -		if (strcmp(tmp_key, "state") != 0) {
> -			continue ;
> -		}
> -
> -		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "resources.%s.%s.", res_type, res_name);
> -		if (wd_resource_create (tmp_key, res_name) == 0) {
> -			res_count++;
> -		}
> -	}
> -	icmap_iter_finalize(iter);
> -
> -	icmap_track_add("resources.process.", ICMAP_TRACK_ADD | ICMAP_TRACK_PREFIX,
> -			wd_resource_created_cb, NULL, &icmap_track);
> -	icmap_track_add("resources.system.", ICMAP_TRACK_ADD | ICMAP_TRACK_PREFIX,
> -			wd_resource_created_cb, NULL, &icmap_track);
> -
> -	if (res_count == 0) {
> -		log_printf (LOGSYS_LEVEL_INFO, "no resources configured.");
> -	}
> -}
> -
> -
> -static void watchdog_timeout_apply (uint32_t new)
> -{
> -	struct watchdog_info ident;
> -	uint32_t original_timeout = watchdog_timeout;
> -
> -	if (new == original_timeout) {
> -		return;
> -	}
> -
> -	watchdog_timeout = new;
> -
> -	if (dog > 0) {
> -		ioctl(dog, WDIOC_GETSUPPORT, &ident);
> -		if (ident.options & WDIOF_SETTIMEOUT) {
> -			/* yay! the dog is trained.
> -			 */
> -			ioctl(dog, WDIOC_SETTIMEOUT, &watchdog_timeout);
> -		}
> -		ioctl(dog, WDIOC_GETTIMEOUT, &watchdog_timeout);
> -	}
> -
> -	if (watchdog_timeout == new) {
> -		tickle_timeout = (watchdog_timeout * CS_TIME_MS_IN_SEC)/ 2;
> -
> -		/* reset the tickle timer in case it was reduced.
> -		 */
> -		api->timer_delete (wd_timer);
> -		api->timer_add_duration(tickle_timeout*MILLI_2_NANO_SECONDS, NULL,
> -			wd_tickle_fn, &wd_timer);
> -
> -		log_printf (LOGSYS_LEVEL_DEBUG, "The Watchdog timeout is %d seconds\n", watchdog_timeout);
> -		log_printf (LOGSYS_LEVEL_DEBUG, "The tickle timeout is %"PRIu64" ms\n", tickle_timeout);
> -	} else {
> -		log_printf (LOGSYS_LEVEL_WARNING,
> -			"Could not change the Watchdog timeout from %d to %d seconds\n",
> -			original_timeout, new);
> -	}
> -
> -}
> -
> -static int setup_watchdog(void)
> -{
> -	struct watchdog_info ident;
> -
> -	ENTER();
> -	if (access ("/dev/watchdog", W_OK) != 0) {
> -		log_printf (LOGSYS_LEVEL_WARNING, "No Watchdog, try modprobe <a watchdog>");
> -		dog = -1;
> -		return -1;
> -	}
> -
> -	/* here goes, lets hope they have "Magic Close"
> -	 */
> -	dog = open("/dev/watchdog", O_WRONLY);
> -
> -	if (dog == -1) {
> -		log_printf (LOGSYS_LEVEL_WARNING, "Watchdog exists but couldn't be opened.");
> -		dog = -1;
> -		return -1;
> -	}
> -
> -	/* Right we have the dog.
> -	 * Lets see what breed it is.
> -	 */
> -
> -	ioctl(dog, WDIOC_GETSUPPORT, &ident);
> -	log_printf (LOGSYS_LEVEL_INFO, "Watchdog is now been tickled by corosync.");
> -	log_printf (LOGSYS_LEVEL_DEBUG, "%s", ident.identity);
> -
> -	watchdog_timeout_apply (watchdog_timeout);
> -
> -	ioctl(dog, WDIOC_SETOPTIONS, WDIOS_ENABLECARD);
> -
> -	return 0;
> -}
> -
> -static void wd_top_level_key_changed(
> -	int32_t event,
> -	const char *key_name,
> -	struct icmap_notify_value new_val,
> -	struct icmap_notify_value old_val,
> -	void *user_data)
> -{
> -	uint32_t tmp_value_32;
> -
> -	ENTER();
> -
> -	if (icmap_get_uint32("resources.watchdog_timeout", &tmp_value_32) != CS_OK) {
> -		if (tmp_value_32 >= 2 && tmp_value_32 <= 120) {
> -			watchdog_timeout_apply (tmp_value_32);
> -		}
> -	}
> -	else {
> -		watchdog_timeout_apply (WD_DEFAULT_TIMEOUT_SEC);
> -	}
> -}
> -
> -static void watchdog_timeout_get_initial (void)
> -{
> -	uint32_t tmp_value_32;
> -	icmap_track_t icmap_track;
> -
> -	ENTER();
> -
> -	if (icmap_get_uint32("resources.watchdog_timeout", &tmp_value_32) != CS_OK) {
> -		watchdog_timeout_apply (WD_DEFAULT_TIMEOUT_SEC);
> -
> -		icmap_set_uint32("resources.watchdog_timeout", watchdog_timeout);
> -	}
> -	else {
> -		if (tmp_value_32 >= 2 && tmp_value_32 <= 120) {
> -			watchdog_timeout_apply (tmp_value_32);
> -		} else {
> -			watchdog_timeout_apply (WD_DEFAULT_TIMEOUT_SEC);
> -		}
> -	}
> -
> -	icmap_track_add("resources.watchdog_timeout", ICMAP_TRACK_MODIFY,
> -			wd_top_level_key_changed, NULL, &icmap_track);
> -
> -}
> -
> -static int wd_exec_init_fn (
> -	struct corosync_api_v1 *corosync_api)
> -{
> -
> -	ENTER();
> -#ifdef COROSYNC_SOLARIS
> -	logsys_subsys_init();
> -#endif
> -	api = corosync_api;
> -
> -	watchdog_timeout_get_initial();
> -
> -	setup_watchdog();
> -
> -	wd_scan_resources();
> -
> -	api->timer_add_duration(tickle_timeout*MILLI_2_NANO_SECONDS, NULL,
> -				wd_tickle_fn, &wd_timer);
> -
> -	return 0;
> -}
> -
> -static int wd_exec_exit_fn (void)
> -{
> -	char magic = 'V';
> -	ENTER();
> -
> -	if (dog > 0) {
> -		log_printf (LOGSYS_LEVEL_INFO, "magically closing the watchdog.");
> -		write (dog, &magic, 1);
> -	}
> -	return 0;
> -}
> -
> -

_______________________________________________
discuss mailing list
discuss@xxxxxxxxxxxx
http://lists.corosync.org/mailman/listinfo/discuss

