Reviewed-by: Steven Dake <sdake@xxxxxxxxxx>

On 08/09/2012 04:50 AM, Jan Friesse wrote:
> Multithreaded corosync used to rely on many ugly workarounds. One of them
> was the shutdown process, where we had to solve a problem with two locks.
> That was solved by scheduling jobs between the service exit_fn call and
> the actual service unload. Sadly, a message from another node could
> arrive in the meantime, causing corosync to segfault on exit.
>
> Because corosync is now single threaded, we don't need such hacks any
> longer.
>
> Signed-off-by: Jan Friesse <jfriesse@xxxxxxxxxx>
> ---
>  exec/ipc_glue.c |    1 -
>  exec/service.c  |   64 ++++++++++--------------------------------------
>  exec/service.h  |    2 -
>  3 files changed, 12 insertions(+), 55 deletions(-)
>
> diff --git a/exec/ipc_glue.c b/exec/ipc_glue.c
> index 23123fa..ef04899 100644
> --- a/exec/ipc_glue.c
> +++ b/exec/ipc_glue.c
> @@ -175,7 +175,6 @@ static int32_t cs_ipcs_connection_accept (qb_ipcs_connection_t *c, uid_t euid, g
>  	}
>
>  	if (corosync_service[service] == NULL ||
> -		corosync_service_exiting[service] ||
>  		ipcs_mapper[service].inst == NULL) {
>  		return -ENOSYS;
>  	}
> diff --git a/exec/service.c b/exec/service.c
> index f22a0af..eaacfbb 100644
> --- a/exec/service.c
> +++ b/exec/service.c
> @@ -112,8 +112,6 @@ struct corosync_service_engine *corosync_service[SERVICES_COUNT_MAX];
>  const char *service_stats_rx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];
>  const char *service_stats_tx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];
>
> -int corosync_service_exiting[SERVICES_COUNT_MAX];
> -
>  static void (*service_unlink_all_complete) (void) = NULL;
>
>  char *corosync_service_link_and_init (
> @@ -194,7 +192,7 @@ static int service_priority_max(void)
>   * use the force
>   */
>  static unsigned int
> -corosync_service_unlink_priority (
> +corosync_service_unlink_and_exit_priority (
>  	struct corosync_api_v1 *corosync_api,
>  	int lowest_priority,
>  	int *current_priority,
> @@ -229,7 +227,16 @@ corosync_service_unlink_priority (
>  		}
>  	}
>
> -	corosync_service_exiting[*current_service_engine] = 1;
> +	/*
> +	 * Exit all ipc connections dependent on this service
> +	 */
> +	cs_ipcs_service_destroy (*current_service_engine);
> +
> +	log_printf(LOGSYS_LEVEL_NOTICE,
> +		"Service engine unloaded: %s",
> +		corosync_service[*current_service_engine]->name);
> +
> +	corosync_service[*current_service_engine] = NULL;
>
>  	/*
>  	 * Call should call this function again
> @@ -349,43 +356,6 @@ unsigned int corosync_service_defaults_link_and_init (struct corosync_api_v1 *co
>  	return (0);
>  }
>
> -/*
> - * Declaration of exit_schedwrk_handler, because of cycle
> - * (service_exit_schedwrk_handler calls service_unlink_schedwrk_handler, and vice-versa)
> - */
> -static void service_exit_schedwrk_handler (void *data);
> -
> -static void service_unlink_schedwrk_handler (void *data) {
> -	struct seus_handler_data *cb_data = (struct seus_handler_data *)data;
> -
> -	/*
> -	 * Exit all ipc connections dependent on this service
> -	 */
> -	if (cs_ipcs_service_destroy (cb_data->service_engine) == -1) {
> -		goto redo_this_function;
> -	}
> -
> -	log_printf(LOGSYS_LEVEL_NOTICE,
> -		"Service engine unloaded: %s",
> -		corosync_service[cb_data->service_engine]->name);
> -
> -	corosync_service[cb_data->service_engine] = NULL;
> -
> -	qb_loop_job_add(cs_poll_handle_get(),
> -		QB_LOOP_HIGH,
> -		data,
> -		service_exit_schedwrk_handler);
> -
> -	return;
> -
> -redo_this_function:
> -	qb_loop_job_add(cs_poll_handle_get(),
> -		QB_LOOP_HIGH,
> -		data,
> -		service_unlink_schedwrk_handler);
> -
> -}
> -
>  static void service_exit_schedwrk_handler (void *data) {
>  	int res;
>  	static int current_priority = 0;
> @@ -401,7 +371,7 @@ static void service_exit_schedwrk_handler (void *data) {
>  		called = 1;
>  	}
>
> -	res = corosync_service_unlink_priority (
> +	res = corosync_service_unlink_and_exit_priority (
>  		api,
>  		0,
>  		&current_priority,
> @@ -411,16 +381,6 @@ static void service_exit_schedwrk_handler (void *data) {
>  		return;
>  	}
>
> -	if (res == 1) {
> -		cb_data->service_engine = current_service_engine;
> -
> -		qb_loop_job_add(cs_poll_handle_get(),
> -			QB_LOOP_HIGH,
> -			data,
> -			service_unlink_schedwrk_handler);
> -		return;
> -	}
> -
>  	qb_loop_job_add(cs_poll_handle_get(),
>  		QB_LOOP_HIGH,
>  		data,
> diff --git a/exec/service.h b/exec/service.h
> index 1c41343..12fd751 100644
> --- a/exec/service.h
> +++ b/exec/service.h
> @@ -75,8 +75,6 @@ extern unsigned int corosync_service_defaults_link_and_init (
>
>  extern struct corosync_service_engine *corosync_service[];
>
> -extern int corosync_service_exiting[];
> -
>  extern const char *service_stats_rx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];
>  extern const char *service_stats_tx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];
>

_______________________________________________
discuss mailing list
discuss@xxxxxxxxxxxx
http://lists.corosync.org/mailman/listinfo/discuss
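
[Editor's note: for context on the pattern the patch ends up with, the sketch below
shows how a single-threaded shutdown can unload one service per main-loop pass and
simply reschedule itself with qb_loop_job_add() until nothing is left. It is a
minimal, self-contained illustration, not corosync code: the services[] table and
unload_next_service() are hypothetical stand-ins for corosync_service[] and
corosync_service_unlink_and_exit_priority(); only qb_loop_job_add() and
QB_LOOP_HIGH are the same libqb primitives the diff uses, and the loop setup in
main() is scaffolding for the example. Build with -lqb.]

#include <stdio.h>
#include <qb/qbloop.h>

#define SERVICES_COUNT_MAX 64

struct fake_service {
	const char *name;
	int loaded;
};

/* Simplified stand-in for corosync_service[] */
static struct fake_service services[SERVICES_COUNT_MAX] = {
	{ "cmap", 1 }, { "cpg", 1 }, { "votequorum", 1 },
};

static qb_loop_t *loop;

/*
 * Stand-in for corosync_service_unlink_and_exit_priority(): tear down the
 * next loaded service (exit_fn and cs_ipcs_service_destroy() would go here),
 * clear its slot immediately, and report whether anything was unloaded.
 */
static int unload_next_service(void)
{
	int i;

	for (i = 0; i < SERVICES_COUNT_MAX; i++) {
		if (services[i].loaded) {
			printf("Service engine unloaded: %s\n", services[i].name);
			services[i].loaded = 0;
			return 1;
		}
	}
	return 0;
}

/*
 * Counterpart of service_exit_schedwrk_handler(): unload one service per
 * main-loop pass and reschedule itself until every slot is empty.  Because
 * the loop is single threaded, no message can be dispatched to a service
 * between its teardown and the moment its slot is cleared.
 */
static void exit_schedwrk_handler(void *data)
{
	if (unload_next_service() == 0) {
		qb_loop_stop(loop);	/* shutdown finished */
		return;
	}
	qb_loop_job_add(loop, QB_LOOP_HIGH, data, exit_schedwrk_handler);
}

int main(void)
{
	loop = qb_loop_create();
	qb_loop_job_add(loop, QB_LOOP_HIGH, NULL, exit_schedwrk_handler);
	qb_loop_run(loop);
	qb_loop_destroy(loop);
	return 0;
}

Doing the IPC teardown and the corosync_service[i] = NULL assignment in the same
job is what removes the window the old two-handler ping-pong had to protect
against, which is the point of the patch above.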