service: remove leftovers from mt corosync

Multithreaded corosync used to use many ugly workarounds. One of them is
shutdown process, where we had to solve a problem with two locks. This was
solved by scheduling jobs between service exit_fn call and actual
service unload. Sadly this can cause corosync to receive a message from
another node in the meantime, causing it to segfault on exit.

Because corosync is now single threaded, we don't need such hacks any
longer.

Signed-off-by: Jan Friesse <jfriesse@redhat.com>
Reviewed-by: Fabio M. Di Nitto <fdinitto@redhat.com>
This commit is contained in:
Jan Friesse 2012-08-07 17:33:03 +02:00
parent fefdc2db87
commit dfe34d330c
3 changed files with 12 additions and 55 deletions

View File

@ -175,7 +175,6 @@ static int32_t cs_ipcs_connection_accept (qb_ipcs_connection_t *c, uid_t euid, g
}
if (corosync_service[service] == NULL ||
corosync_service_exiting[service] ||
ipcs_mapper[service].inst == NULL) {
return -ENOSYS;
}

View File

@ -112,8 +112,6 @@ struct corosync_service_engine *corosync_service[SERVICES_COUNT_MAX];
const char *service_stats_rx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];
const char *service_stats_tx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];
int corosync_service_exiting[SERVICES_COUNT_MAX];
static void (*service_unlink_all_complete) (void) = NULL;
char *corosync_service_link_and_init (
@ -194,7 +192,7 @@ static int service_priority_max(void)
* use the force
*/
static unsigned int
corosync_service_unlink_priority (
corosync_service_unlink_and_exit_priority (
struct corosync_api_v1 *corosync_api,
int lowest_priority,
int *current_priority,
@ -229,7 +227,16 @@ corosync_service_unlink_priority (
}
}
corosync_service_exiting[*current_service_engine] = 1;
/*
* Exit all ipc connections dependent on this service
*/
cs_ipcs_service_destroy (*current_service_engine);
log_printf(LOGSYS_LEVEL_NOTICE,
"Service engine unloaded: %s",
corosync_service[*current_service_engine]->name);
corosync_service[*current_service_engine] = NULL;
/*
* Call should call this function again
@ -349,43 +356,6 @@ unsigned int corosync_service_defaults_link_and_init (struct corosync_api_v1 *co
return (0);
}
/*
* Declaration of exit_schedwrk_handler, because of cycle
* (service_exit_schedwrk_handler calls service_unlink_schedwrk_handler, and vice-versa)
*/
static void service_exit_schedwrk_handler (void *data);
/*
 * Main-loop job: finish unloading one service engine, then hand control
 * back to service_exit_schedwrk_handler to continue the shutdown sequence.
 * data carries a struct seus_handler_data identifying the engine.
 */
static void service_unlink_schedwrk_handler (void *data) {
	struct seus_handler_data *cb_data = (struct seus_handler_data *)data;

	/*
	 * Exit all ipc connections dependent on this service.  If the
	 * teardown is not complete yet (-1), reschedule this same job on
	 * the main loop and retry instead of blocking.
	 */
	if (cs_ipcs_service_destroy (cb_data->service_engine) == -1) {
		qb_loop_job_add(cs_poll_handle_get(),
			QB_LOOP_HIGH,
			data,
			service_unlink_schedwrk_handler);
		return;
	}

	log_printf(LOGSYS_LEVEL_NOTICE,
		"Service engine unloaded: %s",
		corosync_service[cb_data->service_engine]->name);

	/* Engine is fully torn down; drop it from the service table. */
	corosync_service[cb_data->service_engine] = NULL;

	/*
	 * Schedule the exit handler so the next-priority engine gets
	 * processed on a subsequent loop iteration.
	 */
	qb_loop_job_add(cs_poll_handle_get(),
		QB_LOOP_HIGH,
		data,
		service_exit_schedwrk_handler);
}
static void service_exit_schedwrk_handler (void *data) {
int res;
static int current_priority = 0;
@ -401,7 +371,7 @@ static void service_exit_schedwrk_handler (void *data) {
called = 1;
}
res = corosync_service_unlink_priority (
res = corosync_service_unlink_and_exit_priority (
api,
0,
&current_priority,
@ -411,16 +381,6 @@ static void service_exit_schedwrk_handler (void *data) {
return;
}
if (res == 1) {
cb_data->service_engine = current_service_engine;
qb_loop_job_add(cs_poll_handle_get(),
QB_LOOP_HIGH,
data,
service_unlink_schedwrk_handler);
return;
}
qb_loop_job_add(cs_poll_handle_get(),
QB_LOOP_HIGH,
data,

View File

@ -75,8 +75,6 @@ extern unsigned int corosync_service_defaults_link_and_init (
extern struct corosync_service_engine *corosync_service[];
extern int corosync_service_exiting[];
extern const char *service_stats_rx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];
extern const char *service_stats_tx[SERVICES_COUNT_MAX][SERVICE_HANDLER_MAXIMUM_COUNT];