This patch reworks IPC to use threads instead of the main poll loop

git-svn-id: http://svn.fedorahosted.org/svn/corosync/trunk@1011 fd59a12c-fef9-0310-b244-a6a79926bd2f
Steven Dake 2006-04-27 01:39:10 +00:00
parent 38151d8e0c
commit 3c7f8b7c05
14 changed files with 166 additions and 896 deletions

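The model after this change: totem and the timers stay on the poll loop, while each library connection gets its own dispatch thread that blocks on its socket, with a single mutex serializing those threads against the executive. The new ipc.c itself is not shown here (its diff is suppressed below as too large), so the following is only a sketch of that model; conn_info, serialize_mutex, and handle_request are illustrative names, not the actual ipc.c symbols.

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

static pthread_mutex_t serialize_mutex = PTHREAD_MUTEX_INITIALIZER;

struct conn_info {
	int fd;			/* connected library socket */
	pthread_t thread;	/* dispatch thread for this connection */
};

static void handle_request (struct conn_info *conn, char *msg, ssize_t len)
{
	/* hand the request to the service handler; runs under serialize_mutex */
	(void)conn; (void)msg; (void)len;
}

static void *conn_dispatch (void *arg)
{
	struct conn_info *conn = arg;
	char msg[4096];
	ssize_t len;

	/* block on the socket instead of registering with the poll loop */
	while ((len = read (conn->fd, msg, sizeof (msg))) > 0) {
		pthread_mutex_lock (&serialize_mutex);
		handle_request (conn, msg, len);
		pthread_mutex_unlock (&serialize_mutex);
	}
	close (conn->fd);
	free (conn);
	return (NULL);
}

/* called when a new library connection is accepted */
static int conn_accepted (int fd)
{
	struct conn_info *conn = malloc (sizeof (struct conn_info));

	if (conn == NULL) {
		return (-1);
	}
	conn->fd = fd;
	return (pthread_create (&conn->thread, NULL, conn_dispatch, conn));
}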
View File

@@ -58,9 +58,9 @@ LCR_OBJS = evs.o clm.o amf.o ckpt.o evt.o lck.o msg.o cfg.o cpg.o amfconfig.o ai
# main executive objects
MAIN_SRC = main.c print.c mempool.c \
util.c sync.c ykd.c service.c totemconfig.c mainconfig.c
util.c sync.c ykd.c service.c ipc.c totemconfig.c mainconfig.c
MAIN_OBJS = main.o print.o mempool.o \
util.o sync.o service.o totemconfig.o mainconfig.o ../lcr/lcr_ifact.o
util.o sync.o service.o ipc.o totemconfig.o mainconfig.o ../lcr/lcr_ifact.o
OTHER_OBJS = objdb.o
ifeq (${BUILD_DYNAMIC}, 1)

View File

@@ -57,6 +57,7 @@ struct poll_instance {
struct pollfd *ufds;
int poll_entry_count;
struct timerlist timerlist;
pthread_mutex_t *serialize;
};
/*
@@ -68,7 +69,7 @@ static struct hdb_handle_database poll_instance_database = {
.iterator = 0
};
poll_handle poll_create (void)
poll_handle poll_create (pthread_mutex_t *serialize)
{
poll_handle handle;
struct poll_instance *poll_instance;
@@ -88,6 +89,7 @@ poll_handle poll_create (void)
poll_instance->poll_entries = 0;
poll_instance->ufds = 0;
poll_instance->poll_entry_count = 0;
poll_instance->serialize = serialize;
timerlist_init (&poll_instance->timerlist);
return (handle);
@@ -386,7 +388,9 @@ int poll_run (
&poll_instance->poll_entries[i].ufd,
sizeof (struct pollfd));
}
pthread_mutex_lock (poll_instance->serialize);
timeout = timerlist_timeout_msec (&poll_instance->timerlist);
pthread_mutex_unlock (poll_instance->serialize);
retry_poll:
res = poll (poll_instance->ufds,
@@ -402,6 +406,7 @@ retry_poll:
for (i = 0; i < poll_entry_count; i++) {
if (poll_instance->ufds[i].fd != -1 &&
poll_instance->ufds[i].revents) {
pthread_mutex_lock (poll_instance->serialize);
res = poll_instance->poll_entries[i].dispatch_fn (handle,
poll_instance->ufds[i].fd,
@@ -415,6 +420,7 @@ retry_poll:
if (res == -1) {
poll_instance->poll_entries[i].ufd.fd = -1; /* empty entry */
}
pthread_mutex_unlock (poll_instance->serialize);
}
}
timerlist_expire (&poll_instance->timerlist);

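The practical effect of the new poll_create signature: the caller supplies the mutex that serializes the executive, and the loop takes it around the timeout computation and around every dispatch callback, so IPC threads holding the same mutex never run concurrently with executive handlers. A minimal sketch of the calling convention, assuming this tree's poll header (called aispoll.h here for illustration) and that poll_run loops until shutdown:

#include <pthread.h>
#include "aispoll.h"

static pthread_mutex_t serialize_mutex = PTHREAD_MUTEX_INITIALIZER;

int main (void)
{
	poll_handle handle;

	/* every dispatch_fn registered on this handle now runs with
	 * serialize_mutex held (see the lock/unlock added above) */
	handle = poll_create (&serialize_mutex);

	poll_run (handle);	/* main executive loop */
	return (0);
}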
View File

@@ -37,7 +37,7 @@
typedef void * poll_timer_handle;
typedef unsigned int poll_handle;
poll_handle poll_create (void);
poll_handle poll_create (pthread_mutex_t *mutex);
int poll_destroy (poll_handle poll_handle);

View File

@@ -1838,6 +1838,7 @@ deliver_event(struct event_data *evt,
}
}
assert (esip->esi_nevents >= 0);
if (!esip->esi_queue_blocked &&
(esip->esi_nevents >= evt_delivery_queue_size)) {
log_printf(LOG_LEVEL_DEBUG, "block\n");
@@ -2106,6 +2107,11 @@ static int evt_lib_init(void *conn)
*/
memset(libevt_pd, 0, sizeof(*libevt_pd));
/*
* Initialize the open channel handle database.
*/
hdb_create(&libevt_pd->esi_hdb);
/*
* list of channels open on this instance
*/
@@ -3013,6 +3019,11 @@ static int evt_lib_exit(void *conn)
}
}
/*
* Destroy the open channel handle database
*/
hdb_destroy(&esip->esi_hdb);
return 0;
}

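evt_lib_init and evt_lib_exit now bracket a private handle database per library connection, so one client's open-channel handles cannot collide with another's. A reduced sketch of that pairing, assuming the hdb.h API shown later in this commit; the struct layout beyond esi_hdb is illustrative:

#include "hdb.h"

struct libevt_pd {
	struct hdb_handle_database esi_hdb;	/* open channel handles */
	/* ... remaining per-connection event service state ... */
};

static int lib_init (struct libevt_pd *pd)
{
	hdb_create (&pd->esi_hdb);	/* zeroes the database, inits its mutex */
	return (0);
}

static int lib_exit (struct libevt_pd *pd)
{
	hdb_destroy (&pd->esi_hdb);	/* releases the handle array */
	return (0);
}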
File diff suppressed because it is too large

View File

@@ -65,7 +65,8 @@ struct object_instance {
static struct hdb_handle_database object_instance_database = {
.handle_count = 0,
.handles = 0,
.iterator = 0
.iterator = 0,
.mutex = PTHREAD_MUTEX_INITIALIZER
};
static int objdb_init (void)

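File-scope handle databases like this one are initialized at load time and never pass through hdb_create, so each gains an explicit .mutex = PTHREAD_MUTEX_INITIALIZER; dynamically created databases instead get their mutex from the pthread_mutex_init call added to hdb_create later in this commit. The static pattern, reduced to a sketch (the database name is illustrative):

#include <pthread.h>
#include "hdb.h"

/* load-time initialization: the static initializer stands in for the
 * pthread_mutex_init that hdb_create would otherwise perform */
static struct hdb_handle_database example_instance_database = {
	.handle_count = 0,
	.handles = 0,
	.iterator = 0,
	.mutex = PTHREAD_MUTEX_INITIALIZER
};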
View File

@@ -202,7 +202,8 @@ static struct totem_ip_address localhost;
static struct hdb_handle_database totemnet_instance_database = {
.handle_count = 0,
.handles = 0,
.iterator = 0
.iterator = 0,
.mutex = PTHREAD_MUTEX_INITIALIZER
};
static void totemnet_instance_initialize (struct totemnet_instance *instance)

View File

@@ -203,7 +203,8 @@ struct totempg_group_instance {
static struct hdb_handle_database totempg_groups_instance_database = {
.handle_count = 0,
.handles = 0,
.iterator = 0
.iterator = 0,
.mutex = PTHREAD_MUTEX_INITIALIZER
};
static int send_ok (int msg_size);
@@ -570,6 +571,8 @@ static void totempg_deliver_fn (
void *callback_token_received_handle;
pthread_mutex_t mcast_msg_mutex = PTHREAD_MUTEX_INITIALIZER;
int callback_token_received_fn (enum totem_callback_token_type type,
void *data)
{
@@ -577,10 +580,13 @@ int callback_token_received_fn (enum totem_callback_token_type type,
struct iovec iovecs[3];
int res;
pthread_mutex_lock (&mcast_msg_mutex);
if (mcast_packed_msg_count == 0) {
pthread_mutex_unlock (&mcast_msg_mutex);
return (0);
}
if (totemmrp_avail() == 0) {
pthread_mutex_unlock (&mcast_msg_mutex);
return (0);
}
mcast.fragmented = 0;
@@ -605,6 +611,7 @@ int callback_token_received_fn (enum totem_callback_token_type type,
mcast_packed_msg_count = 0;
fragment_size = 0;
pthread_mutex_unlock (&mcast_msg_mutex);
return (0);
}
@@ -672,6 +679,7 @@ static int mcast_msg (
int copy_base = 0;
int total_size = 0;
pthread_mutex_lock (&mcast_msg_mutex);
totemmrp_new_msg_signal ();
max_packet_size = TOTEMPG_PACKET_SIZE -
@@ -689,6 +697,7 @@ static int mcast_msg (
if (send_ok (total_size + sizeof(unsigned short) *
(mcast_packed_msg_count+1)) == 0) {
pthread_mutex_unlock (&mcast_msg_mutex);
return(-1);
}
@@ -800,6 +809,7 @@ static int mcast_msg (
mcast_packed_msg_count++;
}
pthread_mutex_unlock (&mcast_msg_mutex);
return (res);
}

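The rule the totempg changes follow: every path that touches the shared packing state (mcast_packed_msg_count, fragment_size) takes mcast_msg_mutex, and every early return drops it first. The skeleton of that pattern, with the flush logic elided:

#include <pthread.h>

static pthread_mutex_t mcast_msg_mutex = PTHREAD_MUTEX_INITIALIZER;
static int mcast_packed_msg_count;	/* shared packing state */

static int token_callback (void)
{
	pthread_mutex_lock (&mcast_msg_mutex);
	if (mcast_packed_msg_count == 0) {
		/* nothing staged: unlock before the early return */
		pthread_mutex_unlock (&mcast_msg_mutex);
		return (0);
	}
	/* ... assemble and multicast the packed messages ... */
	mcast_packed_msg_count = 0;
	pthread_mutex_unlock (&mcast_msg_mutex);
	return (0);
}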
View File

@@ -288,7 +288,8 @@ struct rrp_algo active_algo = {
static struct hdb_handle_database totemrrp_instance_database = {
.handle_count = 0,
.handles = 0,
.iterator = 0
.iterator = 0,
.mutex = PTHREAD_MUTEX_INITIALIZER
};
#define log_printf(level, format, args...) \

View File

@@ -48,7 +48,6 @@
*/
#include <assert.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -571,7 +570,8 @@ void main_iface_change_fn (
static struct hdb_handle_database totemsrp_instance_database = {
.handle_count = 0,
.handles = 0,
.iterator = 0
.iterator = 0,
.mutex = PTHREAD_MUTEX_INITIALIZER
};
struct message_handlers totemsrp_message_handlers = {
6,

View File

@@ -38,6 +38,7 @@
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
enum HDB_HANDLE_STATE {
HDB_HANDLE_STATE_EMPTY,
@@ -55,12 +56,14 @@ struct hdb_handle_database {
unsigned int handle_count;
struct hdb_handle *handles;
unsigned int iterator;
pthread_mutex_t mutex;
};
static inline void hdb_create (
struct hdb_handle_database *handle_database)
{
memset (handle_database, 0, sizeof (struct hdb_handle_database));
pthread_mutex_init (&handle_database->mutex, NULL);
}
static inline void hdb_destroy (
@@ -83,6 +86,8 @@ static inline int hdb_handle_create (
int found = 0;
void *instance;
pthread_mutex_lock (&handle_database->mutex);
for (handle = 0; handle < handle_database->handle_count; handle++) {
if (handle_database->handles[handle].state == HDB_HANDLE_STATE_EMPTY) {
found = 1;
@@ -95,6 +100,7 @@ static inline int hdb_handle_create (
new_handles = (struct hdb_handle *)realloc (handle_database->handles,
sizeof (struct hdb_handle) * handle_database->handle_count);
if (new_handles == 0) {
pthread_mutex_unlock (&handle_database->mutex);
return (-1);
}
handle_database->handles = new_handles;
@@ -114,6 +120,8 @@ static inline int hdb_handle_create (
*handle_id_out = handle;
pthread_mutex_unlock (&handle_database->mutex);
return (0);
}
@@ -122,18 +130,24 @@ static inline int hdb_handle_get (
unsigned int handle,
void **instance)
{
pthread_mutex_lock (&handle_database->mutex);
*instance = NULL;
if (handle >= handle_database->handle_count) {
pthread_mutex_unlock (&handle_database->mutex);
return (-1);
}
if (handle_database->handles[handle].state != HDB_HANDLE_STATE_ACTIVE) {
pthread_mutex_unlock (&handle_database->mutex);
return (-1);
}
*instance = handle_database->handles[handle].instance;
handle_database->handles[handle].ref_count += 1;
pthread_mutex_unlock (&handle_database->mutex);
return (0);
}
@@ -141,6 +155,7 @@ static inline void hdb_handle_put (
struct hdb_handle_database *handle_database,
unsigned int handle)
{
pthread_mutex_lock (&handle_database->mutex);
handle_database->handles[handle].ref_count -= 1;
assert (handle_database->handles[handle].ref_count >= 0);
@@ -148,13 +163,17 @@ static inline void hdb_handle_put (
free (handle_database->handles[handle].instance);
memset (&handle_database->handles[handle], 0, sizeof (struct hdb_handle));
}
pthread_mutex_unlock (&handle_database->mutex);
}
static inline void hdb_handle_destroy (
struct hdb_handle_database *handle_database,
unsigned int handle)
{
pthread_mutex_lock (&handle_database->mutex);
handle_database->handles[handle].state = HDB_HANDLE_STATE_PENDINGREMOVAL;
pthread_mutex_unlock (&handle_database->mutex);
hdb_handle_put (handle_database, handle);
}

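hdb_handle_get and hdb_handle_put form a reference count taken and dropped under the new database mutex; hdb_handle_destroy only marks the handle PENDINGREMOVAL, and the instance is freed by the final put. A usage sketch under those assumptions; the instance type and field are illustrative, and hdb_handle_create is assumed to take the instance size plus an out parameter for the handle:

#include "hdb.h"

struct my_instance {
	int value;	/* illustrative payload */
};

static struct hdb_handle_database my_database;

static int example (void)
{
	unsigned int handle;
	struct my_instance *instance;

	hdb_create (&my_database);

	/* allocates a zeroed instance and returns its handle */
	if (hdb_handle_create (&my_database,
		sizeof (struct my_instance), &handle) != 0) {
		return (-1);
	}

	/* takes a reference and the instance pointer under the mutex */
	if (hdb_handle_get (&my_database, handle,
		(void **)&instance) != 0) {
		return (-1);
	}
	instance->value = 42;
	hdb_handle_put (&my_database, handle);	/* drop the reference */

	/* marks PENDINGREMOVAL; the last put frees the instance */
	hdb_handle_destroy (&my_database, handle);
	return (0);
}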
View File

@@ -35,6 +35,7 @@
#define QUEUE_H_DEFINED
#include <string.h>
#include <pthread.h>
#include "assert.h"
struct queue {
@@ -46,6 +47,7 @@ struct queue {
void *items;
int size_per_item;
int iterator;
pthread_mutex_t mutex;
};
static inline int queue_init (struct queue *queue, int queue_items, int size_per_item) {
@@ -61,17 +63,20 @@ static inline int queue_init (struct queue *queue, int queue_items, int size_per
return (-ENOMEM);
}
memset (queue->items, 0, queue_items * size_per_item);
pthread_mutex_init (&queue->mutex, NULL);
return (0);
}
static inline int queue_reinit (struct queue *queue)
{
pthread_mutex_lock (&queue->mutex);
queue->head = 0;
queue->tail = queue->size - 1;
queue->used = 0;
queue->usedhw = 0;
memset (queue->items, 0, queue->size * queue->size_per_item);
pthread_mutex_unlock (&queue->mutex);
return (0);
}
@@ -80,11 +85,21 @@ static inline void queue_free (struct queue *queue) {
}
static inline int queue_is_full (struct queue *queue) {
return (queue->size - 1 == queue->used);
int full;
pthread_mutex_lock (&queue->mutex);
full = queue->size - 1 == queue->used;
pthread_mutex_unlock (&queue->mutex);
return (full);
}
static inline int queue_is_empty (struct queue *queue) {
return (queue->used == 0);
int empty;
pthread_mutex_lock (&queue->mutex);
empty = queue->used == 0;
pthread_mutex_unlock (&queue->mutex);
return (empty);
}
static inline void queue_item_add (struct queue *queue, void *item)
@@ -92,6 +107,7 @@ static inline void queue_item_add (struct queue *queue, void *item)
char *queue_item;
int queue_position;
pthread_mutex_lock (&queue->mutex);
queue_position = queue->head;
queue_item = queue->items;
queue_item += queue_position * queue->size_per_item;
@@ -104,6 +120,7 @@ static inline void queue_item_add (struct queue *queue, void *item)
if (queue->used > queue->usedhw) {
queue->usedhw = queue->used;
}
pthread_mutex_unlock (&queue->mutex);
}
static inline void *queue_item_get (struct queue *queue)
@@ -111,34 +128,42 @@ static inline void *queue_item_get (struct queue *queue)
char *queue_item;
int queue_position;
pthread_mutex_lock (&queue->mutex);
queue_position = (queue->tail + 1) % queue->size;
queue_item = queue->items;
queue_item += queue_position * queue->size_per_item;
pthread_mutex_unlock (&queue->mutex);
return ((void *)queue_item);
}
static inline void queue_item_remove (struct queue *queue) {
pthread_mutex_lock (&queue->mutex);
queue->tail = (queue->tail + 1) % queue->size;
assert (queue->tail != queue->head);
queue->used--;
assert (queue->used >= 0);
pthread_mutex_unlock (&queue->mutex);
}
static inline void queue_items_remove (struct queue *queue, int rel_count)
{
pthread_mutex_lock (&queue->mutex);
queue->tail = (queue->tail + rel_count) % queue->size;
assert (queue->tail != queue->head);
queue->used -= rel_count;
pthread_mutex_unlock (&queue->mutex);
}
static inline void queue_item_iterator_init (struct queue *queue)
{
pthread_mutex_lock (&queue->mutex);
queue->iterator = (queue->tail + 1) % queue->size;
pthread_mutex_unlock (&queue->mutex);
}
static inline void *queue_item_iterator_get (struct queue *queue)
@@ -146,30 +171,46 @@ static inline void *queue_item_iterator_get (struct queue *queue)
char *queue_item;
int queue_position;
pthread_mutex_lock (&queue->mutex);
queue_position = (queue->iterator) % queue->size;
if (queue->iterator == queue->head) {
pthread_mutex_unlock (&queue->mutex);
return (0);
}
queue_item = queue->items;
queue_item += queue_position * queue->size_per_item;
pthread_mutex_unlock (&queue->mutex);
return ((void *)queue_item);
}
static inline int queue_item_iterator_next (struct queue *queue)
{
int next_res;
pthread_mutex_lock (&queue->mutex);
queue->iterator = (queue->iterator + 1) % queue->size;
return (queue->iterator == queue->head);
next_res = queue->iterator == queue->head;
pthread_mutex_unlock (&queue->mutex);
return (next_res);
}
static inline void queue_avail (struct queue *queue, int *avail)
{
pthread_mutex_lock (&queue->mutex);
*avail = queue->size - queue->used - 2;
assert (*avail >= 0);
pthread_mutex_unlock (&queue->mutex);
}
static inline int queue_used (struct queue *queue) {
return (queue->used);
int used;
pthread_mutex_lock (&queue->mutex);
used = queue->used;
pthread_mutex_unlock (&queue->mutex);
return (used);
}
#endif /* QUEUE_H_DEFINED */

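Each queue operation now takes and drops the queue's own mutex, so individual calls are safe from multiple threads; note that a check-then-get sequence spans two lock acquisitions, so it is only safe when a single consumer drains the queue. A usage sketch of the API above (the item type and sizes are illustrative):

#include <stdio.h>
#include "queue.h"

struct work_item {
	int sequence;
};

static int example (void)
{
	struct queue work_queue;
	struct work_item item = { .sequence = 1 };
	struct work_item *out;

	/* queue_init allocates the item array and initializes the mutex */
	if (queue_init (&work_queue, 128, sizeof (struct work_item)) != 0) {
		return (-1);
	}

	queue_item_add (&work_queue, &item);	/* copies the item in */

	if (queue_is_empty (&work_queue) == 0) {
		out = queue_item_get (&work_queue);	/* peek at the tail */
		printf ("sequence %d\n", out->sequence);
		queue_item_remove (&work_queue);	/* advance the tail */
	}

	queue_free (&work_queue);
	return (0);
}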
View File

@@ -742,7 +742,6 @@ saEvtDispatch(
* grabbed it.
*/
if (evt->led_head.error == SA_AIS_ERR_NOT_EXIST) {
DPRINT (("MESSAGE_RES_EVT_AVAILABLE: No event data\n"));
error = SA_AIS_OK;
break;
}

View File

@@ -11,6 +11,7 @@
#include <sys/poll.h>
#include <stdlib.h>
#include <getopt.h>
#include <sched.h>
#include "saAis.h"
#include "saEvt.h"
@@ -372,6 +373,10 @@ evt_free:
static int err_wait_time = -1;
static struct sched_param sched_param = {
sched_priority: 1
};
int main (int argc, char **argv)
{
static const char opts[] = "c:s:n:qu:f:";
@@ -379,6 +384,8 @@ int main (int argc, char **argv)
int option;
char *p;
sched_setscheduler (0, SCHED_RR, &sched_param);
while (1) {
option = getopt(argc, argv, opts);
if (option == -1)
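
One caveat on the SCHED_RR change to the test: sched_setscheduler requires root (or CAP_SYS_NICE) and its return value is ignored here, so on failure the test silently keeps running under the default policy. A sketch of a checked variant; set_rr_priority is an illustrative wrapper, not part of the committed test:

#include <sched.h>
#include <stdio.h>

static void set_rr_priority (void)
{
	struct sched_param param = { .sched_priority = 1 };

	/* SCHED_RR needs privilege; surface the failure instead of
	 * silently continuing as SCHED_OTHER */
	if (sched_setscheduler (0, SCHED_RR, &param) != 0) {
		perror ("sched_setscheduler");
	}
}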