mirror of https://git.proxmox.com/git/mirror_corosync
Rename the queue data structure to cs_queue, because some operating systems already define struct queue in their globally scoped headers.

git-svn-id: http://svn.fedorahosted.org/svn/corosync/trunk@2211 fd59a12c-fef9-0310-b244-a6a79926bd2f
parent e8225b667e
commit ff8283f0b1
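To make the motivation concrete, here is a minimal, self-contained sketch of the tag collision the rename avoids. It is not taken from the commit: both struct bodies are simplified stand-ins for the real definitions, and the platform case is the one the old queue.h comment itself mentions (sys/stream.h on Solaris).

/* Illustrative only: stand-in for an operating-system header that already
 * defines a globally scoped `struct queue` tag. */
struct queue {
	void *q_ptr;
};

/* The old corosync header defined a second `struct queue`; a translation
 * unit that saw both definitions failed with "redefinition of 'struct
 * queue'".  With the corosync type renamed, the two tags no longer clash. */
struct cs_queue {
	int head;
	int tail;
};

int main (void)
{
	struct queue os_q;		/* the OS type */
	struct cs_queue cs_q;		/* the corosync type, no conflict */

	os_q.q_ptr = 0;
	cs_q.head = cs_q.tail = 0;
	return (0);
}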
@@ -61,7 +61,6 @@
 #include <corosync/coroipc_types.h>
 #include <corosync/corodefs.h>
 #include <corosync/list.h>
-#include <corosync/queue.h>
 #include <corosync/lcr/lcr_ifact.h>
 #include <corosync/totem/coropoll.h>
 #include <corosync/totem/totempg.h>
@@ -61,7 +61,6 @@
 #include <corosync/corotypes.h>
 #include <corosync/coroipc_types.h>
 #include <corosync/list.h>
-#include <corosync/queue.h>
 #include <corosync/lcr/lcr_ifact.h>
 #include <corosync/totem/coropoll.h>
 #include <corosync/totem/totempg.h>
@@ -57,7 +57,6 @@
 #include <sys/time.h>
 #include <sys/poll.h>

-#include <corosync/queue.h>
 #include <corosync/sq.h>
 #include <corosync/list.h>
 #include <corosync/hdb.h>
@@ -57,7 +57,6 @@
 #include <sys/time.h>
 #include <sys/poll.h>

-#include <corosync/queue.h>
 #include <corosync/sq.h>
 #include <corosync/list.h>
 #include <corosync/hdb.h>
@@ -71,7 +71,7 @@
 #include <sys/poll.h>

 #include <corosync/swab.h>
-#include <corosync/queue.h>
+#include <corosync/cs_queue.h>
 #include <corosync/sq.h>
 #include <corosync/list.h>
 #include <corosync/hdb.h>
@@ -374,9 +374,9 @@ struct totemsrp_instance {
 	/*
 	 * Queues used to order, deliver, and recover messages
 	 */
-	struct queue new_message_queue;
+	struct cs_queue new_message_queue;

-	struct queue retrans_message_queue;
+	struct cs_queue retrans_message_queue;

 	struct sq regular_sort_queue;

@@ -788,7 +788,7 @@ int totemsrp_initialize (
 		"max_network_delay (%d ms)\n", totem_config->max_network_delay);


-	queue_init (&instance->retrans_message_queue, RETRANS_MESSAGE_QUEUE_SIZE_MAX,
+	cs_queue_init (&instance->retrans_message_queue, RETRANS_MESSAGE_QUEUE_SIZE_MAX,
 		sizeof (struct message_item));

 	sq_init (&instance->regular_sort_queue,
@@ -845,7 +845,7 @@ int totemsrp_initialize (
 	/*
 	 * Must have net_mtu adjusted by totemrrp_initialize first
 	 */
-	queue_init (&instance->new_message_queue,
+	cs_queue_init (&instance->new_message_queue,
 		MESSAGE_QUEUE_MAX,
 		sizeof (struct message_item));

@@ -1853,7 +1853,7 @@ static void memb_state_recovery_enter (
 	instance->my_high_ring_delivered = 0;

 	sq_reinit (&instance->recovery_sort_queue, SEQNO_START_MSG);
-	queue_reinit (&instance->retrans_message_queue);
+	cs_queue_reinit (&instance->retrans_message_queue);

 	low_ring_aru = instance->old_ring_state_high_seq_received;

@@ -1982,7 +1982,7 @@ static void memb_state_recovery_enter (
 		memcpy (((char *)message_item.mcast) + sizeof (struct mcast),
 			sort_queue_item->mcast,
 			sort_queue_item->msg_len);
-		queue_item_add (&instance->retrans_message_queue, &message_item);
+		cs_queue_item_add (&instance->retrans_message_queue, &message_item);
 	}
 	log_printf (instance->totemsrp_log_level_notice,
 		"Originated %d messages in RECOVERY.\n", messages_originated);
@@ -2049,7 +2049,7 @@ int totemsrp_mcast (
 		goto error_exit;
 	}

-	if (queue_is_full (&instance->new_message_queue)) {
+	if (cs_queue_is_full (&instance->new_message_queue)) {
 		log_printf (instance->totemsrp_log_level_warning, "queue full\n");
 		return (-1);
 	}
@@ -2086,7 +2086,7 @@ int totemsrp_mcast (
 	message_item.msg_len = addr_idx;

 	log_printf (instance->totemsrp_log_level_debug, "mcasted message added to pending queue\n");
-	queue_item_add (&instance->new_message_queue, &message_item);
+	cs_queue_item_add (&instance->new_message_queue, &message_item);

 	hdb_handle_put (&totemsrp_instance_database, handle);
 	return (0);
@@ -2113,7 +2113,7 @@ int totemsrp_avail (hdb_handle_t handle)
 		goto error_exit;
 	}

-	queue_avail (&instance->new_message_queue, &avail);
+	cs_queue_avail (&instance->new_message_queue, &avail);

 	hdb_handle_put (&totemsrp_instance_database, handle);

@@ -2272,7 +2272,7 @@ static int orf_token_mcast (
 	int fcc_mcasts_allowed)
 {
 	struct message_item *message_item = 0;
-	struct queue *mcast_queue;
+	struct cs_queue *mcast_queue;
 	struct sq *sort_queue;
 	struct sort_queue_item sort_queue_item;
 	struct sort_queue_item *sort_queue_item_ptr;
@@ -2289,10 +2289,10 @@ static int orf_token_mcast (
 	}

 	for (fcc_mcast_current = 0; fcc_mcast_current < fcc_mcasts_allowed; fcc_mcast_current++) {
-		if (queue_is_empty (mcast_queue)) {
+		if (cs_queue_is_empty (mcast_queue)) {
 			break;
 		}
-		message_item = (struct message_item *)queue_item_get (mcast_queue);
+		message_item = (struct message_item *)cs_queue_item_get (mcast_queue);
 		/* preincrement required by algo */
 		if (instance->old_ring_state_saved &&
 			(instance->memb_state == MEMB_STATE_GATHER ||
@@ -2332,7 +2332,7 @@ static int orf_token_mcast (
 		/*
 		 * Delete item from pending queue
 		 */
-		queue_item_remove (mcast_queue);
+		cs_queue_item_remove (mcast_queue);

 		/*
 		 * If messages mcasted, deliver any new messages to totempg
@@ -2607,7 +2607,7 @@ static int orf_token_send_initial (struct totemsrp_instance *instance)
 		orf_token.retrans_flg = 1;
 		instance->my_set_retrans_flg = 1;

-		if (queue_is_empty (&instance->retrans_message_queue) == 1) {
+		if (cs_queue_is_empty (&instance->retrans_message_queue) == 1) {
 			orf_token.retrans_flg = 0;
 			instance->my_set_retrans_flg = 0;
 		} else {
@@ -3148,10 +3148,10 @@ static unsigned int backlog_get (struct totemsrp_instance *instance)
 	unsigned int backlog = 0;

 	if (instance->memb_state == MEMB_STATE_OPERATIONAL) {
-		backlog = queue_used (&instance->new_message_queue);
+		backlog = cs_queue_used (&instance->new_message_queue);
 	} else
 	if (instance->memb_state == MEMB_STATE_RECOVERY) {
-		backlog = queue_used (&instance->retrans_message_queue);
+		backlog = cs_queue_used (&instance->retrans_message_queue);
 	}
 	return (backlog);
 }
@@ -3432,7 +3432,7 @@ static int message_handler_orf_token (
 		 * has recovered all messages it can recover
 		 * (ie: its retrans queue is empty)
 		 */
-		if (queue_is_empty (&instance->retrans_message_queue) == 0) {
+		if (cs_queue_is_empty (&instance->retrans_message_queue) == 0) {

 			if (token->retrans_flg == 0) {
 				token->retrans_flg = 1;
@@ -3445,7 +3445,7 @@ static int message_handler_orf_token (
 		log_printf (instance->totemsrp_log_level_debug,
 			"token retrans flag is %d my set retrans flag%d retrans queue empty %d count %d, aru %x\n",
 			token->retrans_flg, instance->my_set_retrans_flg,
-			queue_is_empty (&instance->retrans_message_queue),
+			cs_queue_is_empty (&instance->retrans_message_queue),
 			instance->my_retrans_flg_count, token->aru);
 		if (token->retrans_flg == 0) {
 			instance->my_retrans_flg_count += 1;
@@ -43,7 +43,7 @@
 #include <stdlib.h>
 #include <pthread.h>
 #include <errno.h>
-#include <corosync/queue.h>
+#include <corosync/cs_queue.h>

 #include "wthread.h"

@@ -60,7 +60,7 @@ struct worker_thread_t {
 	pthread_mutex_t done_work_mutex;
 	pthread_cond_t done_work_cond;
 	pthread_t thread_id;
-	struct queue queue;
+	struct cs_queue queue;
 	void *thread_state;
 	struct thread_data thread_data;
 };
@@ -73,7 +73,7 @@ static void *start_worker_thread (void *thread_data_in) {

 	for (;;) {
 		pthread_mutex_lock (&worker_thread->new_work_mutex);
-		if (queue_is_empty (&worker_thread->queue) == 1) {
+		if (cs_queue_is_empty (&worker_thread->queue) == 1) {
 			pthread_cond_wait (&worker_thread->new_work_cond,
 				&worker_thread->new_work_mutex);
 		}
@@ -83,14 +83,14 @@ static void *start_worker_thread (void *thread_data_in) {
 		 * worker function to execute and also allow new work to be
 		 * added to the work queue
 		 */
-		data_for_worker_fn = queue_item_get (&worker_thread->queue);
+		data_for_worker_fn = cs_queue_item_get (&worker_thread->queue);
 		pthread_mutex_unlock (&worker_thread->new_work_mutex);
 		worker_thread->worker_thread_group->worker_fn (worker_thread->thread_state, data_for_worker_fn);
 		pthread_mutex_lock (&worker_thread->new_work_mutex);
-		queue_item_remove (&worker_thread->queue);
+		cs_queue_item_remove (&worker_thread->queue);
 		pthread_mutex_unlock (&worker_thread->new_work_mutex);
 		pthread_mutex_lock (&worker_thread->done_work_mutex);
-		if (queue_is_empty (&worker_thread->queue) == 1) {
+		if (cs_queue_is_empty (&worker_thread->queue) == 1) {
 			pthread_cond_signal (&worker_thread->done_work_cond);
 		}
 		pthread_mutex_unlock (&worker_thread->done_work_mutex);
@@ -132,7 +132,7 @@ int worker_thread_group_init (
 		pthread_cond_init (&worker_thread_group->threads[i].new_work_cond, NULL);
 		pthread_mutex_init (&worker_thread_group->threads[i].done_work_mutex, NULL);
 		pthread_cond_init (&worker_thread_group->threads[i].done_work_cond, NULL);
-		queue_init (&worker_thread_group->threads[i].queue, items_max,
+		cs_queue_init (&worker_thread_group->threads[i].queue, items_max,
 			item_size);

 		worker_thread_group->threads[i].thread_data.thread_state =
@@ -154,11 +154,11 @@ int worker_thread_group_work_add (
 	worker_thread_group->last_scheduled = schedule;

 	pthread_mutex_lock (&worker_thread_group->threads[schedule].new_work_mutex);
-	if (queue_is_full (&worker_thread_group->threads[schedule].queue)) {
+	if (cs_queue_is_full (&worker_thread_group->threads[schedule].queue)) {
 		pthread_mutex_unlock (&worker_thread_group->threads[schedule].new_work_mutex);
 		return (-1);
 	}
-	queue_item_add (&worker_thread_group->threads[schedule].queue, item);
+	cs_queue_item_add (&worker_thread_group->threads[schedule].queue, item);
 	pthread_cond_signal (&worker_thread_group->threads[schedule].new_work_cond);
 	pthread_mutex_unlock (&worker_thread_group->threads[schedule].new_work_mutex);
 	return (0);
@@ -171,7 +171,7 @@ void worker_thread_group_wait (

 	for (i = 0; i < worker_thread_group->threadcount; i++) {
 		pthread_mutex_lock (&worker_thread_group->threads[i].done_work_mutex);
-		if (queue_is_empty (&worker_thread_group->threads[i].queue) == 0) {
+		if (cs_queue_is_empty (&worker_thread_group->threads[i].queue) == 0) {
 			pthread_cond_wait (&worker_thread_group->threads[i].done_work_cond,
 				&worker_thread_group->threads[i].done_work_mutex);
 		}
@@ -206,10 +206,10 @@ void worker_thread_group_atsegv (

 	for (i = 0; i < worker_thread_group->threadcount; i++) {
 		worker_thread = &worker_thread_group->threads[i];
-		while (queue_is_empty (&worker_thread->queue) == 0) {
-			data_for_worker_fn = queue_item_get (&worker_thread->queue);
+		while (cs_queue_is_empty (&worker_thread->queue) == 0) {
+			data_for_worker_fn = cs_queue_item_get (&worker_thread->queue);
 			worker_thread->worker_thread_group->worker_fn (worker_thread->thread_state, data_for_worker_fn);
-			queue_item_remove (&worker_thread->queue);
+			cs_queue_item_remove (&worker_thread->queue);
 		}
 	}
 }
@@ -35,7 +35,7 @@ CS_H = hdb.h cs_config.h cpg.h cfg.h evs.h mar_gen.h swab.h \
 	coroipcc.h coroipcs.h coroipc_types.h corodefs.h confdb.h list.h corotypes.h quorum.h votequorum.h

 CS_INTERNAL_H = ipc_cfg.h ipc_confdb.h ipc_cpg.h ipc_evs.h ipc_pload.h ipc_quorum.h \
-	jhash.h pload.h queue.h quorum.h sq.h ipc_votequorum.h coroipc_ipc.h
+	jhash.h pload.h cs_queue.h quorum.h sq.h ipc_votequorum.h coroipc_ipc.h

 LCR_H = lcr_ckpt.h lcr_comp.h lcr_ifact.h

new file: include/corosync/cs_queue.h (227 lines)

/*
 * Copyright (c) 2002-2004 MontaVista Software, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef CS_QUEUE_H_DEFINED
#define CS_QUEUE_H_DEFINED

#include <string.h>
#include <pthread.h>
#include "assert.h"

struct cs_queue {
	int head;
	int tail;
	int used;
	int usedhw;
	int size;
	void *items;
	int size_per_item;
	int iterator;
	pthread_mutex_t mutex;
};

static inline int cs_queue_init (struct cs_queue *cs_queue, int cs_queue_items, int size_per_item) {
	cs_queue->head = 0;
	cs_queue->tail = cs_queue_items - 1;
	cs_queue->used = 0;
	cs_queue->usedhw = 0;
	cs_queue->size = cs_queue_items;
	cs_queue->size_per_item = size_per_item;

	cs_queue->items = malloc (cs_queue_items * size_per_item);
	if (cs_queue->items == 0) {
		return (-ENOMEM);
	}
	memset (cs_queue->items, 0, cs_queue_items * size_per_item);
	pthread_mutex_init (&cs_queue->mutex, NULL);
	return (0);
}

static inline int cs_queue_reinit (struct cs_queue *cs_queue)
{
	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue->head = 0;
	cs_queue->tail = cs_queue->size - 1;
	cs_queue->used = 0;
	cs_queue->usedhw = 0;

	memset (cs_queue->items, 0, cs_queue->size * cs_queue->size_per_item);
	pthread_mutex_unlock (&cs_queue->mutex);
	return (0);
}

static inline void cs_queue_free (struct cs_queue *cs_queue) {
	pthread_mutex_destroy (&cs_queue->mutex);
	free (cs_queue->items);
}

static inline int cs_queue_is_full (struct cs_queue *cs_queue) {
	int full;

	pthread_mutex_lock (&cs_queue->mutex);
	full = ((cs_queue->size - 1) == cs_queue->used);
	pthread_mutex_unlock (&cs_queue->mutex);
	return (full);
}

static inline int cs_queue_is_empty (struct cs_queue *cs_queue) {
	int empty;

	pthread_mutex_lock (&cs_queue->mutex);
	empty = (cs_queue->used == 0);
	pthread_mutex_unlock (&cs_queue->mutex);
	return (empty);
}

static inline void cs_queue_item_add (struct cs_queue *cs_queue, void *item)
{
	char *cs_queue_item;
	int cs_queue_position;

	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue_position = cs_queue->head;
	cs_queue_item = cs_queue->items;
	cs_queue_item += cs_queue_position * cs_queue->size_per_item;
	memcpy (cs_queue_item, item, cs_queue->size_per_item);

	assert (cs_queue->tail != cs_queue->head);

	cs_queue->head = (cs_queue->head + 1) % cs_queue->size;
	cs_queue->used++;
	if (cs_queue->used > cs_queue->usedhw) {
		cs_queue->usedhw = cs_queue->used;
	}
	pthread_mutex_unlock (&cs_queue->mutex);
}

static inline void *cs_queue_item_get (struct cs_queue *cs_queue)
{
	char *cs_queue_item;
	int cs_queue_position;

	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue_position = (cs_queue->tail + 1) % cs_queue->size;
	cs_queue_item = cs_queue->items;
	cs_queue_item += cs_queue_position * cs_queue->size_per_item;
	pthread_mutex_unlock (&cs_queue->mutex);
	return ((void *)cs_queue_item);
}

static inline void cs_queue_item_remove (struct cs_queue *cs_queue) {
	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue->tail = (cs_queue->tail + 1) % cs_queue->size;

	assert (cs_queue->tail != cs_queue->head);

	cs_queue->used--;
	assert (cs_queue->used >= 0);
	pthread_mutex_unlock (&cs_queue->mutex);
}

static inline void cs_queue_items_remove (struct cs_queue *cs_queue, int rel_count)
{
	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue->tail = (cs_queue->tail + rel_count) % cs_queue->size;

	assert (cs_queue->tail != cs_queue->head);

	cs_queue->used -= rel_count;
	pthread_mutex_unlock (&cs_queue->mutex);
}


static inline void cs_queue_item_iterator_init (struct cs_queue *cs_queue)
{
	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue->iterator = (cs_queue->tail + 1) % cs_queue->size;
	pthread_mutex_unlock (&cs_queue->mutex);
}

static inline void *cs_queue_item_iterator_get (struct cs_queue *cs_queue)
{
	char *cs_queue_item;
	int cs_queue_position;

	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue_position = (cs_queue->iterator) % cs_queue->size;
	if (cs_queue->iterator == cs_queue->head) {
		pthread_mutex_unlock (&cs_queue->mutex);
		return (0);
	}
	cs_queue_item = cs_queue->items;
	cs_queue_item += cs_queue_position * cs_queue->size_per_item;
	pthread_mutex_unlock (&cs_queue->mutex);
	return ((void *)cs_queue_item);
}

static inline int cs_queue_item_iterator_next (struct cs_queue *cs_queue)
{
	int next_res;

	pthread_mutex_lock (&cs_queue->mutex);
	cs_queue->iterator = (cs_queue->iterator + 1) % cs_queue->size;

	next_res = cs_queue->iterator == cs_queue->head;
	pthread_mutex_unlock (&cs_queue->mutex);
	return (next_res);
}

static inline void cs_queue_avail (struct cs_queue *cs_queue, int *avail)
{
	pthread_mutex_lock (&cs_queue->mutex);
	*avail = cs_queue->size - cs_queue->used - 2;
	assert (*avail >= 0);
	pthread_mutex_unlock (&cs_queue->mutex);
}

static inline int cs_queue_used (struct cs_queue *cs_queue) {
	int used;

	pthread_mutex_lock (&cs_queue->mutex);
	used = cs_queue->used;
	pthread_mutex_unlock (&cs_queue->mutex);

	return (used);
}

static inline int cs_queue_usedhw (struct cs_queue *cs_queue) {
	int usedhw;

	pthread_mutex_lock (&cs_queue->mutex);
	usedhw = cs_queue->usedhw;
	pthread_mutex_unlock (&cs_queue->mutex);

	return (usedhw);
}

#endif /* CS_QUEUE_H_DEFINED */
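For orientation, here is a minimal usage sketch of the new API. It is not part of the commit: the payload struct and the queue depth of 16 are invented for illustration, and the header is assumed to be installed as <corosync/cs_queue.h>. stdlib.h and errno.h are included first because the header uses malloc, free and ENOMEM without pulling them in itself; link with -lpthread.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <corosync/cs_queue.h>

struct example_item {			/* hypothetical payload type */
	int seq;
};

int main (void)
{
	struct cs_queue q;
	struct example_item in;
	struct example_item *out;

	/* bounded ring of 16 fixed-size slots */
	if (cs_queue_init (&q, 16, sizeof (struct example_item)) != 0) {
		return (EXIT_FAILURE);
	}

	in.seq = 1;
	cs_queue_item_add (&q, &in);	/* copies the item into the ring */

	out = cs_queue_item_get (&q);	/* oldest item, not yet removed */
	printf ("seq=%d used=%d empty=%d\n",
		out->seq, cs_queue_used (&q), cs_queue_is_empty (&q));

	cs_queue_item_remove (&q);	/* release the consumed slot */
	cs_queue_free (&q);
	return (EXIT_SUCCESS);
}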
deleted file: include/corosync/queue.h (232 lines)

/*
 * Copyright (c) 2002-2004 MontaVista Software, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef QUEUE_H_DEFINED
#define QUEUE_H_DEFINED

#include <string.h>
#include <pthread.h>
#include "assert.h"

#ifdef queue
/* struct queue is already defined in sys/stream.h on Solaris */
#undef queue
#define queue _queue
#endif
struct queue {
	int head;
	int tail;
	int used;
	int usedhw;
	int size;
	void *items;
	int size_per_item;
	int iterator;
	pthread_mutex_t mutex;
};

static inline int queue_init (struct queue *queue, int queue_items, int size_per_item) {
	queue->head = 0;
	queue->tail = queue_items - 1;
	queue->used = 0;
	queue->usedhw = 0;
	queue->size = queue_items;
	queue->size_per_item = size_per_item;

	queue->items = malloc (queue_items * size_per_item);
	if (queue->items == 0) {
		return (-ENOMEM);
	}
	memset (queue->items, 0, queue_items * size_per_item);
	pthread_mutex_init (&queue->mutex, NULL);
	return (0);
}

static inline int queue_reinit (struct queue *queue)
{
	pthread_mutex_lock (&queue->mutex);
	queue->head = 0;
	queue->tail = queue->size - 1;
	queue->used = 0;
	queue->usedhw = 0;

	memset (queue->items, 0, queue->size * queue->size_per_item);
	pthread_mutex_unlock (&queue->mutex);
	return (0);
}

static inline void queue_free (struct queue *queue) {
	pthread_mutex_destroy (&queue->mutex);
	free (queue->items);
}

static inline int queue_is_full (struct queue *queue) {
	int full;

	pthread_mutex_lock (&queue->mutex);
	full = ((queue->size - 1) == queue->used);
	pthread_mutex_unlock (&queue->mutex);
	return (full);
}

static inline int queue_is_empty (struct queue *queue) {
	int empty;

	pthread_mutex_lock (&queue->mutex);
	empty = (queue->used == 0);
	pthread_mutex_unlock (&queue->mutex);
	return (empty);
}

static inline void queue_item_add (struct queue *queue, void *item)
{
	char *queue_item;
	int queue_position;

	pthread_mutex_lock (&queue->mutex);
	queue_position = queue->head;
	queue_item = queue->items;
	queue_item += queue_position * queue->size_per_item;
	memcpy (queue_item, item, queue->size_per_item);

	assert (queue->tail != queue->head);

	queue->head = (queue->head + 1) % queue->size;
	queue->used++;
	if (queue->used > queue->usedhw) {
		queue->usedhw = queue->used;
	}
	pthread_mutex_unlock (&queue->mutex);
}

static inline void *queue_item_get (struct queue *queue)
{
	char *queue_item;
	int queue_position;

	pthread_mutex_lock (&queue->mutex);
	queue_position = (queue->tail + 1) % queue->size;
	queue_item = queue->items;
	queue_item += queue_position * queue->size_per_item;
	pthread_mutex_unlock (&queue->mutex);
	return ((void *)queue_item);
}

static inline void queue_item_remove (struct queue *queue) {
	pthread_mutex_lock (&queue->mutex);
	queue->tail = (queue->tail + 1) % queue->size;

	assert (queue->tail != queue->head);

	queue->used--;
	assert (queue->used >= 0);
	pthread_mutex_unlock (&queue->mutex);
}

static inline void queue_items_remove (struct queue *queue, int rel_count)
{
	pthread_mutex_lock (&queue->mutex);
	queue->tail = (queue->tail + rel_count) % queue->size;

	assert (queue->tail != queue->head);

	queue->used -= rel_count;
	pthread_mutex_unlock (&queue->mutex);
}


static inline void queue_item_iterator_init (struct queue *queue)
{
	pthread_mutex_lock (&queue->mutex);
	queue->iterator = (queue->tail + 1) % queue->size;
	pthread_mutex_unlock (&queue->mutex);
}

static inline void *queue_item_iterator_get (struct queue *queue)
{
	char *queue_item;
	int queue_position;

	pthread_mutex_lock (&queue->mutex);
	queue_position = (queue->iterator) % queue->size;
	if (queue->iterator == queue->head) {
		pthread_mutex_unlock (&queue->mutex);
		return (0);
	}
	queue_item = queue->items;
	queue_item += queue_position * queue->size_per_item;
	pthread_mutex_unlock (&queue->mutex);
	return ((void *)queue_item);
}

static inline int queue_item_iterator_next (struct queue *queue)
{
	int next_res;

	pthread_mutex_lock (&queue->mutex);
	queue->iterator = (queue->iterator + 1) % queue->size;

	next_res = queue->iterator == queue->head;
	pthread_mutex_unlock (&queue->mutex);
	return (next_res);
}

static inline void queue_avail (struct queue *queue, int *avail)
{
	pthread_mutex_lock (&queue->mutex);
	*avail = queue->size - queue->used - 2;
	assert (*avail >= 0);
	pthread_mutex_unlock (&queue->mutex);
}

static inline int queue_used (struct queue *queue) {
	int used;

	pthread_mutex_lock (&queue->mutex);
	used = queue->used;
	pthread_mutex_unlock (&queue->mutex);

	return (used);
}

static inline int queue_usedhw (struct queue *queue) {
	int usedhw;

	pthread_mutex_lock (&queue->mutex);
	usedhw = queue->usedhw;
	pthread_mutex_unlock (&queue->mutex);

	return (usedhw);
}

#endif /* QUEUE_H_DEFINED */
@@ -53,7 +53,6 @@
 #include <corosync/coroipc_types.h>
 #include <corosync/cfg.h>
 #include <corosync/list.h>
-#include <corosync/queue.h>
 #include <corosync/mar_gen.h>
 #include <corosync/totem/totemip.h>
 #include <corosync/totem/totem.h>
@@ -46,7 +46,6 @@
 #include <corosync/corodefs.h>
 #include <corosync/cfg.h>
 #include <corosync/list.h>
-#include <corosync/queue.h>
 #include <corosync/mar_gen.h>
 #include <corosync/ipc_confdb.h>
 #include <corosync/lcr/lcr_comp.h>
@@ -58,7 +58,6 @@
 #include <corosync/coroipc_types.h>
 #include <corosync/corodefs.h>
 #include <corosync/list.h>
-#include <corosync/queue.h>
 #include <corosync/jhash.h>
 #include <corosync/lcr/lcr_comp.h>
 #include <corosync/engine/logsys.h>