mirror of https://git.proxmox.com/git/mirror_frr, synced 2025-04-29 06:10:37 +00:00
Merge pull request #11654 from donaldsharp/fixup_macros
*: frr_with_mutex change to follow our standard
commit cb6c39645f
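The change is purely stylistic: frr_with_mutex is FRR's block-scoped locking macro, and the coding standard writes it like a control-flow keyword (for, while), with a space before the opening parenthesis. For readers unfamiliar with the construct, here is a minimal sketch of how such a scoped-lock macro can be built in C. This is an illustration under assumed names (with_mutex, _scoped_lock, _scoped_unlock), not FRR's actual definition, which lives in lib/frr_pthread.h.

#include <pthread.h>

/* Illustrative sketch of a scoped-lock macro in the spirit of
 * frr_with_mutex: lock on entry, unlock automatically when the
 * block is left, including via break. Hypothetical names, not
 * FRR's implementation. */
static inline pthread_mutex_t *_scoped_lock(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	return m;
}

static inline void _scoped_unlock(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define with_mutex(m)                                                  \
	for (pthread_mutex_t *_held                                    \
		     __attribute__((cleanup(_scoped_unlock))) =        \
			     _scoped_lock(m),                          \
		     *_once = NULL;                                    \
	     _once == NULL; _once = (void *)1)

/* usage, in the spacing style this commit enforces: */
static pthread_mutex_t counter_mtx = PTHREAD_MUTEX_INITIALIZER;
static int counter;

void counter_bump(void)
{
	with_mutex (&counter_mtx) {
		counter++;
	}
}

The for loop runs the body exactly once, and the GCC/Clang cleanup attribute guarantees the unlock runs however the block is left. Because the macro heads a braced block just like a for statement, formatting it with a space reads consistently with the rest of the control flow, which is the point of this commit.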
@@ -194,7 +194,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
	 * on various buffers. Those need to be transferred or dropped,
	 * otherwise we'll get spurious failures during session establishment.
	 */
-	frr_with_mutex(&peer->io_mtx, &from_peer->io_mtx) {
+	frr_with_mutex (&peer->io_mtx, &from_peer->io_mtx) {
		fd = peer->fd;
		peer->fd = from_peer->fd;
		from_peer->fd = fd;
@@ -1501,7 +1501,7 @@ int bgp_stop(struct peer *peer)
	BGP_TIMER_OFF(peer->t_delayopen);

	/* Clear input and output buffer. */
-	frr_with_mutex(&peer->io_mtx) {
+	frr_with_mutex (&peer->io_mtx) {
		if (peer->ibuf)
			stream_fifo_clean(peer->ibuf);
		if (peer->obuf)
@@ -134,7 +134,7 @@ static void bgp_process_writes(struct thread *thread)

	struct frr_pthread *fpt = bgp_pth_io;

-	frr_with_mutex(&peer->io_mtx) {
+	frr_with_mutex (&peer->io_mtx) {
		status = bgp_write(peer);
		reschedule = (stream_fifo_head(peer->obuf) != NULL);
	}
@@ -188,7 +188,7 @@ static void bgp_process_reads(struct thread *thread)

	struct frr_pthread *fpt = bgp_pth_io;

-	frr_with_mutex(&peer->io_mtx) {
+	frr_with_mutex (&peer->io_mtx) {
		status = bgp_read(peer, &code);
	}

@@ -247,7 +247,7 @@ static void bgp_process_reads(struct thread *thread)
	stream_set_endp(pkt, pktsize);

	frrtrace(2, frr_bgp, packet_read, peer, pkt);
-	frr_with_mutex(&peer->io_mtx) {
+	frr_with_mutex (&peer->io_mtx) {
		stream_fifo_push(peer->ibuf, pkt);
	}

@@ -252,7 +252,7 @@ void bgp_keepalives_on(struct peer *peer)
	 */
	assert(peerhash_mtx);

-	frr_with_mutex(peerhash_mtx) {
+	frr_with_mutex (peerhash_mtx) {
		holder.peer = peer;
		if (!hash_lookup(peerhash, &holder)) {
			struct pkat *pkat = pkat_new(peer);
@@ -280,7 +280,7 @@ void bgp_keepalives_off(struct peer *peer)
	 */
	assert(peerhash_mtx);

-	frr_with_mutex(peerhash_mtx) {
+	frr_with_mutex (peerhash_mtx) {
		holder.peer = peer;
		struct pkat *res = hash_release(peerhash, &holder);
		if (res) {
@@ -293,7 +293,7 @@ void bgp_keepalives_off(struct peer *peer)

void bgp_keepalives_wake(void)
{
-	frr_with_mutex(peerhash_mtx) {
+	frr_with_mutex (peerhash_mtx) {
		pthread_cond_signal(peerhash_cond);
	}
}
@@ -125,7 +125,7 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
	intmax_t delta;
	uint32_t holdtime;

-	frr_with_mutex(&peer->io_mtx) {
+	frr_with_mutex (&peer->io_mtx) {
		/* if the queue is empty, reset the "last OK" timestamp to
		 * now, otherwise if we write another packet immediately
		 * after it'll get confused
@@ -2777,7 +2777,7 @@ void bgp_process_packet(struct thread *thread)
	bgp_size_t size;
	char notify_data_length[2];

-	frr_with_mutex(&peer->io_mtx) {
+	frr_with_mutex (&peer->io_mtx) {
		peer->curr = stream_fifo_pop(peer->ibuf);
	}

@@ -2904,7 +2904,7 @@ void bgp_process_packet(struct thread *thread)

	if (fsm_update_result != FSM_PEER_TRANSFERRED
	    && fsm_update_result != FSM_PEER_STOPPED) {
-		frr_with_mutex(&peer->io_mtx) {
+		frr_with_mutex (&peer->io_mtx) {
			// more work to do, come back later
			if (peer->ibuf->count > 0)
				thread_add_event(
@@ -1258,7 +1258,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
	 * since this peer is not on the I/O thread, this lock is not strictly
	 * necessary, but serves as a reminder to those who may meddle...
	 */
-	frr_with_mutex(&rfd->peer->io_mtx) {
+	frr_with_mutex (&rfd->peer->io_mtx) {
		// we don't need any I/O related facilities
		if (rfd->peer->ibuf)
			stream_fifo_free(rfd->peer->ibuf);
@@ -193,7 +193,7 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
	 * is not strictly necessary, but serves as a reminder
	 * to those who may meddle...
	 */
-	frr_with_mutex(&vncHD1VR.peer->io_mtx) {
+	frr_with_mutex (&vncHD1VR.peer->io_mtx) {
		// we don't need any I/O related facilities
		if (vncHD1VR.peer->ibuf)
			stream_fifo_free(vncHD1VR.peer->ibuf);
@@ -7,7 +7,7 @@ FRR ships two small wrappers around ``pthread_mutex_lock()`` /
``pthread_mutex_unlock``. Use ``#include "frr_pthread.h"`` to get these
macros.

-.. c:macro:: frr_with_mutex(mutex)
+.. c:macro:: frr_with_mutex (mutex)

   (With ``pthread_mutex_t *mutex``.)

@@ -17,7 +17,7 @@ macros.

   int somefunction(int option)
   {
-          frr_with_mutex(&my_mutex) {
+          frr_with_mutex (&my_mutex) {
                  /* mutex will be locked */

                  if (!option)
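As the peer_xfer_conn() hunk at the top shows, frr_with_mutex also accepts several mutexes at once, locking all of them for the duration of the block. A short usage sketch against the documented macro follows, again in the spacing style this commit standardizes; the struct and field names are invented for illustration, and only frr_with_mutex itself and the "frr_pthread.h" include come from the source above.

#include <pthread.h>

#include "frr_pthread.h" /* provides frr_with_mutex */

/* Hypothetical two-owner structure; only the locking pattern
 * matters here. */
struct owner {
	pthread_mutex_t io_mtx;
	int fd;
};

/* Swap fds with both mutexes held, so neither side can observe a
 * half-finished transfer. */
static void owner_swap_fds(struct owner *a, struct owner *b)
{
	frr_with_mutex (&a->io_mtx, &b->io_mtx) {
		int fd = a->fd;

		a->fd = b->fd;
		b->fd = fd;
	}
	/* both mutexes are released here */
}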
10	lib/ferr.c
@@ -84,7 +84,7 @@ void log_ref_add(struct log_ref *ref)
{
	uint32_t i = 0;

-	frr_with_mutex(&refs_mtx) {
+	frr_with_mutex (&refs_mtx) {
		while (ref[i].code != END_FERR) {
			(void)hash_get(refs, &ref[i], hash_alloc_intern);
			i++;
@@ -98,7 +98,7 @@ struct log_ref *log_ref_get(uint32_t code)
	struct log_ref *ref;

	holder.code = code;
-	frr_with_mutex(&refs_mtx) {
+	frr_with_mutex (&refs_mtx) {
		ref = hash_lookup(refs, &holder);
	}

@@ -115,7 +115,7 @@ void log_ref_display(struct vty *vty, uint32_t code, bool json)
	if (json)
		top = json_object_new_object();

-	frr_with_mutex(&refs_mtx) {
+	frr_with_mutex (&refs_mtx) {
		errlist = code ? list_new() : hash_to_list(refs);
	}

@@ -182,7 +182,7 @@ DEFUN_NOSH(show_error_code,

void log_ref_init(void)
{
-	frr_with_mutex(&refs_mtx) {
+	frr_with_mutex (&refs_mtx) {
		refs = hash_create(ferr_hash_key, ferr_hash_cmp,
				   "Error Reference Texts");
	}
@@ -190,7 +190,7 @@ void log_ref_init(void)

void log_ref_fini(void)
{
-	frr_with_mutex(&refs_mtx) {
+	frr_with_mutex (&refs_mtx) {
		hash_clean(refs, NULL);
		hash_free(refs);
		refs = NULL;
@@ -55,7 +55,7 @@ static struct list *frr_pthread_list;

void frr_pthread_init(void)
{
-	frr_with_mutex(&frr_pthread_list_mtx) {
+	frr_with_mutex (&frr_pthread_list_mtx) {
		frr_pthread_list = list_new();
	}
}
@@ -64,7 +64,7 @@ void frr_pthread_finish(void)
{
	frr_pthread_stop_all();

-	frr_with_mutex(&frr_pthread_list_mtx) {
+	frr_with_mutex (&frr_pthread_list_mtx) {
		struct listnode *n, *nn;
		struct frr_pthread *fpt;

@@ -105,7 +105,7 @@ struct frr_pthread *frr_pthread_new(const struct frr_pthread_attr *attr,
	pthread_mutex_init(fpt->running_cond_mtx, NULL);
	pthread_cond_init(fpt->running_cond, NULL);

-	frr_with_mutex(&frr_pthread_list_mtx) {
+	frr_with_mutex (&frr_pthread_list_mtx) {
		listnode_add(frr_pthread_list, fpt);
	}

@@ -126,7 +126,7 @@ static void frr_pthread_destroy_nolock(struct frr_pthread *fpt)

void frr_pthread_destroy(struct frr_pthread *fpt)
{
-	frr_with_mutex(&frr_pthread_list_mtx) {
+	frr_with_mutex (&frr_pthread_list_mtx) {
		listnode_delete(frr_pthread_list, fpt);
	}

@@ -193,7 +193,7 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)

void frr_pthread_wait_running(struct frr_pthread *fpt)
{
-	frr_with_mutex(fpt->running_cond_mtx) {
+	frr_with_mutex (fpt->running_cond_mtx) {
		while (!fpt->running)
			pthread_cond_wait(fpt->running_cond,
					  fpt->running_cond_mtx);
@@ -202,7 +202,7 @@ void frr_pthread_wait_running(struct frr_pthread *fpt)

void frr_pthread_notify_running(struct frr_pthread *fpt)
{
-	frr_with_mutex(fpt->running_cond_mtx) {
+	frr_with_mutex (fpt->running_cond_mtx) {
		fpt->running = true;
		pthread_cond_signal(fpt->running_cond);
	}
@@ -219,7 +219,7 @@ int frr_pthread_stop(struct frr_pthread *fpt, void **result)

void frr_pthread_stop_all(void)
{
-	frr_with_mutex(&frr_pthread_list_mtx) {
+	frr_with_mutex (&frr_pthread_list_mtx) {
		struct listnode *n;
		struct frr_pthread *fpt;
		for (ALL_LIST_ELEMENTS_RO(frr_pthread_list, n, fpt)) {
@@ -56,7 +56,7 @@ struct hash *hash_create_size(unsigned int size,
	hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
	hash->stats.empty = hash->size;

-	frr_with_mutex(&_hashes_mtx) {
+	frr_with_mutex (&_hashes_mtx) {
		if (!_hashes)
			_hashes = list_new();

@@ -329,7 +329,7 @@ struct list *hash_to_list(struct hash *hash)

void hash_free(struct hash *hash)
{
-	frr_with_mutex(&_hashes_mtx) {
+	frr_with_mutex (&_hashes_mtx) {
		if (_hashes) {
			listnode_delete(_hashes, hash);
			if (_hashes->count == 0) {
@@ -43,14 +43,14 @@ static int zlog_filter_lookup(const char *lookup)

void zlog_filter_clear(void)
{
-	frr_with_mutex(&logfilterlock) {
+	frr_with_mutex (&logfilterlock) {
		zlog_filter_count = 0;
	}
}

int zlog_filter_add(const char *filter)
{
-	frr_with_mutex(&logfilterlock) {
+	frr_with_mutex (&logfilterlock) {
		if (zlog_filter_count >= ZLOG_FILTERS_MAX)
			return 1;

@@ -74,7 +74,7 @@ int zlog_filter_add(const char *filter)

int zlog_filter_del(const char *filter)
{
-	frr_with_mutex(&logfilterlock) {
+	frr_with_mutex (&logfilterlock) {
		int found_idx = zlog_filter_lookup(filter);
		int last_idx = zlog_filter_count - 1;

@@ -96,7 +96,7 @@ int zlog_filter_dump(char *buf, size_t max_size)
{
	int len = 0;

-	frr_with_mutex(&logfilterlock) {
+	frr_with_mutex (&logfilterlock) {
		for (int i = 0; i < zlog_filter_count; i++) {
			int ret;

@@ -115,7 +115,7 @@ static int search_buf(const char *buf, size_t len)
{
	char *found = NULL;

-	frr_with_mutex(&logfilterlock) {
+	frr_with_mutex (&logfilterlock) {
		for (int i = 0; i < zlog_filter_count; i++) {
			found = memmem(buf, len, zlog_filters[i],
				       strlen(zlog_filters[i]));
@@ -488,7 +488,7 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
	 * Serialize 'raise' operations; particularly important for
	 * OSes where privs are process-wide.
	 */
-	frr_with_mutex(&(privs->mutex)) {
+	frr_with_mutex (&(privs->mutex)) {
		/* Locate ref-counting object to use */
		refs = get_privs_refs(privs);

@@ -517,7 +517,7 @@ void _zprivs_lower(struct zebra_privs_t **privs)
	/* Serialize 'lower privs' operation - particularly important
	 * when OS privs are process-wide.
	 */
-	frr_with_mutex(&(*privs)->mutex) {
+	frr_with_mutex (&(*privs)->mutex) {
		refs = get_privs_refs(*privs);

		if (--(refs->refcount) == 0) {
@@ -1280,7 +1280,7 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)

void stream_fifo_push_safe(struct stream_fifo *fifo, struct stream *s)
{
-	frr_with_mutex(&fifo->mtx) {
+	frr_with_mutex (&fifo->mtx) {
		stream_fifo_push(fifo, s);
	}
}
@@ -1312,7 +1312,7 @@ struct stream *stream_fifo_pop_safe(struct stream_fifo *fifo)
{
	struct stream *ret;

-	frr_with_mutex(&fifo->mtx) {
+	frr_with_mutex (&fifo->mtx) {
		ret = stream_fifo_pop(fifo);
	}

@@ -1328,7 +1328,7 @@ struct stream *stream_fifo_head_safe(struct stream_fifo *fifo)
{
	struct stream *ret;

-	frr_with_mutex(&fifo->mtx) {
+	frr_with_mutex (&fifo->mtx) {
		ret = stream_fifo_head(fifo);
	}

@@ -1350,7 +1350,7 @@ void stream_fifo_clean(struct stream_fifo *fifo)

void stream_fifo_clean_safe(struct stream_fifo *fifo)
{
-	frr_with_mutex(&fifo->mtx) {
+	frr_with_mutex (&fifo->mtx) {
		stream_fifo_clean(fifo);
	}
}
46	lib/thread.c
@@ -217,7 +217,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
	tmp.funcname = "TOTAL";
	tmp.types = filter;

-	frr_with_mutex(&masters_mtx) {
+	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			const char *name = m->name ? m->name : "main";

@@ -283,9 +283,9 @@ static void cpu_record_clear(uint8_t filter)
	struct thread_master *m;
	struct listnode *ln;

-	frr_with_mutex(&masters_mtx) {
+	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
-			frr_with_mutex(&m->mtx) {
+			frr_with_mutex (&m->mtx) {
				void *args[2] = {tmp, m->cpu_record};
				hash_iterate(
					m->cpu_record,
@@ -463,7 +463,7 @@ DEFUN_NOSH (show_thread_poll,
	struct listnode *node;
	struct thread_master *m;

-	frr_with_mutex(&masters_mtx) {
+	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
			show_thread_poll_helper(vty, m);
		}
@@ -630,7 +630,7 @@ struct thread_master *thread_master_create(const char *name)
					 sizeof(struct pollfd) * rv->handler.pfdsize);

	/* add to list of threadmasters */
-	frr_with_mutex(&masters_mtx) {
+	frr_with_mutex (&masters_mtx) {
		if (!masters)
			masters = list_new();

@@ -642,7 +642,7 @@ struct thread_master *thread_master_create(const char *name)

void thread_master_set_name(struct thread_master *master, const char *name)
{
-	frr_with_mutex(&master->mtx) {
+	frr_with_mutex (&master->mtx) {
		XFREE(MTYPE_THREAD_MASTER, master->name);
		master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
	}
@@ -708,7 +708,7 @@ static void thread_array_free(struct thread_master *m,
 */
void thread_master_free_unused(struct thread_master *m)
{
-	frr_with_mutex(&m->mtx) {
+	frr_with_mutex (&m->mtx) {
		struct thread *t;
		while ((t = thread_list_pop(&m->unuse)))
			thread_free(m, t);
@@ -720,7 +720,7 @@ void thread_master_free(struct thread_master *m)
{
	struct thread *t;

-	frr_with_mutex(&masters_mtx) {
+	frr_with_mutex (&masters_mtx) {
		listnode_delete(masters, m);
		if (masters->count == 0) {
			list_delete(&masters);
@@ -759,7 +759,7 @@ unsigned long thread_timer_remain_msec(struct thread *thread)
	if (!thread_is_scheduled(thread))
		return 0;

-	frr_with_mutex(&thread->mtx) {
+	frr_with_mutex (&thread->mtx) {
		remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
	}

@@ -775,7 +775,7 @@ unsigned long thread_timer_remain_second(struct thread *thread)
struct timeval thread_timer_remain(struct thread *thread)
{
	struct timeval remain;
-	frr_with_mutex(&thread->mtx) {
+	frr_with_mutex (&thread->mtx) {
		monotime_until(&thread->u.sands, &remain);
	}
	return remain;
@@ -990,7 +990,7 @@ void _thread_add_read_write(const struct xref_threadsched *xref,
	if (fd >= m->fd_limit)
		assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");

-	frr_with_mutex(&m->mtx) {
+	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			// thread is already scheduled; don't reschedule
			break;
@@ -1033,7 +1033,7 @@ void _thread_add_read_write(const struct xref_threadsched *xref,
		m->handler.pfdcount++;

		if (thread) {
-			frr_with_mutex(&thread->mtx) {
+			frr_with_mutex (&thread->mtx) {
				thread->u.fd = fd;
				thread_array[thread->u.fd] = thread;
			}
@@ -1069,14 +1069,14 @@ static void _thread_add_timer_timeval(const struct xref_threadsched *xref,
	monotime(&t);
	timeradd(&t, time_relative, &t);

-	frr_with_mutex(&m->mtx) {
+	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			return;

		thread = thread_get(m, THREAD_TIMER, func, arg, xref);

-		frr_with_mutex(&thread->mtx) {
+		frr_with_mutex (&thread->mtx) {
			thread->u.sands = t;
			thread_timer_list_add(&m->timer, thread);
			if (t_ptr) {
@@ -1154,13 +1154,13 @@ void _thread_add_event(const struct xref_threadsched *xref,

	assert(m != NULL);

-	frr_with_mutex(&m->mtx) {
+	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			break;

		thread = thread_get(m, THREAD_EVENT, func, arg, xref);
-		frr_with_mutex(&thread->mtx) {
+		frr_with_mutex (&thread->mtx) {
			thread->u.val = val;
			thread_list_add_tail(&m->event, thread);
		}
@@ -1441,7 +1441,7 @@ static void cancel_event_helper(struct thread_master *m, void *arg, int flags)

	cr->flags = flags;

-	frr_with_mutex(&m->mtx) {
+	frr_with_mutex (&m->mtx) {
		cr->eventobj = arg;
		listnode_add(m->cancel_req, cr);
		do_thread_cancel(m);
@@ -1499,7 +1499,7 @@ void thread_cancel(struct thread **thread)

	assert(master->owner == pthread_self());

-	frr_with_mutex(&master->mtx) {
+	frr_with_mutex (&master->mtx) {
		struct cancel_req *cr =
			XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
		cr->thread = *thread;
@@ -1551,7 +1551,7 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,

	assert(master->owner != pthread_self());

-	frr_with_mutex(&master->mtx) {
+	frr_with_mutex (&master->mtx) {
		master->canceled = false;

		if (thread) {
@@ -1931,7 +1931,7 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
int thread_should_yield(struct thread *thread)
{
	int result;
-	frr_with_mutex(&thread->mtx) {
+	frr_with_mutex (&thread->mtx) {
		result = monotime_since(&thread->real, NULL)
			 > (int64_t)thread->yield;
	}
@@ -1940,7 +1940,7 @@ int thread_should_yield(struct thread *thread)

void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
{
-	frr_with_mutex(&thread->mtx) {
+	frr_with_mutex (&thread->mtx) {
		thread->yield = yield_time;
	}
}
@@ -2082,11 +2082,11 @@ void _thread_execute(const struct xref_threadsched *xref,
	struct thread *thread;

	/* Get or allocate new thread to execute. */
-	frr_with_mutex(&m->mtx) {
+	frr_with_mutex (&m->mtx) {
		thread = thread_get(m, THREAD_EVENT, func, arg, xref);

		/* Set its event value. */
-		frr_with_mutex(&thread->mtx) {
+		frr_with_mutex (&thread->mtx) {
			thread->add_type = THREAD_EXECUTE;
			thread->u.val = val;
			thread->ref = &thread;
@@ -242,14 +242,14 @@ static bool zlog_file_cycle(struct zlog_cfg_file *zcf)

void zlog_file_set_other(struct zlog_cfg_file *zcf)
{
-	frr_with_mutex(&zcf->cfg_mtx) {
+	frr_with_mutex (&zcf->cfg_mtx) {
		zlog_file_cycle(zcf);
	}
}

bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *filename)
{
-	frr_with_mutex(&zcf->cfg_mtx) {
+	frr_with_mutex (&zcf->cfg_mtx) {
		XFREE(MTYPE_LOG_FD_NAME, zcf->filename);
		zcf->filename = XSTRDUP(MTYPE_LOG_FD_NAME, filename);
		zcf->fd = -1;
@@ -261,7 +261,7 @@ bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *filename)

bool zlog_file_set_fd(struct zlog_cfg_file *zcf, int fd)
{
-	frr_with_mutex(&zcf->cfg_mtx) {
+	frr_with_mutex (&zcf->cfg_mtx) {
		if (zcf->fd == fd)
			return true;

@@ -283,7 +283,7 @@ bool zlog_file_rotate(struct zlog_cfg_file *zcf)
	struct rcu_close_rotate *rcr;
	int fd;

-	frr_with_mutex(&zcf->cfg_mtx) {
+	frr_with_mutex (&zcf->cfg_mtx) {
		if (!zcf->active || !zcf->filename)
			return true;

@@ -517,7 +517,7 @@ void zlog_syslog_set_facility(int facility)
	struct zlog_target *newztc;
	struct zlt_syslog *newzt;

-	frr_with_mutex(&syslog_cfg_mutex) {
+	frr_with_mutex (&syslog_cfg_mutex) {
		if (facility == syslog_facility)
			return;
		syslog_facility = facility;
@@ -540,7 +540,7 @@ void zlog_syslog_set_facility(int facility)

int zlog_syslog_get_facility(void)
{
-	frr_with_mutex(&syslog_cfg_mutex) {
+	frr_with_mutex (&syslog_cfg_mutex) {
		return syslog_facility;
	}
	assert(0);
@@ -551,7 +551,7 @@ void zlog_syslog_set_prio_min(int prio_min)
	struct zlog_target *newztc;
	struct zlt_syslog *newzt = NULL;

-	frr_with_mutex(&syslog_cfg_mutex) {
+	frr_with_mutex (&syslog_cfg_mutex) {
		if (prio_min == syslog_prio_min)
			return;
		syslog_prio_min = prio_min;
@@ -577,7 +577,7 @@ void zlog_syslog_set_prio_min(int prio_min)

int zlog_syslog_get_prio_min(void)
{
-	frr_with_mutex(&syslog_cfg_mutex) {
+	frr_with_mutex (&syslog_cfg_mutex) {
		return syslog_prio_min;
	}
	assert(0);
@@ -247,7 +247,7 @@ uint32_t zebra_opaque_enqueue_batch(struct stream_fifo *batch)
	/* Dequeue messages from the incoming batch, and save them
	 * on the module fifo.
	 */
-	frr_with_mutex(&zo_info.mutex) {
+	frr_with_mutex (&zo_info.mutex) {
		msg = stream_fifo_pop(batch);
		while (msg) {
			stream_fifo_push(&zo_info.in_fifo, msg);
@@ -288,7 +288,7 @@ static void process_messages(struct thread *event)
	 * Dequeue some messages from the incoming queue, temporarily
	 * save them on the local fifo
	 */
-	frr_with_mutex(&zo_info.mutex) {
+	frr_with_mutex (&zo_info.mutex) {

		for (i = 0; i < zo_info.msgs_per_cycle; i++) {
			msg = stream_fifo_pop(&zo_info.in_fifo);
@@ -4242,7 +4242,7 @@ static void rib_process_dplane_results(struct thread *thread)
	TAILQ_INIT(&ctxlist);

	/* Take lock controlling queue of results */
-	frr_with_mutex(&dplane_mutex) {
+	frr_with_mutex (&dplane_mutex) {
		/* Dequeue list of context structs */
		dplane_ctx_list_append(&ctxlist, &rib_dplane_q);
	}
@@ -4401,7 +4401,7 @@ static void rib_process_dplane_results(struct thread *thread)
static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
{
	/* Take lock controlling queue of results */
-	frr_with_mutex(&dplane_mutex) {
+	frr_with_mutex (&dplane_mutex) {
		/* Enqueue context blocks */
		dplane_ctx_list_append(&rib_dplane_q, ctxlist);
	}
@@ -239,7 +239,7 @@ static void zserv_write(struct thread *thread)

	cache = stream_fifo_new();

-	frr_with_mutex(&client->obuf_mtx) {
+	frr_with_mutex (&client->obuf_mtx) {
		while (stream_fifo_head(client->obuf_fifo))
			stream_fifo_push(cache,
					 stream_fifo_pop(client->obuf_fifo));
@@ -432,7 +432,7 @@ static void zserv_read(struct thread *thread)
				memory_order_relaxed);

	/* publish read packets on client's input queue */
-	frr_with_mutex(&client->ibuf_mtx) {
+	frr_with_mutex (&client->ibuf_mtx) {
		while (cache->head)
			stream_fifo_push(client->ibuf_fifo,
					 stream_fifo_pop(cache));
@@ -501,7 +501,7 @@ static void zserv_process_messages(struct thread *thread)
	uint32_t p2p = zrouter.packets_to_process;
	bool need_resched = false;

-	frr_with_mutex(&client->ibuf_mtx) {
+	frr_with_mutex (&client->ibuf_mtx) {
		uint32_t i;
		for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
		     ++i) {
@@ -531,7 +531,7 @@ static void zserv_process_messages(struct thread *thread)

int zserv_send_message(struct zserv *client, struct stream *msg)
{
-	frr_with_mutex(&client->obuf_mtx) {
+	frr_with_mutex (&client->obuf_mtx) {
		stream_fifo_push(client->obuf_fifo, msg);
	}

@@ -547,7 +547,7 @@ int zserv_send_batch(struct zserv *client, struct stream_fifo *fifo)
{
	struct stream *msg;

-	frr_with_mutex(&client->obuf_mtx) {
+	frr_with_mutex (&client->obuf_mtx) {
		msg = stream_fifo_pop(fifo);
		while (msg) {
			stream_fifo_push(client->obuf_fifo, msg);
@@ -684,7 +684,7 @@ void zserv_close_client(struct zserv *client)
	 * Final check in case the client struct is in use in another
	 * pthread: if not in-use, continue and free the client
	 */
-	frr_with_mutex(&client_mutex) {
+	frr_with_mutex (&client_mutex) {
		if (client->busy_count <= 0) {
			/* remove from client list */
			listnode_delete(zrouter.client_list, client);
@@ -761,7 +761,7 @@ static struct zserv *zserv_client_create(int sock)
	}

	/* Add this client to linked list. */
-	frr_with_mutex(&client_mutex) {
+	frr_with_mutex (&client_mutex) {
		listnode_add(zrouter.client_list, client);
	}

@@ -797,7 +797,7 @@ struct zserv *zserv_acquire_client(uint8_t proto, unsigned short instance,
{
	struct zserv *client = NULL;

-	frr_with_mutex(&client_mutex) {
+	frr_with_mutex (&client_mutex) {
		client = find_client_internal(proto, instance, session_id);
		if (client) {
			/* Don't return a dead/closed client object */
@@ -823,7 +823,7 @@ void zserv_release_client(struct zserv *client)
	 * for it to be deleted as soon as we release the lock, so we won't
	 * touch the object again.
	 */
-	frr_with_mutex(&client_mutex) {
+	frr_with_mutex (&client_mutex) {
		client->busy_count--;

		if (client->busy_count <= 0) {
@@ -1229,7 +1229,7 @@ struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
{
	struct zserv *client;

-	frr_with_mutex(&client_mutex) {
+	frr_with_mutex (&client_mutex) {
		client = find_client_internal(proto, instance, 0);
	}

@@ -1244,7 +1244,7 @@ struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance,
{
	struct zserv *client;

-	frr_with_mutex(&client_mutex) {
+	frr_with_mutex (&client_mutex) {
		client = find_client_internal(proto, instance, session_id);
	}
