Merge pull request #12953 from donaldsharp/struct_event

Struct event
Jafar Al-Gharaibeh 2023-03-24 13:48:53 -05:00 committed by GitHub
commit 06f54ff416
488 changed files with 5399 additions and 5440 deletions
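
The change is a mechanical rename of libfrr's old "thread" API to the new event API; the hunks below repeat the same substitutions across all 488 files. A summary of the mapping, assembled from the replacements visible in this diff:

/* lib/thread.h (old)                      lib/frrevent.h (new)
 *
 * struct thread_master                 -> struct event_loop
 * struct thread                        -> struct event
 * thread_add_read / thread_add_write   -> event_add_read / event_add_write
 * thread_add_timer / _msec / _tv       -> event_add_timer / _msec / _tv
 * thread_add_event                     -> event_add_event
 * thread_cancel / THREAD_OFF           -> event_cancel / EVENT_OFF
 * THREAD_ARG / THREAD_FD / THREAD_VAL  -> EVENT_ARG / EVENT_FD / EVENT_VAL
 * thread_timer_remain_second           -> event_timer_remain_second
 * thread_is_scheduled                  -> event_is_scheduled
 */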

View File

@ -8,7 +8,7 @@ Copyright 2011 by Matthieu Boutier and Juliusz Chroboczek
#include "getopt.h"
#include "if.h"
#include "log.h"
#include "thread.h"
#include "frrevent.h"
#include "privs.h"
#include "sigevent.h"
#include "lib/version.h"
@ -37,7 +37,7 @@ static void babel_exit_properly(void);
static void babel_save_state_file(void);
struct thread_master *master; /* quagga's threads handler */
struct event_loop *master; /* quagga's threads handler */
struct timeval babel_now; /* current time */
unsigned char myid[8]; /* unique id (mac address of an interface) */

View File

@ -9,7 +9,7 @@ Copyright 2011 by Matthieu Boutier and Juliusz Chroboczek
#include "vty.h"
extern struct timeval babel_now; /* current time */
extern struct thread_master *master; /* quagga's threads handler */
extern struct event_loop *master; /* quagga's threads handler */
extern int debug;
extern int resend_delay;

View File

@ -37,11 +37,11 @@ Copyright 2011 by Matthieu Boutier and Juliusz Chroboczek
DEFINE_MGROUP(BABELD, "babeld");
DEFINE_MTYPE_STATIC(BABELD, BABEL, "Babel Structure");
static void babel_init_routing_process(struct thread *thread);
static void babel_init_routing_process(struct event *thread);
static void babel_get_myid(void);
static void babel_initial_noise(void);
static void babel_read_protocol(struct thread *thread);
static void babel_main_loop(struct thread *thread);
static void babel_read_protocol(struct event *thread);
static void babel_main_loop(struct event *thread);
static void babel_set_timer(struct timeval *timeout);
static void babel_fill_with_next_timeout(struct timeval *tv);
static void
@ -148,9 +148,11 @@ babel_create_routing_process (void)
}
/* Threads. */
thread_add_read(master, babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
event_add_read(master, babel_read_protocol, NULL, protocol_socket,
&babel_routing_process->t_read);
/* wait a little: zebra will announce interfaces, addresses, routes... */
thread_add_timer_msec(master, babel_init_routing_process, NULL, 200L, &babel_routing_process->t_update);
event_add_timer_msec(master, babel_init_routing_process, NULL, 200L,
&babel_routing_process->t_update);
/* Distribute list install. */
babel_routing_process->distribute_ctx = distribute_list_ctx_create (vrf_lookup_by_id(VRF_DEFAULT));
@ -163,7 +165,7 @@ fail:
}
/* thread reading entries from other babel daemons */
static void babel_read_protocol(struct thread *thread)
static void babel_read_protocol(struct event *thread)
{
int rc;
struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
@ -193,13 +195,14 @@ static void babel_read_protocol(struct thread *thread)
}
/* re-add thread */
thread_add_read(master, &babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
event_add_read(master, &babel_read_protocol, NULL, protocol_socket,
&babel_routing_process->t_read);
}
/* Zebra will give some information, especially about interfaces. This function
must be called with a little timeout, which may give zebra the time to do its
job, making these inits make sense. */
static void babel_init_routing_process(struct thread *thread)
static void babel_init_routing_process(struct event *thread)
{
myseqno = (frr_weak_random() & 0xFFFF);
babel_get_myid();
@ -303,15 +306,15 @@ babel_clean_routing_process(void)
babel_interface_close_all();
/* cancel events */
thread_cancel(&babel_routing_process->t_read);
thread_cancel(&babel_routing_process->t_update);
event_cancel(&babel_routing_process->t_read);
event_cancel(&babel_routing_process->t_update);
distribute_list_delete(&babel_routing_process->distribute_ctx);
XFREE(MTYPE_BABEL, babel_routing_process);
}
/* Function used with timeout. */
static void babel_main_loop(struct thread *thread)
static void babel_main_loop(struct event *thread)
{
struct timeval tv;
struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
@ -482,8 +485,9 @@ static void
babel_set_timer(struct timeval *timeout)
{
long msecs = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
thread_cancel(&(babel_routing_process->t_update));
thread_add_timer_msec(master, babel_main_loop, NULL, msecs, &babel_routing_process->t_update);
event_cancel(&(babel_routing_process->t_update));
event_add_timer_msec(master, babel_main_loop, NULL, msecs,
&babel_routing_process->t_update);
}
void
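
A shape that recurs throughout the babeld hunks above: handlers return void and events are one-shot, so a read callback must re-arm itself to keep watching its socket. A minimal sketch under the new names — my_read_cb and t_read are hypothetical placeholders; master is the daemon's event loop as declared above:

#include "frrevent.h"

extern struct event_loop *master;
static struct event *t_read;    /* the scheduler parks the pending event here */

static void my_read_cb(struct event *t)
{
        int fd = EVENT_FD(t);   /* recover the watched descriptor */

        /* ... read and process data from fd ... */

        /* events fire once; re-add ourselves to keep the socket watched */
        event_add_read(master, my_read_cb, NULL, fd, &t_read);
}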

View File

@ -84,8 +84,8 @@ Copyright 2011 by Matthieu Boutier and Juliusz Chroboczek
struct babel
{
/* Babel threads. */
struct thread *t_read; /* on Babel protocol's socket */
struct thread *t_update; /* timers */
struct event *t_read; /* on Babel protocol's socket */
struct event *t_update; /* timers */
/* distribute_ctx */
struct distribute_ctx *distribute_ctx;
};

View File

@ -29,7 +29,7 @@ Copyright 2011, 2012 by Matthieu Boutier and Juliusz Chroboczek
#include "command.h"
#include "vty.h"
#include "memory.h"
#include "thread.h"
#include "frrevent.h"
#include "nexthop.h"
#include "util.h"

View File

@ -618,25 +618,25 @@ struct bfd_session *ptm_bfd_sess_find(struct bfd_pkt *cp,
return bfd_key_lookup(key);
}
void bfd_xmt_cb(struct thread *t)
void bfd_xmt_cb(struct event *t)
{
struct bfd_session *bs = THREAD_ARG(t);
struct bfd_session *bs = EVENT_ARG(t);
ptm_bfd_xmt_TO(bs, 0);
}
void bfd_echo_xmt_cb(struct thread *t)
void bfd_echo_xmt_cb(struct event *t)
{
struct bfd_session *bs = THREAD_ARG(t);
struct bfd_session *bs = EVENT_ARG(t);
if (bs->echo_xmt_TO > 0)
ptm_bfd_echo_xmt_TO(bs);
}
/* Was ptm_bfd_detect_TO() */
void bfd_recvtimer_cb(struct thread *t)
void bfd_recvtimer_cb(struct event *t)
{
struct bfd_session *bs = THREAD_ARG(t);
struct bfd_session *bs = EVENT_ARG(t);
switch (bs->ses_state) {
case PTM_BFD_INIT:
@ -647,9 +647,9 @@ void bfd_recvtimer_cb(struct thread *t)
}
/* Was ptm_bfd_echo_detect_TO() */
void bfd_echo_recvtimer_cb(struct thread *t)
void bfd_echo_recvtimer_cb(struct event *t)
{
struct bfd_session *bs = THREAD_ARG(t);
struct bfd_session *bs = EVENT_ARG(t);
switch (bs->ses_state) {
case PTM_BFD_INIT:
@ -1957,23 +1957,23 @@ static int bfd_vrf_enable(struct vrf *vrf)
bvrf->bg_echov6 = bp_echov6_socket(vrf);
if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
&bvrf->bg_ev[0]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
&bvrf->bg_ev[0]);
if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
&bvrf->bg_ev[1]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
&bvrf->bg_ev[1]);
if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
&bvrf->bg_ev[2]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
&bvrf->bg_ev[2]);
if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
&bvrf->bg_ev[3]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
&bvrf->bg_ev[3]);
if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
&bvrf->bg_ev[4]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
&bvrf->bg_ev[4]);
if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
&bvrf->bg_ev[5]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
&bvrf->bg_ev[5]);
if (vrf->vrf_id != VRF_DEFAULT) {
bfdd_zclient_register(vrf->vrf_id);
@ -1999,12 +1999,12 @@ static int bfd_vrf_disable(struct vrf *vrf)
zlog_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id);
/* Disable read/write poll triggering. */
THREAD_OFF(bvrf->bg_ev[0]);
THREAD_OFF(bvrf->bg_ev[1]);
THREAD_OFF(bvrf->bg_ev[2]);
THREAD_OFF(bvrf->bg_ev[3]);
THREAD_OFF(bvrf->bg_ev[4]);
THREAD_OFF(bvrf->bg_ev[5]);
EVENT_OFF(bvrf->bg_ev[0]);
EVENT_OFF(bvrf->bg_ev[1]);
EVENT_OFF(bvrf->bg_ev[2]);
EVENT_OFF(bvrf->bg_ev[3]);
EVENT_OFF(bvrf->bg_ev[4]);
EVENT_OFF(bvrf->bg_ev[5]);
/* Close all descriptors. */
socket_close(&bvrf->bg_echo);
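
The timer callbacks above (bfd_xmt_cb and friends) all share one idiom: the session pointer is handed to the scheduler when the timer is armed and recovered with EVENT_ARG() when it fires. Both halves of the pair, condensed from this file and from bfdd/event.c further down:

/* arming side: bs rides along as the callback argument */
event_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);

/* firing side: EVENT_ARG() hands back that same pointer */
void bfd_xmt_cb(struct event *t)
{
        struct bfd_session *bs = EVENT_ARG(t);

        ptm_bfd_xmt_TO(bs, 0);
}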

View File

@ -265,12 +265,12 @@ struct bfd_session {
struct bfd_config_timers timers;
struct bfd_timers cur_timers;
uint64_t detect_TO;
struct thread *echo_recvtimer_ev;
struct thread *recvtimer_ev;
struct event *echo_recvtimer_ev;
struct event *recvtimer_ev;
uint64_t xmt_TO;
uint64_t echo_xmt_TO;
struct thread *xmttimer_ev;
struct thread *echo_xmttimer_ev;
struct event *xmttimer_ev;
struct event *echo_xmttimer_ev;
uint64_t echo_detect_TO;
/* software object state */
@ -401,8 +401,8 @@ struct bfd_control_socket {
TAILQ_ENTRY(bfd_control_socket) bcs_entry;
int bcs_sd;
struct thread *bcs_ev;
struct thread *bcs_outev;
struct event *bcs_ev;
struct event *bcs_outev;
struct bcqueue bcs_bcqueue;
/* Notification data */
@ -422,7 +422,7 @@ int control_init(const char *path);
void control_shutdown(void);
int control_notify(struct bfd_session *bs, uint8_t notify_state);
int control_notify_config(const char *op, struct bfd_session *bs);
void control_accept(struct thread *t);
void control_accept(struct event *t);
/*
@ -439,7 +439,7 @@ struct bfd_vrf_global {
int bg_echov6;
struct vrf *vrf;
struct thread *bg_ev[6];
struct event *bg_ev[6];
};
/* Forward declaration of data plane context struct. */
@ -448,7 +448,7 @@ TAILQ_HEAD(dplane_queue, bfd_dplane_ctx);
struct bfd_global {
int bg_csock;
struct thread *bg_csockev;
struct event *bg_csockev;
struct bcslist bg_bcslist;
struct pllist bg_pllist;
@ -466,7 +466,7 @@ struct bfd_global {
/* Distributed BFD items. */
bool bg_use_dplane;
int bg_dplane_sock;
struct thread *bg_dplane_sockev;
struct event *bg_dplane_sockev;
struct dplane_queue bg_dplaneq;
/* Debug options. */
@ -553,7 +553,7 @@ void ptm_bfd_snd(struct bfd_session *bfd, int fbit);
void ptm_bfd_echo_snd(struct bfd_session *bfd);
void ptm_bfd_echo_fp_snd(struct bfd_session *bfd);
void bfd_recv_cb(struct thread *t);
void bfd_recv_cb(struct event *t);
/*
@ -561,7 +561,7 @@ void bfd_recv_cb(struct thread *t);
*
* Contains the code related with event loop.
*/
typedef void (*bfd_ev_cb)(struct thread *t);
typedef void (*bfd_ev_cb)(struct event *t);
void bfd_recvtimer_update(struct bfd_session *bs);
void bfd_echo_recvtimer_update(struct bfd_session *bs);
@ -686,12 +686,12 @@ void bfd_key_iterate(hash_iter_func hif, void *arg);
unsigned long bfd_get_session_count(void);
/* Export callback functions for `event.c`. */
extern struct thread_master *master;
extern struct event_loop *master;
void bfd_recvtimer_cb(struct thread *t);
void bfd_echo_recvtimer_cb(struct thread *t);
void bfd_xmt_cb(struct thread *t);
void bfd_echo_xmt_cb(struct thread *t);
void bfd_recvtimer_cb(struct event *t);
void bfd_echo_recvtimer_cb(struct event *t);
void bfd_xmt_cb(struct event *t);
void bfd_echo_xmt_cb(struct event *t);
extern struct in6_addr zero_addr;

View File

@ -701,29 +701,29 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl,
static void bfd_sd_reschedule(struct bfd_vrf_global *bvrf, int sd)
{
if (sd == bvrf->bg_shop) {
THREAD_OFF(bvrf->bg_ev[0]);
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
&bvrf->bg_ev[0]);
EVENT_OFF(bvrf->bg_ev[0]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
&bvrf->bg_ev[0]);
} else if (sd == bvrf->bg_mhop) {
THREAD_OFF(bvrf->bg_ev[1]);
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
&bvrf->bg_ev[1]);
EVENT_OFF(bvrf->bg_ev[1]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
&bvrf->bg_ev[1]);
} else if (sd == bvrf->bg_shop6) {
THREAD_OFF(bvrf->bg_ev[2]);
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
&bvrf->bg_ev[2]);
EVENT_OFF(bvrf->bg_ev[2]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
&bvrf->bg_ev[2]);
} else if (sd == bvrf->bg_mhop6) {
THREAD_OFF(bvrf->bg_ev[3]);
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
&bvrf->bg_ev[3]);
EVENT_OFF(bvrf->bg_ev[3]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
&bvrf->bg_ev[3]);
} else if (sd == bvrf->bg_echo) {
THREAD_OFF(bvrf->bg_ev[4]);
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
&bvrf->bg_ev[4]);
EVENT_OFF(bvrf->bg_ev[4]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
&bvrf->bg_ev[4]);
} else if (sd == bvrf->bg_echov6) {
THREAD_OFF(bvrf->bg_ev[5]);
thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
&bvrf->bg_ev[5]);
EVENT_OFF(bvrf->bg_ev[5]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
&bvrf->bg_ev[5]);
}
}
@ -768,9 +768,9 @@ static void cp_debug(bool mhop, struct sockaddr_any *peer,
mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr);
}
void bfd_recv_cb(struct thread *t)
void bfd_recv_cb(struct event *t)
{
int sd = THREAD_FD(t);
int sd = EVENT_FD(t);
struct bfd_session *bfd;
struct bfd_pkt *cp;
bool is_mhop;
@ -781,7 +781,7 @@ void bfd_recv_cb(struct thread *t)
struct sockaddr_any local, peer;
uint8_t msgbuf[1516];
struct interface *ifp = NULL;
struct bfd_vrf_global *bvrf = THREAD_ARG(t);
struct bfd_vrf_global *bvrf = EVENT_ARG(t);
/* Schedule next read. */
bfd_sd_reschedule(bvrf, sd);
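
bfd_sd_reschedule() above cancels before it re-adds: EVENT_OFF() is written to be a no-op on an event pointer that is not currently scheduled, so each branch can clear its slot unconditionally and never ends up with two read events armed on the same socket. One branch, reduced to the pattern:

EVENT_OFF(bvrf->bg_ev[0]);      /* safe even if nothing is scheduled */
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop, &bvrf->bg_ev[0]);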

View File

@ -32,7 +32,7 @@ DEFINE_MTYPE(BFDD, BFDD_CONTROL, "long-lived control socket memory");
DEFINE_MTYPE(BFDD, BFDD_NOTIFICATION, "short-lived control notification data");
/* Master of threads. */
struct thread_master *master;
struct event_loop *master;
/* BFDd privileges */
static zebra_capabilities_t _caps_p[] = {ZCAP_BIND, ZCAP_SYS_ADMIN, ZCAP_NET_RAW};
@ -375,8 +375,8 @@ int main(int argc, char *argv[])
/* Initialize zebra connection. */
bfdd_zclient_init(&bglobal.bfdd_privs);
thread_add_read(master, control_accept, NULL, bglobal.bg_csock,
&bglobal.bg_csockev);
event_add_read(master, control_accept, NULL, bglobal.bg_csock,
&bglobal.bg_csockev);
/* Install commands. */
bfdd_vty_init();

View File

@ -39,8 +39,8 @@ struct bfd_notify_peer *control_notifypeer_find(struct bfd_control_socket *bcs,
struct bfd_control_socket *control_new(int sd);
static void control_free(struct bfd_control_socket *bcs);
static void control_reset_buf(struct bfd_control_buffer *bcb);
static void control_read(struct thread *t);
static void control_write(struct thread *t);
static void control_read(struct event *t);
static void control_write(struct event *t);
static void control_handle_request_add(struct bfd_control_socket *bcs,
struct bfd_control_msg *bcm);
@ -132,7 +132,7 @@ void control_shutdown(void)
{
struct bfd_control_socket *bcs;
thread_cancel(&bglobal.bg_csockev);
event_cancel(&bglobal.bg_csockev);
socket_close(&bglobal.bg_csock);
@ -142,9 +142,9 @@ void control_shutdown(void)
}
}
void control_accept(struct thread *t)
void control_accept(struct event *t)
{
int csock, sd = THREAD_FD(t);
int csock, sd = EVENT_FD(t);
csock = accept(sd, NULL, 0);
if (csock == -1) {
@ -154,7 +154,7 @@ void control_accept(struct thread *t)
control_new(csock);
thread_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
event_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
}
@ -171,7 +171,7 @@ struct bfd_control_socket *control_new(int sd)
bcs->bcs_notify = 0;
bcs->bcs_sd = sd;
thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
TAILQ_INIT(&bcs->bcs_bcqueue);
TAILQ_INIT(&bcs->bcs_bnplist);
@ -185,8 +185,8 @@ static void control_free(struct bfd_control_socket *bcs)
struct bfd_control_queue *bcq;
struct bfd_notify_peer *bnp;
thread_cancel(&(bcs->bcs_ev));
thread_cancel(&(bcs->bcs_outev));
event_cancel(&(bcs->bcs_ev));
event_cancel(&(bcs->bcs_outev));
close(bcs->bcs_sd);
@ -286,13 +286,13 @@ static int control_queue_dequeue(struct bfd_control_socket *bcs)
bcs->bcs_bout = &bcq->bcq_bcb;
bcs->bcs_outev = NULL;
thread_add_write(master, control_write, bcs, bcs->bcs_sd,
&bcs->bcs_outev);
event_add_write(master, control_write, bcs, bcs->bcs_sd,
&bcs->bcs_outev);
return 1;
empty_list:
thread_cancel(&(bcs->bcs_outev));
event_cancel(&(bcs->bcs_outev));
bcs->bcs_bout = NULL;
return 0;
}
@ -315,8 +315,8 @@ static int control_queue_enqueue(struct bfd_control_socket *bcs,
bcs->bcs_bout = bcb;
/* New messages, active write events. */
thread_add_write(master, control_write, bcs, bcs->bcs_sd,
&bcs->bcs_outev);
event_add_write(master, control_write, bcs, bcs->bcs_sd,
&bcs->bcs_outev);
}
return 0;
@ -379,9 +379,9 @@ static void control_reset_buf(struct bfd_control_buffer *bcb)
bcb->bcb_left = 0;
}
static void control_read(struct thread *t)
static void control_read(struct event *t)
{
struct bfd_control_socket *bcs = THREAD_ARG(t);
struct bfd_control_socket *bcs = EVENT_ARG(t);
struct bfd_control_buffer *bcb = &bcs->bcs_bin;
int sd = bcs->bcs_sd;
struct bfd_control_msg bcm;
@ -511,12 +511,12 @@ skip_header:
schedule_next_read:
bcs->bcs_ev = NULL;
thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
}
static void control_write(struct thread *t)
static void control_write(struct event *t)
{
struct bfd_control_socket *bcs = THREAD_ARG(t);
struct bfd_control_socket *bcs = EVENT_ARG(t);
struct bfd_control_buffer *bcb = bcs->bcs_bout;
int sd = bcs->bcs_sd;
ssize_t bwrite;
@ -529,8 +529,8 @@ static void control_write(struct thread *t)
if (bwrite < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
bcs->bcs_outev = NULL;
thread_add_write(master, control_write, bcs,
bcs->bcs_sd, &bcs->bcs_outev);
event_add_write(master, control_write, bcs, bcs->bcs_sd,
&bcs->bcs_outev);
return;
}
@ -543,8 +543,8 @@ static void control_write(struct thread *t)
bcb->bcb_left -= bwrite;
if (bcb->bcb_left > 0) {
bcs->bcs_outev = NULL;
thread_add_write(master, control_write, bcs, bcs->bcs_sd,
&bcs->bcs_outev);
event_add_write(master, control_write, bcs, bcs->bcs_sd,
&bcs->bcs_outev);
return;
}
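
control_write() above is the standard non-blocking write dance under the renamed API: on a would-block error or a partial write, re-arm a write event instead of spinning. A minimal sketch — struct my_conn is a hypothetical stand-in for bfd_control_socket:

#include <errno.h>
#include <unistd.h>
#include "frrevent.h"

extern struct event_loop *master;

struct my_conn {
        int sd;                 /* non-blocking socket */
        struct event *outev;    /* pending write event, if any */
        const char *buf;
        size_t left;
};

static void my_write_cb(struct event *t)
{
        struct my_conn *c = EVENT_ARG(t);
        ssize_t n = write(c->sd, c->buf, c->left);

        if (n < 0) {
                if (errno == EAGAIN || errno == EWOULDBLOCK ||
                    errno == EINTR)
                        /* not writable yet: ask the loop to try us again */
                        event_add_write(master, my_write_cb, c, c->sd,
                                        &c->outev);
                return; /* on hard errors a real caller tears down here */
        }

        c->buf += n;
        c->left -= n;
        if (c->left > 0)        /* partial write: re-arm for the rest */
                event_add_write(master, my_write_cb, c, c->sd, &c->outev);
}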

View File

@ -26,7 +26,7 @@
#include "lib/network.h"
#include "lib/printfrr.h"
#include "lib/stream.h"
#include "lib/thread.h"
#include "lib/frrevent.h"
#include "bfd.h"
#include "bfddp_packet.h"
@ -63,11 +63,11 @@ struct bfd_dplane_ctx {
/** Output buffer data. */
struct stream *outbuf;
/** Input event data. */
struct thread *inbufev;
struct event *inbufev;
/** Output event data. */
struct thread *outbufev;
struct event *outbufev;
/** Connection event. */
struct thread *connectev;
struct event *connectev;
/** Amount of bytes read. */
uint64_t in_bytes;
@ -94,7 +94,7 @@ struct bfd_dplane_ctx {
*/
typedef void (*bfd_dplane_expect_cb)(struct bfddp_message *msg, void *arg);
static void bfd_dplane_client_connect(struct thread *t);
static void bfd_dplane_client_connect(struct event *t);
static bool bfd_dplane_client_connecting(struct bfd_dplane_ctx *bdc);
static void bfd_dplane_ctx_free(struct bfd_dplane_ctx *bdc);
static int _bfd_dplane_add_session(struct bfd_dplane_ctx *bdc,
@ -307,14 +307,14 @@ static ssize_t bfd_dplane_flush(struct bfd_dplane_ctx *bdc)
stream_pulldown(bdc->outbuf);
/* Disable write ready events. */
THREAD_OFF(bdc->outbufev);
EVENT_OFF(bdc->outbufev);
return total;
}
static void bfd_dplane_write(struct thread *t)
static void bfd_dplane_write(struct event *t)
{
struct bfd_dplane_ctx *bdc = THREAD_ARG(t);
struct bfd_dplane_ctx *bdc = EVENT_ARG(t);
/* Handle connection stage. */
if (bdc->connecting && bfd_dplane_client_connecting(bdc))
@ -429,8 +429,8 @@ static int bfd_dplane_enqueue(struct bfd_dplane_ctx *bdc, const void *buf,
/* Schedule if it is not yet. */
if (bdc->outbufev == NULL)
thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
&bdc->outbufev);
event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
&bdc->outbufev);
return 0;
}
@ -599,9 +599,9 @@ skip_read:
return 0;
}
static void bfd_dplane_read(struct thread *t)
static void bfd_dplane_read(struct event *t)
{
struct bfd_dplane_ctx *bdc = THREAD_ARG(t);
struct bfd_dplane_ctx *bdc = EVENT_ARG(t);
int rv;
rv = bfd_dplane_expect(bdc, 0, bfd_dplane_handle_message, NULL);
@ -609,7 +609,7 @@ static void bfd_dplane_read(struct thread *t)
return;
stream_pulldown(bdc->inbuf);
thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
}
static void _bfd_session_register_dplane(struct hash_bucket *hb, void *arg)
@ -641,7 +641,7 @@ static struct bfd_dplane_ctx *bfd_dplane_ctx_new(int sock)
if (sock == -1)
return bdc;
thread_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);
event_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);
/* Register all unattached sessions. */
bfd_key_iterate(_bfd_session_register_dplane, bdc);
@ -672,7 +672,7 @@ static void bfd_dplane_ctx_free(struct bfd_dplane_ctx *bdc)
/* Client mode has special treatment. */
if (bdc->client) {
/* Disable connection event if any. */
THREAD_OFF(bdc->connectev);
EVENT_OFF(bdc->connectev);
/* Normal treatment on shutdown. */
if (bglobal.bg_shutdown)
@ -680,10 +680,10 @@ static void bfd_dplane_ctx_free(struct bfd_dplane_ctx *bdc)
/* Attempt reconnection. */
socket_close(&bdc->sock);
THREAD_OFF(bdc->inbufev);
THREAD_OFF(bdc->outbufev);
thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
&bdc->connectev);
EVENT_OFF(bdc->inbufev);
EVENT_OFF(bdc->outbufev);
event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
&bdc->connectev);
return;
}
@ -699,8 +699,8 @@ free_resources:
socket_close(&bdc->sock);
stream_free(bdc->inbuf);
stream_free(bdc->outbuf);
THREAD_OFF(bdc->inbufev);
THREAD_OFF(bdc->outbufev);
EVENT_OFF(bdc->inbufev);
EVENT_OFF(bdc->outbufev);
XFREE(MTYPE_BFDD_DPLANE_CTX, bdc);
}
@ -819,9 +819,9 @@ static uint16_t bfd_dplane_request_counters(const struct bfd_session *bs)
/*
* Data plane listening socket.
*/
static void bfd_dplane_accept(struct thread *t)
static void bfd_dplane_accept(struct event *t)
{
struct bfd_global *bg = THREAD_ARG(t);
struct bfd_global *bg = EVENT_ARG(t);
struct bfd_dplane_ctx *bdc;
int sock;
@ -840,8 +840,8 @@ static void bfd_dplane_accept(struct thread *t)
zlog_debug("%s: new data plane client connected", __func__);
reschedule_and_return:
thread_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
&bglobal.bg_dplane_sockev);
event_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
&bglobal.bg_dplane_sockev);
}
/*
@ -856,7 +856,7 @@ static void _bfd_dplane_client_bootstrap(struct bfd_dplane_ctx *bdc)
stream_reset(bdc->outbuf);
/* Ask for read notifications. */
thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
/* Remove all sessions then register again to send them all. */
bfd_key_iterate(_bfd_session_unregister_dplane, bdc);
@ -899,9 +899,9 @@ static bool bfd_dplane_client_connecting(struct bfd_dplane_ctx *bdc)
}
}
static void bfd_dplane_client_connect(struct thread *t)
static void bfd_dplane_client_connect(struct event *t)
{
struct bfd_dplane_ctx *bdc = THREAD_ARG(t);
struct bfd_dplane_ctx *bdc = EVENT_ARG(t);
int rv, sock;
socklen_t rvlen = sizeof(rv);
@ -938,8 +938,8 @@ static void bfd_dplane_client_connect(struct thread *t)
/* If we are not connected yet, ask for write notifications. */
bdc->connecting = true;
thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
&bdc->outbufev);
event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
&bdc->outbufev);
} else {
if (bglobal.debug_dplane)
zlog_debug("%s: server connection: %d", __func__, sock);
@ -949,11 +949,11 @@ static void bfd_dplane_client_connect(struct thread *t)
}
reschedule_connect:
THREAD_OFF(bdc->inbufev);
THREAD_OFF(bdc->outbufev);
EVENT_OFF(bdc->inbufev);
EVENT_OFF(bdc->outbufev);
socket_close(&sock);
thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
&bdc->connectev);
event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
&bdc->connectev);
}
static void bfd_dplane_client_init(const struct sockaddr *sa, socklen_t salen)
@ -974,8 +974,8 @@ static void bfd_dplane_client_init(const struct sockaddr *sa, socklen_t salen)
bdc->client = true;
thread_add_timer(master, bfd_dplane_client_connect, bdc, 0,
&bdc->connectev);
event_add_timer(master, bfd_dplane_client_connect, bdc, 0,
&bdc->connectev);
/* Insert into data plane lists. */
TAILQ_INSERT_TAIL(&bglobal.bg_dplaneq, bdc, entry);
@ -997,7 +997,7 @@ static int bfd_dplane_finish_late(void)
bfd_dplane_ctx_free(bdc);
/* Cancel accept thread and close socket. */
THREAD_OFF(bglobal.bg_dplane_sockev);
EVENT_OFF(bglobal.bg_dplane_sockev);
close(bglobal.bg_dplane_sock);
return 0;
@ -1067,8 +1067,8 @@ void bfd_dplane_init(const struct sockaddr *sa, socklen_t salen, bool client)
}
bglobal.bg_dplane_sock = sock;
thread_add_read(master, bfd_dplane_accept, &bglobal, sock,
&bglobal.bg_dplane_sockev);
event_add_read(master, bfd_dplane_accept, &bglobal, sock,
&bglobal.bg_dplane_sockev);
}
int bfd_dplane_add_session(struct bfd_session *bs)
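
Two lifecycle details in the dplane client code above: the very first connect is scheduled as a zero-delay timer (event_add_timer(..., 0, ...)) so it runs from the event loop rather than inline during init, and every failure path funnels into the same three-second retry. The retry shape, condensed from bfd_dplane_client_connect():

/* on failure: drop I/O events, close the socket, retry in 3 seconds */
EVENT_OFF(bdc->inbufev);
EVENT_OFF(bdc->outbufev);
socket_close(&sock);
event_add_timer(master, bfd_dplane_client_connect, bdc, 3, &bdc->connectev);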

View File

@ -36,8 +36,8 @@ void bfd_recvtimer_update(struct bfd_session *bs)
tv_normalize(&tv);
thread_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
&bs->recvtimer_ev);
event_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
&bs->recvtimer_ev);
}
void bfd_echo_recvtimer_update(struct bfd_session *bs)
@ -54,8 +54,8 @@ void bfd_echo_recvtimer_update(struct bfd_session *bs)
tv_normalize(&tv);
thread_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
&bs->echo_recvtimer_ev);
event_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
&bs->echo_recvtimer_ev);
}
void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
@ -72,7 +72,7 @@ void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
tv_normalize(&tv);
thread_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
event_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
}
void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
@ -89,26 +89,26 @@ void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
tv_normalize(&tv);
thread_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
&bs->echo_xmttimer_ev);
event_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
&bs->echo_xmttimer_ev);
}
void bfd_recvtimer_delete(struct bfd_session *bs)
{
THREAD_OFF(bs->recvtimer_ev);
EVENT_OFF(bs->recvtimer_ev);
}
void bfd_echo_recvtimer_delete(struct bfd_session *bs)
{
THREAD_OFF(bs->echo_recvtimer_ev);
EVENT_OFF(bs->echo_recvtimer_ev);
}
void bfd_xmttimer_delete(struct bfd_session *bs)
{
THREAD_OFF(bs->xmttimer_ev);
EVENT_OFF(bs->xmttimer_ev);
}
void bfd_echo_xmttimer_delete(struct bfd_session *bs)
{
THREAD_OFF(bs->echo_xmttimer_ev);
EVENT_OFF(bs->echo_xmttimer_ev);
}
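
After the rename this file reduces to two primitives: arm a timer from a struct timeval with event_add_timer_tv(), cancel it with EVENT_OFF() — each *_delete() helper above is a single EVENT_OFF(). One arm/cancel pair, assuming a struct bfd_session *bs as in the functions above:

struct timeval tv = { .tv_sec = 0, .tv_usec = 300 * 1000 };     /* 300 ms */

/* arm: bs is handed back to bfd_recvtimer_cb() via EVENT_ARG() */
event_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv, &bs->recvtimer_ev);

/* cancel: this is the entire body of bfd_recvtimer_delete() */
EVENT_OFF(bs->recvtimer_ev);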

View File

@ -9,7 +9,7 @@
#include "memory.h"
#include "prefix.h"
#include "hash.h"
#include "thread.h"
#include "frrevent.h"
#include "queue.h"
#include "filter.h"

View File

@ -11,7 +11,7 @@
#include "linklist.h"
#include "memory.h"
#include "prefix.h"
#include "thread.h"
#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "vrf.h"
@ -609,7 +609,7 @@ DEFUN(no_neighbor_bfd_profile, no_neighbor_bfd_profile_cmd,
}
#endif /* HAVE_BFDD */
void bgp_bfd_init(struct thread_master *tm)
void bgp_bfd_init(struct event_loop *tm)
{
/* Initialize BFD client functions */
bfd_protocol_integration_init(zclient, tm);

View File

@ -12,7 +12,7 @@
((((peer)->sort == BGP_PEER_IBGP) && !(peer)->shared_network) \
|| is_ebgp_multihop_configured((peer)))
extern void bgp_bfd_init(struct thread_master *tm);
extern void bgp_bfd_init(struct event_loop *tm);
extern void bgp_bfd_peer_config_write(struct vty *vty, const struct peer *peer,
const char *addr);

View File

@ -11,7 +11,7 @@
#include "sockunion.h"
#include "command.h"
#include "prefix.h"
#include "thread.h"
#include "frrevent.h"
#include "linklist.h"
#include "queue.h"
#include "pullwr.h"
@ -1335,17 +1335,17 @@ static void bmp_stat_put_u32(struct stream *s, size_t *cnt, uint16_t type,
(*cnt)++;
}
static void bmp_stats(struct thread *thread)
static void bmp_stats(struct event *thread)
{
struct bmp_targets *bt = THREAD_ARG(thread);
struct bmp_targets *bt = EVENT_ARG(thread);
struct stream *s;
struct peer *peer;
struct listnode *node;
struct timeval tv;
if (bt->stat_msec)
thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
&bt->t_stats);
event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
&bt->t_stats);
gettimeofday(&tv, NULL);
@ -1388,9 +1388,9 @@ static void bmp_stats(struct thread *thread)
}
/* read from the BMP socket to detect session termination */
static void bmp_read(struct thread *t)
static void bmp_read(struct event *t)
{
struct bmp *bmp = THREAD_ARG(t);
struct bmp *bmp = EVENT_ARG(t);
char buf[1024];
ssize_t n;
@ -1409,7 +1409,7 @@ static void bmp_read(struct thread *t)
return;
}
thread_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
event_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
}
static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
@ -1485,21 +1485,21 @@ static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
bmp->state = BMP_PeerUp;
bmp->pullwr = pullwr_new(bm->master, bmp_sock, bmp, bmp_wrfill,
bmp_wrerr);
thread_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
event_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
bmp_send_initiation(bmp);
return bmp;
}
/* Accept BMP connection. */
static void bmp_accept(struct thread *thread)
static void bmp_accept(struct event *thread)
{
union sockunion su;
struct bmp_listener *bl = THREAD_ARG(thread);
struct bmp_listener *bl = EVENT_ARG(thread);
int bmp_sock;
/* We continue listening on the BMP socket. */
thread_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
event_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
memset(&su, 0, sizeof(union sockunion));
@ -1517,7 +1517,7 @@ static void bmp_close(struct bmp *bmp)
struct bmp_queue_entry *bqe;
struct bmp_mirrorq *bmq;
THREAD_OFF(bmp->t_read);
EVENT_OFF(bmp->t_read);
if (bmp->active)
bmp_active_disconnected(bmp->active);
@ -1529,7 +1529,7 @@ static void bmp_close(struct bmp *bmp)
if (!bqe->refcount)
XFREE(MTYPE_BMP_QUEUE, bqe);
THREAD_OFF(bmp->t_read);
EVENT_OFF(bmp->t_read);
pullwr_del(bmp->pullwr);
close(bmp->socket);
}
@ -1644,7 +1644,7 @@ static void bmp_targets_put(struct bmp_targets *bt)
struct bmp *bmp;
struct bmp_active *ba;
THREAD_OFF(bt->t_stats);
EVENT_OFF(bt->t_stats);
frr_each_safe (bmp_actives, &bt->actives, ba)
bmp_active_put(ba);
@ -1721,7 +1721,7 @@ static void bmp_listener_start(struct bmp_listener *bl)
goto out_sock;
bl->sock = sock;
thread_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
event_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
return;
out_sock:
close(sock);
@ -1729,7 +1729,7 @@ out_sock:
static void bmp_listener_stop(struct bmp_listener *bl)
{
THREAD_OFF(bl->t_accept);
EVENT_OFF(bl->t_accept);
if (bl->sock != -1)
close(bl->sock);
@ -1768,9 +1768,9 @@ static struct bmp_active *bmp_active_get(struct bmp_targets *bt,
static void bmp_active_put(struct bmp_active *ba)
{
THREAD_OFF(ba->t_timer);
THREAD_OFF(ba->t_read);
THREAD_OFF(ba->t_write);
EVENT_OFF(ba->t_timer);
EVENT_OFF(ba->t_read);
EVENT_OFF(ba->t_write);
bmp_actives_del(&ba->targets->actives, ba);
@ -1902,18 +1902,18 @@ static void bmp_active_resolved(struct resolver_query *resq, const char *errstr,
bmp_active_connect(ba);
}
static void bmp_active_thread(struct thread *t)
static void bmp_active_thread(struct event *t)
{
struct bmp_active *ba = THREAD_ARG(t);
struct bmp_active *ba = EVENT_ARG(t);
socklen_t slen;
int status, ret;
vrf_id_t vrf_id;
/* all 3 end up here, though only timer or read+write are active
* at a time */
THREAD_OFF(ba->t_timer);
THREAD_OFF(ba->t_read);
THREAD_OFF(ba->t_write);
EVENT_OFF(ba->t_timer);
EVENT_OFF(ba->t_read);
EVENT_OFF(ba->t_write);
ba->last_err = NULL;
@ -1967,9 +1967,9 @@ static void bmp_active_disconnected(struct bmp_active *ba)
static void bmp_active_setup(struct bmp_active *ba)
{
THREAD_OFF(ba->t_timer);
THREAD_OFF(ba->t_read);
THREAD_OFF(ba->t_write);
EVENT_OFF(ba->t_timer);
EVENT_OFF(ba->t_read);
EVENT_OFF(ba->t_write);
if (ba->bmp)
return;
@ -1980,12 +1980,12 @@ static void bmp_active_setup(struct bmp_active *ba)
ba->curretry = ba->maxretry;
if (ba->socket == -1)
thread_add_timer_msec(bm->master, bmp_active_thread, ba,
ba->curretry, &ba->t_timer);
event_add_timer_msec(bm->master, bmp_active_thread, ba,
ba->curretry, &ba->t_timer);
else {
thread_add_read(bm->master, bmp_active_thread, ba, ba->socket,
&ba->t_read);
thread_add_write(bm->master, bmp_active_thread, ba, ba->socket,
event_add_read(bm->master, bmp_active_thread, ba, ba->socket,
&ba->t_read);
event_add_write(bm->master, bmp_active_thread, ba, ba->socket,
&ba->t_write);
}
}
@ -2190,7 +2190,7 @@ DEFPY(bmp_stats_cfg,
{
VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
THREAD_OFF(bt->t_stats);
EVENT_OFF(bt->t_stats);
if (no)
bt->stat_msec = 0;
else if (interval_str)
@ -2199,8 +2199,8 @@ DEFPY(bmp_stats_cfg,
bt->stat_msec = BMP_STAT_DEFAULT_TIMER;
if (bt->stat_msec)
thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
&bt->t_stats);
event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
&bt->t_stats);
return CMD_SUCCESS;
}
@ -2408,7 +2408,7 @@ DEFPY(show_bmp,
uptime[0] = '\0';
if (ba->t_timer) {
long trem = thread_timer_remain_second(
long trem = event_timer_remain_second(
ba->t_timer);
peer_uptime(monotime(NULL) - trem,
@ -2526,7 +2526,7 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty)
return 0;
}
static int bgp_bmp_init(struct thread_master *tm)
static int bgp_bmp_init(struct event_loop *tm)
{
install_node(&bmp_node);
install_default(BMP_NODE);
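
One non-mechanical rename in the bgp_bmp.c hunks: the show command derives connection uptime by asking the scheduler how long remains on the retry timer. event_timer_remain_second() is only meaningful while the event is armed, hence the guard:

long trem = 0;

if (ba->t_timer)        /* timer pending: seconds until it fires */
        trem = event_timer_remain_second(ba->t_timer);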

View File

@ -112,7 +112,7 @@ struct bmp {
int socket;
char remote[SU_ADDRSTRLEN + 6];
struct thread *t_read;
struct event *t_read;
struct pullwr *pullwr;
@ -176,7 +176,7 @@ struct bmp_active {
union sockunion addrs[8];
int socket;
const char *last_err;
struct thread *t_timer, *t_read, *t_write;
struct event *t_timer, *t_read, *t_write;
};
/* config & state for passive / listening sockets */
@ -190,7 +190,7 @@ struct bmp_listener {
union sockunion addr;
int port;
struct thread *t_accept;
struct event *t_accept;
int sock;
};
@ -226,7 +226,7 @@ struct bmp_targets {
struct bmp_actives_head actives;
struct thread *t_stats;
struct event *t_stats;
struct bmp_session_head sessions;
struct bmp_qhash_head updhash;

View File

@ -150,7 +150,7 @@ static void bgp_conditional_adv_routes(struct peer *peer, afi_t afi,
/* Handler of conditional advertisement timer event.
* Each route in the condition-map is evaluated.
*/
static void bgp_conditional_adv_timer(struct thread *t)
static void bgp_conditional_adv_timer(struct event *t)
{
afi_t afi;
safi_t safi;
@ -165,11 +165,11 @@ static void bgp_conditional_adv_timer(struct thread *t)
route_map_result_t ret;
bool advmap_table_changed = false;
bgp = THREAD_ARG(t);
bgp = EVENT_ARG(t);
assert(bgp);
thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
bgp->condition_check_period, &bgp->t_condition_check);
event_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
bgp->condition_check_period, &bgp->t_condition_check);
/* loop through each peer and check if we have peers with
* advmap_table_change attribute set, to make sure we send
@ -328,9 +328,9 @@ void bgp_conditional_adv_enable(struct peer *peer, afi_t afi, safi_t safi)
}
/* Register for conditional routes polling timer */
if (!thread_is_scheduled(bgp->t_condition_check))
thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
&bgp->t_condition_check);
if (!event_is_scheduled(bgp->t_condition_check))
event_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
&bgp->t_condition_check);
}
void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
@ -351,7 +351,7 @@ void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
}
/* Last filter removed. So cancel conditional routes polling thread. */
THREAD_OFF(bgp->t_condition_check);
EVENT_OFF(bgp->t_condition_check);
}
static void peer_advertise_map_filter_update(struct peer *peer, afi_t afi,
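
The polling timer above re-arms itself at the top of bgp_conditional_adv_timer(), so the enable path must not arm it a second time; event_is_scheduled() provides the guard. Condensed:

/* arm the conditional-advertisement poll exactly once */
if (!event_is_scheduled(bgp->t_condition_check))
        event_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
                        &bgp->t_condition_check);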

View File

@ -10,7 +10,7 @@
#include "memory.h"
#include "command.h"
#include "log.h"
#include "thread.h"
#include "frrevent.h"
#include "queue.h"
#include "filter.h"
@ -98,17 +98,17 @@ int bgp_damp_decay(time_t tdiff, int penalty, struct bgp_damp_config *bdc)
/* Handler of reuse timer event. Each route in the current reuse-list
is evaluated. RFC2439 Section 4.8.7. */
static void bgp_reuse_timer(struct thread *t)
static void bgp_reuse_timer(struct event *t)
{
struct bgp_damp_info *bdi;
struct bgp_damp_info *next;
time_t t_now, t_diff;
struct bgp_damp_config *bdc = THREAD_ARG(t);
struct bgp_damp_config *bdc = EVENT_ARG(t);
bdc->t_reuse = NULL;
thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
&bdc->t_reuse);
event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
&bdc->t_reuse);
t_now = monotime(NULL);
@ -395,8 +395,8 @@ int bgp_damp_enable(struct bgp *bgp, afi_t afi, safi_t safi, time_t half,
bgp_damp_parameter_set(half, reuse, suppress, max, bdc);
/* Register reuse timer. */
thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
&bdc->t_reuse);
event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
&bdc->t_reuse);
return 0;
}
@ -451,7 +451,7 @@ int bgp_damp_disable(struct bgp *bgp, afi_t afi, safi_t safi)
return 0;
/* Cancel reuse event. */
THREAD_OFF(bdc->t_reuse);
EVENT_OFF(bdc->t_reuse);
/* Clean BGP dampening information. */
bgp_damp_info_clean(afi, safi);

View File

@ -92,7 +92,7 @@ struct bgp_damp_config {
struct bgp_damp_info *no_reuse_list;
/* Reuse timer thread per-set base. */
struct thread *t_reuse;
struct event *t_reuse;
afi_t afi;
safi_t safi;

View File

@ -10,7 +10,7 @@
#include "sockunion.h"
#include "command.h"
#include "prefix.h"
#include "thread.h"
#include "frrevent.h"
#include "linklist.h"
#include "queue.h"
#include "memory.h"
@ -69,11 +69,11 @@ struct bgp_dump {
char *interval_str;
struct thread *t_interval;
struct event *t_interval;
};
static int bgp_dump_unset(struct bgp_dump *bgp_dump);
static void bgp_dump_interval_func(struct thread *);
static void bgp_dump_interval_func(struct event *);
/* BGP packet dump output buffer. */
struct stream *bgp_dump_obuf;
@ -154,13 +154,13 @@ static int bgp_dump_interval_add(struct bgp_dump *bgp_dump, int interval)
interval = interval
- secs_into_day % interval; /* always > 0 */
}
thread_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
interval, &bgp_dump->t_interval);
event_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
interval, &bgp_dump->t_interval);
} else {
/* One-off dump: execute immediately, don't affect any scheduled
* dumps */
thread_add_event(bm->master, bgp_dump_interval_func, bgp_dump,
0, &bgp_dump->t_interval);
event_add_event(bm->master, bgp_dump_interval_func, bgp_dump, 0,
&bgp_dump->t_interval);
}
return 0;
@ -428,10 +428,10 @@ static unsigned int bgp_dump_routes_func(int afi, int first_run,
return seq;
}
static void bgp_dump_interval_func(struct thread *t)
static void bgp_dump_interval_func(struct event *t)
{
struct bgp_dump *bgp_dump;
bgp_dump = THREAD_ARG(t);
bgp_dump = EVENT_ARG(t);
/* Reschedule dump even if file couldn't be opened this time... */
if (bgp_dump_open_file(bgp_dump) != NULL) {
@ -691,7 +691,7 @@ static int bgp_dump_unset(struct bgp_dump *bgp_dump)
}
/* Removing interval event. */
THREAD_OFF(bgp_dump->t_interval);
EVENT_OFF(bgp_dump->t_interval);
bgp_dump->interval = 0;

View File

@ -65,7 +65,7 @@ static void bgp_evpn_mac_update_on_es_local_chg(struct bgp_evpn_es *es,
bool is_local);
esi_t zero_esi_buf, *zero_esi = &zero_esi_buf;
static void bgp_evpn_run_consistency_checks(struct thread *t);
static void bgp_evpn_run_consistency_checks(struct event *t);
static void bgp_evpn_path_nh_info_free(struct bgp_path_evpn_nh_info *nh_info);
static void bgp_evpn_path_nh_unlink(struct bgp_path_evpn_nh_info *nh_info);
@ -4174,9 +4174,9 @@ static void bgp_evpn_es_cons_checks_timer_start(void)
if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
zlog_debug("periodic consistency checking started");
thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
BGP_EVPN_CONS_CHECK_INTERVAL,
&bgp_mh_info->t_cons_check);
event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
BGP_EVPN_CONS_CHECK_INTERVAL,
&bgp_mh_info->t_cons_check);
}
/* queue up the es for background consistency checks */
@ -4360,7 +4360,7 @@ static uint32_t bgp_evpn_es_run_consistency_checks(struct bgp_evpn_es *es)
return proc_cnt;
}
static void bgp_evpn_run_consistency_checks(struct thread *t)
static void bgp_evpn_run_consistency_checks(struct event *t)
{
int proc_cnt = 0;
struct listnode *node;
@ -4380,7 +4380,7 @@ static void bgp_evpn_run_consistency_checks(struct thread *t)
}
/* restart the timer */
thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
BGP_EVPN_CONS_CHECK_INTERVAL,
&bgp_mh_info->t_cons_check);
}
@ -4936,7 +4936,7 @@ void bgp_evpn_mh_finish(void)
bgp_evpn_es_local_info_clear(es, true);
}
if (bgp_mh_info->t_cons_check)
THREAD_OFF(bgp_mh_info->t_cons_check);
EVENT_OFF(bgp_mh_info->t_cons_check);
list_delete(&bgp_mh_info->local_es_list);
list_delete(&bgp_mh_info->pend_es_list);
list_delete(&bgp_mh_info->ead_es_export_rtl);

View File

@ -299,7 +299,7 @@ struct bgp_evpn_mh_info {
/* List of ESs with pending/periodic processing */
struct list *pend_es_list;
/* periodic timer for running background consistency checks */
struct thread *t_cons_check;
struct event *t_cons_check;
/* config knobs for optimizing or interop */
/* Generate EAD-EVI routes even if the ES is oper-down. This can be

View File

@ -9,7 +9,7 @@
#include "linklist.h"
#include "prefix.h"
#include "sockunion.h"
#include "thread.h"
#include "frrevent.h"
#include "log.h"
#include "stream.h"
#include "ringbuf.h"
@ -82,13 +82,13 @@ static const char *const bgp_event_str[] = {
function. */
/* BGP event function. */
void bgp_event(struct thread *);
void bgp_event(struct event *event);
/* BGP thread functions. */
static void bgp_start_timer(struct thread *);
static void bgp_connect_timer(struct thread *);
static void bgp_holdtime_timer(struct thread *);
static void bgp_delayopen_timer(struct thread *);
static void bgp_start_timer(struct event *event);
static void bgp_connect_timer(struct event *event);
static void bgp_holdtime_timer(struct event *event);
static void bgp_delayopen_timer(struct event *event);
/* BGP FSM functions. */
static enum bgp_fsm_state_progress bgp_start(struct peer *);
@ -169,17 +169,17 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
*/
bgp_keepalives_off(from_peer);
THREAD_OFF(peer->t_routeadv);
THREAD_OFF(peer->t_connect);
THREAD_OFF(peer->t_delayopen);
THREAD_OFF(peer->t_connect_check_r);
THREAD_OFF(peer->t_connect_check_w);
THREAD_OFF(from_peer->t_routeadv);
THREAD_OFF(from_peer->t_connect);
THREAD_OFF(from_peer->t_delayopen);
THREAD_OFF(from_peer->t_connect_check_r);
THREAD_OFF(from_peer->t_connect_check_w);
THREAD_OFF(from_peer->t_process_packet);
EVENT_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_connect);
EVENT_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_connect_check_r);
EVENT_OFF(peer->t_connect_check_w);
EVENT_OFF(from_peer->t_routeadv);
EVENT_OFF(from_peer->t_connect);
EVENT_OFF(from_peer->t_delayopen);
EVENT_OFF(from_peer->t_connect_check_r);
EVENT_OFF(from_peer->t_connect_check_w);
EVENT_OFF(from_peer->t_process_packet);
/*
* At this point in time, it is possible that there are packets pending
@ -343,8 +343,8 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
bgp_reads_on(peer);
bgp_writes_on(peer);
thread_add_event(bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
event_add_event(bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
return (peer);
}
@ -364,23 +364,23 @@ void bgp_timer_set(struct peer *peer)
inactive. All other timer must be turned off */
if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(peer)
|| peer->bgp->vrf_id == VRF_UNKNOWN) {
THREAD_OFF(peer->t_start);
EVENT_OFF(peer->t_start);
} else {
BGP_TIMER_ON(peer->t_start, bgp_start_timer,
peer->v_start);
}
THREAD_OFF(peer->t_connect);
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_connect);
EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
THREAD_OFF(peer->t_routeadv);
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_delayopen);
break;
case Connect:
/* After start timer is expired, the peer moves to Connect
status. Make sure start timer is off and connect timer is
on. */
THREAD_OFF(peer->t_start);
EVENT_OFF(peer->t_start);
if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
(peer->v_delayopen + peer->v_connect));
@ -388,19 +388,19 @@ void bgp_timer_set(struct peer *peer)
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
peer->v_connect);
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
break;
case Active:
/* Active is waiting for a connection from the remote peer. If the
connect timer expires, change status to Connect. */
THREAD_OFF(peer->t_start);
EVENT_OFF(peer->t_start);
/* If peer is passive mode, do not set connect timer. */
if (CHECK_FLAG(peer->flags, PEER_FLAG_PASSIVE)
|| CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) {
THREAD_OFF(peer->t_connect);
EVENT_OFF(peer->t_connect);
} else {
if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_TIMER_ON(
@ -410,30 +410,30 @@ void bgp_timer_set(struct peer *peer)
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
peer->v_connect);
}
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
break;
case OpenSent:
/* OpenSent status. */
THREAD_OFF(peer->t_start);
THREAD_OFF(peer->t_connect);
EVENT_OFF(peer->t_start);
EVENT_OFF(peer->t_connect);
if (peer->v_holdtime != 0) {
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime);
} else {
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_holdtime);
}
bgp_keepalives_off(peer);
THREAD_OFF(peer->t_routeadv);
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_delayopen);
break;
case OpenConfirm:
/* OpenConfirm status. */
THREAD_OFF(peer->t_start);
THREAD_OFF(peer->t_connect);
EVENT_OFF(peer->t_start);
EVENT_OFF(peer->t_connect);
/*
* If the negotiated Hold Time value is zero, then the Hold Time
@ -441,7 +441,7 @@ void bgp_timer_set(struct peer *peer)
* Additionally if a different hold timer has been negotiated
* than we must stop then start the timer again
*/
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_holdtime);
if (peer->v_holdtime == 0)
bgp_keepalives_off(peer);
else {
@ -449,16 +449,16 @@ void bgp_timer_set(struct peer *peer)
peer->v_holdtime);
bgp_keepalives_on(peer);
}
THREAD_OFF(peer->t_routeadv);
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_delayopen);
break;
case Established:
/* In Established status the start and connect timers are turned
off. */
THREAD_OFF(peer->t_start);
THREAD_OFF(peer->t_connect);
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_start);
EVENT_OFF(peer->t_connect);
EVENT_OFF(peer->t_delayopen);
/*
* Same as OpenConfirm, if holdtime is zero then both holdtime
@ -466,7 +466,7 @@ void bgp_timer_set(struct peer *peer)
* Additionally if a different hold timer has been negotiated
* then we must stop then start the timer again
*/
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_holdtime);
if (peer->v_holdtime == 0)
bgp_keepalives_off(peer);
else {
@ -476,22 +476,22 @@ void bgp_timer_set(struct peer *peer)
}
break;
case Deleted:
THREAD_OFF(peer->t_gr_restart);
THREAD_OFF(peer->t_gr_stale);
EVENT_OFF(peer->t_gr_restart);
EVENT_OFF(peer->t_gr_stale);
FOREACH_AFI_SAFI (afi, safi)
THREAD_OFF(peer->t_llgr_stale[afi][safi]);
EVENT_OFF(peer->t_llgr_stale[afi][safi]);
THREAD_OFF(peer->t_pmax_restart);
THREAD_OFF(peer->t_refresh_stalepath);
EVENT_OFF(peer->t_pmax_restart);
EVENT_OFF(peer->t_refresh_stalepath);
/* fallthru */
case Clearing:
THREAD_OFF(peer->t_start);
THREAD_OFF(peer->t_connect);
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_start);
EVENT_OFF(peer->t_connect);
EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
THREAD_OFF(peer->t_routeadv);
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_delayopen);
break;
case BGP_STATUS_MAX:
flog_err(EC_LIB_DEVELOPMENT,
@ -502,28 +502,28 @@ void bgp_timer_set(struct peer *peer)
/* BGP start timer. This function sets the BGP_Start event as the thread value
and processes the event. */
static void bgp_start_timer(struct thread *thread)
static void bgp_start_timer(struct event *thread)
{
struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (start timer expire).", peer->host);
THREAD_VAL(thread) = BGP_Start;
EVENT_VAL(thread) = BGP_Start;
bgp_event(thread); /* bgp_event unlocks peer */
}
/* BGP connect retry timer. */
static void bgp_connect_timer(struct thread *thread)
static void bgp_connect_timer(struct event *thread)
{
struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
/* stop the DelayOpenTimer if it is running */
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_delayopen);
assert(!peer->t_write);
assert(!peer->t_read);
@ -534,18 +534,18 @@ static void bgp_connect_timer(struct thread *thread)
if (CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
bgp_stop(peer);
else {
THREAD_VAL(thread) = ConnectRetry_timer_expired;
EVENT_VAL(thread) = ConnectRetry_timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
}
/* BGP holdtime timer. */
static void bgp_holdtime_timer(struct thread *thread)
static void bgp_holdtime_timer(struct event *thread)
{
atomic_size_t inq_count;
struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (holdtime timer expire)",
@ -567,15 +567,15 @@ static void bgp_holdtime_timer(struct thread *thread)
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime);
THREAD_VAL(thread) = Hold_Timer_expired;
EVENT_VAL(thread) = Hold_Timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
void bgp_routeadv_timer(struct thread *thread)
void bgp_routeadv_timer(struct event *thread)
{
struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (routeadv timer expire)",
@ -583,8 +583,8 @@ void bgp_routeadv_timer(struct thread *thread)
peer->synctime = monotime(NULL);
thread_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
&peer->t_generate_updgrp_packets);
event_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
&peer->t_generate_updgrp_packets);
/* MRAI timer will be started again when FIFO is built, no need to
* do it here.
@ -592,17 +592,17 @@ void bgp_routeadv_timer(struct thread *thread)
}
/* RFC 4271 DelayOpenTimer */
void bgp_delayopen_timer(struct thread *thread)
void bgp_delayopen_timer(struct event *thread)
{
struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (DelayOpentimer expire)",
peer->host);
THREAD_VAL(thread) = DelayOpen_timer_expired;
EVENT_VAL(thread) = DelayOpen_timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
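
The four timers above (start, connect, holdtime, delayopen) share one dispatch idiom: recover the peer with EVENT_ARG(), stamp the FSM event code into the event with EVENT_VAL(), and pass the same event on to bgp_event(), which unlocks the peer. The shared skeleton, with a hypothetical name:

static void some_bgp_timer(struct event *thread)
{
        struct peer *peer = EVENT_ARG(thread);

        if (bgp_debug_neighbor_events(peer))
                zlog_debug("%s [FSM] Timer expired", peer->host);

        EVENT_VAL(thread) = Hold_Timer_expired; /* or BGP_Start, etc. */
        bgp_event(thread);                      /* bgp_event unlocks peer */
}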
@ -655,7 +655,7 @@ static void bgp_graceful_restart_timer_off(struct peer *peer)
return;
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
THREAD_OFF(peer->t_gr_stale);
EVENT_OFF(peer->t_gr_stale);
if (peer_dynamic_neighbor(peer) &&
!(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) {
@ -668,14 +668,14 @@ static void bgp_graceful_restart_timer_off(struct peer *peer)
bgp_timer_set(peer);
}
static void bgp_llgr_stale_timer_expire(struct thread *thread)
static void bgp_llgr_stale_timer_expire(struct event *thread)
{
struct peer_af *paf;
struct peer *peer;
afi_t afi;
safi_t safi;
paf = THREAD_ARG(thread);
paf = EVENT_ARG(thread);
peer = paf->peer;
afi = paf->afi;
@ -770,7 +770,7 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)
}
}
static void bgp_graceful_restart_timer_expire(struct thread *thread)
static void bgp_graceful_restart_timer_expire(struct event *thread)
{
struct peer *peer, *tmp_peer;
struct listnode *node, *nnode;
@ -778,7 +778,7 @@ static void bgp_graceful_restart_timer_expire(struct thread *thread)
afi_t afi;
safi_t safi;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer)) {
zlog_debug("%pBP graceful restart timer expired", peer);
@ -820,10 +820,9 @@ static void bgp_graceful_restart_timer_expire(struct thread *thread)
bgp_set_llgr_stale(peer, afi, safi);
bgp_clear_stale_route(peer, afi, safi);
thread_add_timer(bm->master,
bgp_llgr_stale_timer_expire, paf,
peer->llgr[afi][safi].stale_time,
&peer->t_llgr_stale[afi][safi]);
event_add_timer(bm->master, bgp_llgr_stale_timer_expire,
paf, peer->llgr[afi][safi].stale_time,
&peer->t_llgr_stale[afi][safi]);
for (ALL_LIST_ELEMENTS(peer->bgp->peer, node, nnode,
tmp_peer))
@ -836,13 +835,13 @@ static void bgp_graceful_restart_timer_expire(struct thread *thread)
bgp_graceful_restart_timer_off(peer);
}
static void bgp_graceful_stale_timer_expire(struct thread *thread)
static void bgp_graceful_stale_timer_expire(struct event *thread)
{
struct peer *peer;
afi_t afi;
safi_t safi;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart stalepath timer expired",
@ -855,14 +854,14 @@ static void bgp_graceful_stale_timer_expire(struct thread *thread)
}
/* Selection deferral timer processing function */
static void bgp_graceful_deferral_timer_expire(struct thread *thread)
static void bgp_graceful_deferral_timer_expire(struct event *thread)
{
struct afi_safi_info *info;
afi_t afi;
safi_t safi;
struct bgp *bgp;
info = THREAD_ARG(thread);
info = EVENT_ARG(thread);
afi = info->afi;
safi = info->safi;
bgp = info->bgp;
@ -909,8 +908,8 @@ bool bgp_update_delay_configured(struct bgp *bgp)
on ending the update delay. */
void bgp_update_delay_end(struct bgp *bgp)
{
THREAD_OFF(bgp->t_update_delay);
THREAD_OFF(bgp->t_establish_wait);
EVENT_OFF(bgp->t_update_delay);
EVENT_OFF(bgp->t_establish_wait);
/* Reset update-delay related state */
bgp->update_delay_over = 1;
@ -973,7 +972,7 @@ void bgp_start_routeadv(struct bgp *bgp)
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (!peer_established(peer))
continue;
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
}
@ -993,7 +992,7 @@ void bgp_adjust_routeadv(struct peer *peer)
* different
* duration and schedule write thread immediately.
*/
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
peer->synctime = monotime(NULL);
/* If suppress fib pending is enabled, route is advertised to
@ -1025,7 +1024,7 @@ void bgp_adjust_routeadv(struct peer *peer)
*/
diff = difftime(nowtime, peer->last_update);
if (diff > (double)peer->v_routeadv) {
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
return;
}
@ -1047,12 +1046,12 @@ void bgp_adjust_routeadv(struct peer *peer)
* (MRAI - m) < r
*/
if (peer->t_routeadv)
remain = thread_timer_remain_second(peer->t_routeadv);
remain = event_timer_remain_second(peer->t_routeadv);
else
remain = peer->v_routeadv;
diff = peer->v_routeadv - diff;
if (diff <= (double)remain) {
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, diff);
}
}
@ -1125,14 +1124,14 @@ int bgp_fsm_error_subcode(int status)
}
/* The maxmed onstartup timer expiry callback. */
static void bgp_maxmed_onstartup_timer(struct thread *thread)
static void bgp_maxmed_onstartup_timer(struct event *thread)
{
struct bgp *bgp;
zlog_info("Max med on startup ended - timer expired.");
bgp = THREAD_ARG(thread);
THREAD_OFF(bgp->t_maxmed_onstartup);
bgp = EVENT_ARG(thread);
EVENT_OFF(bgp->t_maxmed_onstartup);
bgp->maxmed_onstartup_over = 1;
bgp_maxmed_update(bgp);
@ -1147,8 +1146,8 @@ static void bgp_maxmed_onstartup_begin(struct bgp *bgp)
zlog_info("Begin maxmed onstartup mode - timer %d seconds",
bgp->v_maxmed_onstartup);
thread_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);
event_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);
if (!bgp->v_maxmed_admin) {
bgp->maxmed_active = 1;
@ -1167,26 +1166,26 @@ static void bgp_maxmed_onstartup_process_status_change(struct peer *peer)
}
/* The update delay timer expiry callback. */
static void bgp_update_delay_timer(struct thread *thread)
static void bgp_update_delay_timer(struct event *thread)
{
struct bgp *bgp;
zlog_info("Update delay ended - timer expired.");
bgp = THREAD_ARG(thread);
THREAD_OFF(bgp->t_update_delay);
bgp = EVENT_ARG(thread);
EVENT_OFF(bgp->t_update_delay);
bgp_update_delay_end(bgp);
}
/* The establish wait timer expiry callback. */
static void bgp_establish_wait_timer(struct thread *thread)
static void bgp_establish_wait_timer(struct event *thread)
{
struct bgp *bgp;
zlog_info("Establish wait - timer expired.");
bgp = THREAD_ARG(thread);
THREAD_OFF(bgp->t_establish_wait);
bgp = EVENT_ARG(thread);
EVENT_OFF(bgp->t_establish_wait);
bgp_check_update_delay(bgp);
}
@ -1206,12 +1205,12 @@ static void bgp_update_delay_begin(struct bgp *bgp)
peer->update_delay_over = 0;
/* Start the update-delay timer */
thread_add_timer(bm->master, bgp_update_delay_timer, bgp,
bgp->v_update_delay, &bgp->t_update_delay);
event_add_timer(bm->master, bgp_update_delay_timer, bgp,
bgp->v_update_delay, &bgp->t_update_delay);
if (bgp->v_establish_wait != bgp->v_update_delay)
thread_add_timer(bm->master, bgp_establish_wait_timer, bgp,
bgp->v_establish_wait, &bgp->t_establish_wait);
event_add_timer(bm->master, bgp_establish_wait_timer, bgp,
bgp->v_establish_wait, &bgp->t_establish_wait);
frr_timestamp(3, bgp->update_delay_begin_time,
sizeof(bgp->update_delay_begin_time));
@ -1413,7 +1412,7 @@ enum bgp_fsm_state_progress bgp_stop(struct peer *peer)
/* graceful restart */
if (peer->t_gr_stale) {
THREAD_OFF(peer->t_gr_stale);
EVENT_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
@ -1443,7 +1442,7 @@ enum bgp_fsm_state_progress bgp_stop(struct peer *peer)
/* Stop route-refresh stalepath timer */
if (peer->t_refresh_stalepath) {
THREAD_OFF(peer->t_refresh_stalepath);
EVENT_OFF(peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
@ -1477,11 +1476,11 @@ enum bgp_fsm_state_progress bgp_stop(struct peer *peer)
/* There is no pending EOR message */
if (gr_info->eor_required == 0) {
if (gr_info->t_select_deferral) {
void *info = THREAD_ARG(
void *info = EVENT_ARG(
gr_info->t_select_deferral);
XFREE(MTYPE_TMP, info);
}
THREAD_OFF(gr_info->t_select_deferral);
EVENT_OFF(gr_info->t_select_deferral);
gr_info->eor_received = 0;
}
}
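Worth noting in this hunk is the cleanup idiom: the deferral event owns a heap-allocated struct afi_safi_info, so the argument is fetched back with EVENT_ARG() and freed before EVENT_OFF() cancels the event. The same sequence recurs in bgp_update_receive(), bgp_best_path_select_defer() and bgp_delete() later in this diff; its generic shape (t is illustrative):

static void cancel_with_cleanup(struct event **t)
{
	if (*t) {
		/* the arg handed to event_add_timer() would otherwise leak */
		void *info = EVENT_ARG(*t);
		XFREE(MTYPE_TMP, info);
	}
	EVENT_OFF(*t); /* cancel and NULL the stored handle */
}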
@ -1506,15 +1505,15 @@ enum bgp_fsm_state_progress bgp_stop(struct peer *peer)
bgp_writes_off(peer);
bgp_reads_off(peer);
THREAD_OFF(peer->t_connect_check_r);
THREAD_OFF(peer->t_connect_check_w);
EVENT_OFF(peer->t_connect_check_r);
EVENT_OFF(peer->t_connect_check_w);
/* Stop all timers. */
THREAD_OFF(peer->t_start);
THREAD_OFF(peer->t_connect);
THREAD_OFF(peer->t_holdtime);
THREAD_OFF(peer->t_routeadv);
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_start);
EVENT_OFF(peer->t_connect);
EVENT_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_delayopen);
/* Clear input and output buffer. */
frr_with_mutex (&peer->io_mtx) {
@ -1654,21 +1653,21 @@ bgp_stop_with_notify(struct peer *peer, uint8_t code, uint8_t sub_code)
* when the connection is established. A read event is triggered when the
* connection is closed. Thus we need to cancel whichever one did not occur.
*/
static void bgp_connect_check(struct thread *thread)
static void bgp_connect_check(struct event *thread)
{
int status;
socklen_t slen;
int ret;
struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!peer->t_read);
assert(!peer->t_write);
THREAD_OFF(peer->t_connect_check_r);
THREAD_OFF(peer->t_connect_check_w);
EVENT_OFF(peer->t_connect_check_r);
EVENT_OFF(peer->t_connect_check_w);
/* Check file descriptor. */
slen = sizeof(status);
@ -1941,10 +1940,10 @@ enum bgp_fsm_state_progress bgp_start(struct peer *peer)
* bgp_connect_check() as the handler for each and cancel the
* unused event in that function.
*/
thread_add_read(bm->master, bgp_connect_check, peer, peer->fd,
&peer->t_connect_check_r);
thread_add_write(bm->master, bgp_connect_check, peer, peer->fd,
&peer->t_connect_check_w);
event_add_read(bm->master, bgp_connect_check, peer, peer->fd,
&peer->t_connect_check_r);
event_add_write(bm->master, bgp_connect_check, peer, peer->fd,
&peer->t_connect_check_w);
break;
}
return BGP_FSM_SUCCESS;
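The comment above bgp_connect_check() describes the dual-event trick for a non-blocking connect(): arm both a read and a write event on the connecting socket, cancel both when either fires, then ask the kernel for the outcome via SO_ERROR. A condensed sketch of the handler side, assuming bgpd's struct peer fields used in this hunk and the standard socket headers:

static void connect_check_sketch(struct event *thread)
{
	struct peer *peer = EVENT_ARG(thread);
	int status;
	socklen_t slen = sizeof(status);

	/* the events were armed as a pair; cancel both so the sibling
	 * that did not fire cannot run later */
	EVENT_OFF(peer->t_connect_check_r);
	EVENT_OFF(peer->t_connect_check_w);

	/* SO_ERROR tells us how the non-blocking connect() ended */
	if (getsockopt(peer->fd, SOL_SOCKET, SO_ERROR, &status, &slen) < 0 ||
	    status != 0)
		return; /* failed: drive the FSM toward retry */
	/* status == 0: connected, continue with the OPEN exchange */
}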
@ -2013,7 +2012,7 @@ static enum bgp_fsm_state_progress
bgp_fsm_delayopen_timer_expire(struct peer *peer)
{
/* Stop the DelayOpenTimer */
THREAD_OFF(peer->t_delayopen);
EVENT_OFF(peer->t_delayopen);
/* Send open message to peer */
bgp_open_send(peer);
@ -2046,9 +2045,9 @@ static int bgp_start_deferral_timer(struct bgp *bgp, afi_t afi, safi_t safi,
thread_info->safi = safi;
thread_info->bgp = bgp;
thread_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
thread_info, bgp->select_defer_time,
&gr_info->t_select_deferral);
event_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
thread_info, bgp->select_defer_time,
&gr_info->t_select_deferral);
}
gr_info->eor_required++;
/* Send message to RIB indicating route update pending */
@ -2226,7 +2225,7 @@ static enum bgp_fsm_state_progress bgp_establish(struct peer *peer)
else {
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_MODE);
if (peer->t_gr_stale) {
THREAD_OFF(peer->t_gr_stale);
EVENT_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
@ -2235,7 +2234,7 @@ static enum bgp_fsm_state_progress bgp_establish(struct peer *peer)
}
if (peer->t_gr_restart) {
THREAD_OFF(peer->t_gr_restart);
EVENT_OFF(peer->t_gr_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart timer stopped", peer);
}
@ -2251,7 +2250,7 @@ static enum bgp_fsm_state_progress bgp_establish(struct peer *peer)
*/
FOREACH_AFI_SAFI (afi, safi) {
if (peer->t_llgr_stale[afi][safi]) {
THREAD_OFF(peer->t_llgr_stale[afi][safi]);
EVENT_OFF(peer->t_llgr_stale[afi][safi]);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Long-lived stale timer stopped for afi/safi: %d/%d",
@ -2296,7 +2295,7 @@ static enum bgp_fsm_state_progress bgp_establish(struct peer *peer)
* of read-only mode.
*/
if (!bgp_update_delay_active(peer->bgp)) {
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
@ -2331,14 +2330,14 @@ static enum bgp_fsm_state_progress bgp_establish(struct peer *peer)
/* Keepalive packet is received. */
static enum bgp_fsm_state_progress bgp_fsm_keepalive(struct peer *peer)
{
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_holdtime);
return BGP_FSM_SUCCESS;
}
/* Update packet is received. */
static enum bgp_fsm_state_progress bgp_fsm_update(struct peer *peer)
{
THREAD_OFF(peer->t_holdtime);
EVENT_OFF(peer->t_holdtime);
return BGP_FSM_SUCCESS;
}
@ -2380,13 +2379,13 @@ void bgp_fsm_nht_update(struct peer *peer, bool has_valid_nexthops)
break;
case Connect:
if (!has_valid_nexthops) {
THREAD_OFF(peer->t_connect);
EVENT_OFF(peer->t_connect);
BGP_EVENT_ADD(peer, TCP_fatal_error);
}
break;
case Active:
if (has_valid_nexthops) {
THREAD_OFF(peer->t_connect);
EVENT_OFF(peer->t_connect);
BGP_EVENT_ADD(peer, ConnectRetry_timer_expired);
}
break;
@ -2572,13 +2571,13 @@ static const struct {
};
/* Execute event process. */
void bgp_event(struct thread *thread)
void bgp_event(struct event *thread)
{
enum bgp_fsm_events event;
struct peer *peer;
peer = THREAD_ARG(thread);
event = THREAD_VAL(thread);
peer = EVENT_ARG(thread);
event = EVENT_VAL(thread);
peer_lock(peer);
bgp_event_update(peer, event);

View File

@ -11,33 +11,34 @@
#define BGP_TIMER_ON(T, F, V) \
do { \
if ((peer->status != Deleted)) \
thread_add_timer(bm->master, (F), peer, (V), &(T)); \
event_add_timer(bm->master, (F), peer, (V), &(T)); \
} while (0)
#define BGP_EVENT_ADD(P, E) \
do { \
if ((P)->status != Deleted) \
thread_add_event(bm->master, bgp_event, (P), (E), \
NULL); \
event_add_event(bm->master, bgp_event, (P), (E), \
NULL); \
} while (0)
#define BGP_EVENT_FLUSH(P) \
do { \
assert(peer); \
thread_cancel_event_ready(bm->master, (P)); \
event_cancel_event_ready(bm->master, (P)); \
} while (0)
#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
do { \
if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) && \
PEER_ROUTE_ADV_DELAY(peer)) \
thread_add_timer_msec(bm->master, (F), peer, \
(BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * 1000),\
(T)); \
else \
thread_add_timer_msec(bm->master, (F), peer, \
0, (T)); \
} while (0) \
#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
do { \
if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) && \
PEER_ROUTE_ADV_DELAY(peer)) \
event_add_timer_msec( \
bm->master, (F), peer, \
(BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * \
1000), \
(T)); \
else \
event_add_timer_msec(bm->master, (F), peer, 0, (T)); \
} while (0)
#define BGP_MSEC_JITTER 10
@ -105,11 +106,11 @@
* Update FSM for peer based on whether we have valid nexthops or not.
*/
extern void bgp_fsm_nht_update(struct peer *peer, bool has_valid_nexthops);
extern void bgp_event(struct thread *);
extern void bgp_event(struct event *event);
extern int bgp_event_update(struct peer *, enum bgp_fsm_events event);
extern int bgp_stop(struct peer *peer);
extern void bgp_timer_set(struct peer *);
extern void bgp_routeadv_timer(struct thread *);
extern void bgp_routeadv_timer(struct event *event);
extern void bgp_fsm_change_status(struct peer *peer,
enum bgp_fsm_status status);
extern const char *const peer_down_str[];

View File

@ -17,7 +17,7 @@
#include "network.h" // for ERRNO_IO_RETRY
#include "stream.h" // for stream_get_endp, stream_getw_from, str...
#include "ringbuf.h" // for ringbuf_remain, ringbuf_peek, ringbuf_...
#include "thread.h" // for THREAD_OFF, THREAD_ARG, thread...
#include "frrevent.h" // for EVENT_OFF, EVENT_ARG, thread...
#include "bgpd/bgp_io.h"
#include "bgpd/bgp_debug.h" // for bgp_debug_neighbor_events, bgp_type_str
@ -31,8 +31,8 @@
/* forward declarations */
static uint16_t bgp_write(struct peer *);
static uint16_t bgp_read(struct peer *peer, int *code_p);
static void bgp_process_writes(struct thread *);
static void bgp_process_reads(struct thread *);
static void bgp_process_writes(struct event *event);
static void bgp_process_reads(struct event *event);
static bool validate_header(struct peer *);
/* generic i/o status codes */
@ -55,8 +55,8 @@ void bgp_writes_on(struct peer *peer)
assert(!peer->t_connect_check_w);
assert(peer->fd);
thread_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
&peer->t_write);
event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
&peer->t_write);
SET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}
@ -65,8 +65,8 @@ void bgp_writes_off(struct peer *peer)
struct frr_pthread *fpt = bgp_pth_io;
assert(fpt->running);
thread_cancel_async(fpt->master, &peer->t_write, NULL);
THREAD_OFF(peer->t_generate_updgrp_packets);
event_cancel_async(fpt->master, &peer->t_write, NULL);
EVENT_OFF(peer->t_generate_updgrp_packets);
UNSET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}
@ -85,8 +85,8 @@ void bgp_reads_on(struct peer *peer)
assert(!peer->t_connect_check_w);
assert(peer->fd);
thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
&peer->t_read);
event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
&peer->t_read);
SET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}
@ -96,9 +96,9 @@ void bgp_reads_off(struct peer *peer)
struct frr_pthread *fpt = bgp_pth_io;
assert(fpt->running);
thread_cancel_async(fpt->master, &peer->t_read, NULL);
THREAD_OFF(peer->t_process_packet);
THREAD_OFF(peer->t_process_packet_error);
event_cancel_async(fpt->master, &peer->t_read, NULL);
EVENT_OFF(peer->t_process_packet);
EVENT_OFF(peer->t_process_packet_error);
UNSET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}
@ -108,10 +108,10 @@ void bgp_reads_off(struct peer *peer)
/*
* Called from I/O pthread when a file descriptor has become ready for writing.
*/
static void bgp_process_writes(struct thread *thread)
static void bgp_process_writes(struct event *thread)
{
static struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
uint16_t status;
bool reschedule;
bool fatal = false;
@ -142,8 +142,8 @@ static void bgp_process_writes(struct thread *thread)
* sent in the update message
*/
if (reschedule) {
thread_add_write(fpt->master, bgp_process_writes, peer,
peer->fd, &peer->t_write);
event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
&peer->t_write);
} else if (!fatal) {
BGP_UPDATE_GROUP_TIMER_ON(&peer->t_generate_updgrp_packets,
bgp_generate_updgrp_packets);
@ -210,7 +210,7 @@ static int read_ibuf_work(struct peer *peer)
* We read as much data as possible, process as many packets as we can and
* place them on peer->ibuf for secondary processing by the main thread.
*/
static void bgp_process_reads(struct thread *thread)
static void bgp_process_reads(struct event *thread)
{
/* clang-format off */
static struct peer *peer; /* peer to read from */
@ -223,7 +223,7 @@ static void bgp_process_reads(struct thread *thread)
int ret = 1;
/* clang-format on */
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
if (peer->fd < 0 || bm->terminating)
return;
@ -247,8 +247,8 @@ static void bgp_process_reads(struct thread *thread)
/* Handle the error in the main pthread, include the
* specific state change from 'bgp_read'.
*/
thread_add_event(bm->master, bgp_packet_process_error,
peer, code, &peer->t_process_packet_error);
event_add_event(bm->master, bgp_packet_process_error, peer,
code, &peer->t_process_packet_error);
goto done;
}
@ -292,11 +292,11 @@ done:
if (!ibuf_full)
assert(ringbuf_space(peer->ibuf_work) >= peer->max_packet_size);
thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
&peer->t_read);
event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
&peer->t_read);
if (added_pkt)
thread_add_event(bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
event_add_event(bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
}
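bgp_io.c is the multi-pthread variant of the same API: the I/O pthread re-arms its read event on its own loop (fpt->master) and hands finished packets to the main pthread by queueing an immediate event on bm->master, while teardown from another pthread goes through event_cancel_async() instead of EVENT_OFF(), as in bgp_reads_off()/bgp_writes_off() above. A compressed sketch under those assumptions:

static void io_resched_sketch(struct frr_pthread *fpt, struct peer *peer)
{
	/* on the io pthread: keep reading, punt parsing to the main loop */
	event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
		       &peer->t_read);
	event_add_event(bm->master, bgp_process_packet, peer, 0,
			&peer->t_process_packet);
}

static void io_teardown_sketch(struct frr_pthread *fpt, struct peer *peer)
{
	/* an event owned by another pthread's loop must be cancelled
	 * asynchronously; that loop performs the cancel itself */
	event_cancel_async(fpt->master, &peer->t_read, NULL);
	EVENT_OFF(peer->t_process_packet); /* main-loop event: plain off */
}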
/*

View File

@ -15,7 +15,7 @@
#include "memory.h" // for MTYPE_TMP, XFREE, XCALLOC, XMALLOC
#include "monotime.h" // for monotime, monotime_since
#include "bgpd/bgpd.h" // for peer, PEER_THREAD_KEEPALIVES_ON, peer...
#include "bgpd/bgpd.h" // for peer, PEER_EVENT_KEEPALIVES_ON, peer...
#include "bgpd/bgp_debug.h" // for bgp_debug_neighbor_events
#include "bgpd/bgp_packet.h" // for bgp_keepalive_send
#include "bgpd/bgp_keepalives.h"
@ -162,7 +162,7 @@ void *bgp_keepalives_start(void *arg)
/*
* The RCU mechanism for each pthread is initialized in a "locked"
* state. That's ok for pthreads using the frr_pthread,
* thread_fetch event loop, because that event loop unlocks regularly.
* event_fetch event loop, because that event loop unlocks regularly.
* For foreign pthreads, the lock needs to be unlocked so that the
* background rcu pthread can run.
*/

View File

@ -6,7 +6,7 @@
#include <zebra.h>
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "prefix.h"
#include "zclient.h"
#include "stream.h"

View File

@ -177,7 +177,7 @@ static void lp_chunk_free(void *goner)
XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}
void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
void bgp_lp_init(struct event_loop *master, struct labelpool *pool)
{
if (BGP_DEBUG(labelpool, LABELPOOL))
zlog_debug("%s: entry", __func__);
@ -1091,7 +1091,7 @@ struct lp_test {
struct timeval starttime;
struct skiplist *timestamps_alloc;
struct skiplist *timestamps_dealloc;
struct thread *event_thread;
struct event *event_thread;
unsigned int counter[LPT_STAT_MAX];
};
@ -1150,7 +1150,7 @@ static int test_cb(mpls_label_t label, void *labelid, bool allocated)
return 0;
}
static void labelpool_test_event_handler(struct thread *thread)
static void labelpool_test_event_handler(struct event *thread)
{
struct lp_test *tcb;
@ -1202,7 +1202,7 @@ static void lptest_stop(void)
}
if (tcb->event_thread)
thread_cancel(&tcb->event_thread);
event_cancel(&tcb->event_thread);
lpt_inprogress = false;
}
@ -1491,7 +1491,7 @@ static void lptest_delete(void *val)
}
if (tcb->event_thread)
thread_cancel(&tcb->event_thread);
event_cancel(&tcb->event_thread);
memset(tcb, 0, sizeof(*tcb));

View File

@ -31,7 +31,7 @@ struct labelpool {
uint32_t next_chunksize; /* request this many labels */
};
extern void bgp_lp_init(struct thread_master *master, struct labelpool *pool);
extern void bgp_lp_init(struct event_loop *master, struct labelpool *pool);
extern void bgp_lp_finish(void);
extern void bgp_lp_get(int type, void *labelid,
int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated));

View File

@ -9,7 +9,7 @@
#include "vector.h"
#include "command.h"
#include "getopt.h"
#include "thread.h"
#include "frrevent.h"
#include <lib/version.h>
#include "memory.h"
#include "prefix.h"

View File

@ -12,7 +12,7 @@
#include "log.h"
#include "prefix.h"
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"

View File

@ -5,7 +5,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "sockunion.h"
#include "sockopt.h"
#include "memory.h"
@ -338,12 +338,12 @@ static void bgp_socket_set_buffer_size(const int fd)
}
/* Accept bgp connection. */
static void bgp_accept(struct thread *thread)
static void bgp_accept(struct event *thread)
{
int bgp_sock;
int accept_sock;
union sockunion su;
struct bgp_listener *listener = THREAD_ARG(thread);
struct bgp_listener *listener = EVENT_ARG(thread);
struct peer *peer;
struct peer *peer1;
char buf[SU_ADDRSTRLEN];
@ -354,7 +354,7 @@ static void bgp_accept(struct thread *thread)
bgp = bgp_lookup_by_name(listener->name);
/* Register accept thread. */
accept_sock = THREAD_FD(thread);
accept_sock = EVENT_FD(thread);
if (accept_sock < 0) {
flog_err_sys(EC_LIB_SOCKET,
"[Error] BGP accept socket fd is negative: %d",
@ -362,8 +362,8 @@ static void bgp_accept(struct thread *thread)
return;
}
thread_add_read(bm->master, bgp_accept, listener, accept_sock,
&listener->thread);
event_add_read(bm->master, bgp_accept, listener, accept_sock,
&listener->thread);
/* Accept client connection. */
bgp_sock = sockunion_accept(accept_sock, &su);
@ -391,7 +391,7 @@ static void bgp_accept(struct thread *thread)
"[Error] accept() failed with error \"%s\" on BGP listener socket %d for BGP instance in VRF \"%s\"; refreshing socket",
safe_strerror(save_errno), accept_sock,
VRF_LOGNAME(vrf));
THREAD_OFF(listener->thread);
EVENT_OFF(listener->thread);
} else {
flog_err_sys(
EC_LIB_SOCKET,
@ -436,7 +436,7 @@ static void bgp_accept(struct thread *thread)
sockopt_tcp_mss_set(bgp_sock, peer1->tcp_mss);
bgp_fsm_change_status(peer1, Active);
THREAD_OFF(
EVENT_OFF(
peer1->t_start); /* created in peer_create() */
if (peer_active(peer1)) {
@ -569,7 +569,7 @@ static void bgp_accept(struct thread *thread)
}
bgp_peer_reg_with_nht(peer);
bgp_fsm_change_status(peer, Active);
THREAD_OFF(peer->t_start); /* created in peer_create() */
EVENT_OFF(peer->t_start); /* created in peer_create() */
SET_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
/* Make dummy peer until read Open packet. */
@ -861,8 +861,8 @@ static int bgp_listener(int sock, struct sockaddr *sa, socklen_t salen,
listener->bgp = bgp;
memcpy(&listener->su, sa, salen);
thread_add_read(bm->master, bgp_accept, listener, sock,
&listener->thread);
event_add_read(bm->master, bgp_accept, listener, sock,
&listener->thread);
listnode_add(bm->listen_sockets, listener);
return 0;
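bgp_accept() and bgp_listener() above show the persistent-listener idiom: read events are one-shot, so the handler re-registers itself before calling accept(), and EVENT_FD() recovers the listening socket from the event itself. Reduced to a skeleton (names as in this file; error handling elided):

static void accept_sketch(struct event *thread)
{
	struct bgp_listener *listener = EVENT_ARG(thread);
	int accept_sock = EVENT_FD(thread); /* fd the event fired on */
	union sockunion su;
	int bgp_sock;

	/* one-shot semantics: re-arm first so no connection is missed */
	event_add_read(bm->master, accept_sketch, listener, accept_sock,
		       &listener->thread);

	bgp_sock = sockunion_accept(accept_sock, &su);
	if (bgp_sock < 0)
		return; /* transient failure; the re-armed event retries */
	/* hand bgp_sock to a (possibly dummy) peer, as in bgp_accept() */
}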
@ -961,7 +961,7 @@ void bgp_close_vrf_socket(struct bgp *bgp)
for (ALL_LIST_ELEMENTS(bm->listen_sockets, node, next, listener)) {
if (listener->bgp == bgp) {
THREAD_OFF(listener->thread);
EVENT_OFF(listener->thread);
close(listener->fd);
listnode_delete(bm->listen_sockets, listener);
XFREE(MTYPE_BGP_LISTENER, listener->name);
@ -983,7 +983,7 @@ void bgp_close(void)
for (ALL_LIST_ELEMENTS(bm->listen_sockets, node, next, listener)) {
if (listener->bgp)
continue;
THREAD_OFF(listener->thread);
EVENT_OFF(listener->thread);
close(listener->fd);
listnode_delete(bm->listen_sockets, listener);
XFREE(MTYPE_BGP_LISTENER, listener->name);

View File

@ -11,7 +11,7 @@
struct bgp_listener {
int fd;
union sockunion su;
struct thread *thread;
struct event *thread;
struct bgp *bgp;
char *name;
};

View File

@ -6,7 +6,7 @@
#include <zebra.h>
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "prefix.h"
#include "lib/json.h"
#include "zclient.h"

View File

@ -6,7 +6,7 @@
#include <zebra.h>
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "prefix.h"
#include "zclient.h"
#include "stream.h"
@ -37,7 +37,7 @@ extern struct zclient *zclient;
static void register_zebra_rnh(struct bgp_nexthop_cache *bnc);
static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc);
static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p);
static void bgp_nht_ifp_initial(struct thread *thread);
static void bgp_nht_ifp_initial(struct event *thread);
static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc)
{
@ -756,10 +756,10 @@ void bgp_nht_ifp_down(struct interface *ifp)
bgp_nht_ifp_handle(ifp, false);
}
static void bgp_nht_ifp_initial(struct thread *thread)
static void bgp_nht_ifp_initial(struct event *thread)
{
ifindex_t ifindex = THREAD_VAL(thread);
struct bgp *bgp = THREAD_ARG(thread);
ifindex_t ifindex = EVENT_VAL(thread);
struct bgp *bgp = EVENT_ARG(thread);
struct interface *ifp = if_lookup_by_index(ifindex, bgp->vrf_id);
if (!ifp)
@ -811,8 +811,8 @@ void bgp_nht_interface_events(struct peer *peer)
return;
if (bnc->ifindex)
thread_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
bnc->ifindex, NULL);
event_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
bnc->ifindex, NULL);
}
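bgp_nht_ifp_initial() demonstrates the integer payload channel: event_add_event()'s fourth argument rides inside the event and is read back with EVENT_VAL(), next to the usual pointer from EVENT_ARG(); a NULL final argument means no handle is stored, so there is nothing to EVENT_OFF() later. Sketch under those assumptions (the ifp_event_* names are illustrative):

static void ifp_event_sketch(struct event *thread)
{
	ifindex_t ifindex = EVENT_VAL(thread); /* integer payload */
	struct bgp *bgp = EVENT_ARG(thread);   /* pointer payload */
	struct interface *ifp = if_lookup_by_index(ifindex, bgp->vrf_id);

	if (!ifp)
		return;
	/* react to the interface event here */
}

static void ifp_event_queue(struct bgp *bgp, ifindex_t ifindex)
{
	/* NULL handle: fire-and-forget immediate event */
	event_add_event(bm->master, ifp_event_sketch, bgp, ifindex, NULL);
}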
void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)

View File

@ -8,7 +8,7 @@
#include "linklist.h"
#include "prefix.h"
#include "stream.h"
#include "thread.h"
#include "frrevent.h"
#include "log.h"
#include "command.h"
#include "memory.h"

View File

@ -8,7 +8,7 @@
#include <zebra.h>
#include <sys/time.h>
#include "thread.h"
#include "frrevent.h"
#include "stream.h"
#include "network.h"
#include "prefix.h"
@ -442,9 +442,9 @@ static void bgp_write_proceed_actions(struct peer *peer)
* update group a peer belongs to, encode this information into packets, and
* enqueue the packets onto the peer's output buffer.
*/
void bgp_generate_updgrp_packets(struct thread *thread)
void bgp_generate_updgrp_packets(struct event *thread)
{
struct peer *peer = THREAD_ARG(thread);
struct peer *peer = EVENT_ARG(thread);
struct stream *s;
struct peer_af *paf;
@ -1792,11 +1792,11 @@ static int bgp_keepalive_receive(struct peer *peer, bgp_size_t size)
return Receive_KEEPALIVE_message;
}
static void bgp_refresh_stalepath_timer_expire(struct thread *thread)
static void bgp_refresh_stalepath_timer_expire(struct event *thread)
{
struct peer_af *paf;
paf = THREAD_ARG(thread);
paf = EVENT_ARG(thread);
afi_t afi = paf->afi;
safi_t safi = paf->safi;
@ -2106,11 +2106,11 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
"EOR RCV",
gr_info->eor_received);
if (gr_info->t_select_deferral) {
void *info = THREAD_ARG(
void *info = EVENT_ARG(
gr_info->t_select_deferral);
XFREE(MTYPE_TMP, info);
}
THREAD_OFF(gr_info->t_select_deferral);
EVENT_OFF(gr_info->t_select_deferral);
gr_info->eor_required = 0;
gr_info->eor_received = 0;
/* Best path selection */
@ -2595,10 +2595,10 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
}
if (peer_established(peer))
thread_add_timer(bm->master,
bgp_refresh_stalepath_timer_expire,
paf, peer->bgp->stalepath_time,
&peer->t_refresh_stalepath);
event_add_timer(bm->master,
bgp_refresh_stalepath_timer_expire, paf,
peer->bgp->stalepath_time,
&peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
@ -2613,7 +2613,7 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
return BGP_PACKET_NOOP;
}
THREAD_OFF(peer->t_refresh_stalepath);
EVENT_OFF(peer->t_refresh_stalepath);
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_EORR_RECEIVED);
UNSET_FLAG(peer->af_sflags[afi][safi],
@ -2863,11 +2863,11 @@ int bgp_capability_receive(struct peer *peer, bgp_size_t size)
* would not, making event flow difficult to understand. Please think twice
* before hacking this.
*
* Thread type: THREAD_EVENT
* Thread type: EVENT_EVENT
* @param thread
* @return 0
*/
void bgp_process_packet(struct thread *thread)
void bgp_process_packet(struct event *thread)
{
/* Yes first of all get peer pointer. */
struct peer *peer; // peer
@ -2875,7 +2875,7 @@ void bgp_process_packet(struct thread *thread)
int fsm_update_result; // return code of bgp_event_update()
int mprc; // message processing return code
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
rpkt_quanta_old = atomic_load_explicit(&peer->bgp->rpkt_quanta,
memory_order_relaxed);
fsm_update_result = 0;
@ -3021,9 +3021,9 @@ void bgp_process_packet(struct thread *thread)
frr_with_mutex (&peer->io_mtx) {
// more work to do, come back later
if (peer->ibuf->count > 0)
thread_add_event(
bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
event_add_event(bm->master, bgp_process_packet,
peer, 0,
&peer->t_process_packet);
}
}
}
@ -3044,13 +3044,13 @@ void bgp_send_delayed_eor(struct bgp *bgp)
* having the io pthread try to enqueue fsm events or mess with the peer
* struct.
*/
void bgp_packet_process_error(struct thread *thread)
void bgp_packet_process_error(struct event *thread)
{
struct peer *peer;
int code;
peer = THREAD_ARG(thread);
code = THREAD_VAL(thread);
peer = EVENT_ARG(thread);
code = EVENT_VAL(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [Event] BGP error %d on fd %d",

View File

@ -66,13 +66,13 @@ extern void bgp_check_update_delay(struct bgp *);
extern int bgp_packet_set_marker(struct stream *s, uint8_t type);
extern void bgp_packet_set_size(struct stream *s);
extern void bgp_generate_updgrp_packets(struct thread *);
extern void bgp_process_packet(struct thread *);
extern void bgp_generate_updgrp_packets(struct event *event);
extern void bgp_process_packet(struct event *event);
extern void bgp_send_delayed_eor(struct bgp *bgp);
/* Task callback to handle socket error encountered in the io pthread */
void bgp_packet_process_error(struct thread *thread);
void bgp_packet_process_error(struct event *thread);
extern struct bgp_notify
bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify);
extern bool bgp_has_graceful_restart_notification(struct peer *peer);

View File

@ -20,7 +20,7 @@
#include "buffer.h"
#include "sockunion.h"
#include "plist.h"
#include "thread.h"
#include "frrevent.h"
#include "workqueue.h"
#include "queue.h"
#include "memory.h"
@ -2618,14 +2618,14 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
return true;
}
static void bgp_route_select_timer_expire(struct thread *thread)
static void bgp_route_select_timer_expire(struct event *thread)
{
struct afi_safi_info *info;
afi_t afi;
safi_t safi;
struct bgp *bgp;
info = THREAD_ARG(thread);
info = EVENT_ARG(thread);
afi = info->afi;
safi = info->safi;
bgp = info->bgp;
@ -3281,7 +3281,7 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
if (!bgp->t_rmap_def_originate_eval) {
bgp_lock(bgp);
thread_add_timer(
event_add_timer(
bm->master,
update_group_refresh_default_originate_route_map,
bgp, RMAP_DEFAULT_ORIGINATE_EVAL_TIMER,
@ -3381,11 +3381,11 @@ void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
struct afi_safi_info *thread_info;
if (bgp->gr_info[afi][safi].t_route_select) {
struct thread *t = bgp->gr_info[afi][safi].t_route_select;
struct event *t = bgp->gr_info[afi][safi].t_route_select;
thread_info = THREAD_ARG(t);
thread_info = EVENT_ARG(t);
XFREE(MTYPE_TMP, thread_info);
THREAD_OFF(bgp->gr_info[afi][safi].t_route_select);
EVENT_OFF(bgp->gr_info[afi][safi].t_route_select);
}
if (BGP_DEBUG(update, UPDATE_OUT)) {
@ -3433,7 +3433,7 @@ void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
/* If there are more routes to be processed, start the
* selection timer
*/
thread_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
event_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
BGP_ROUTE_SELECT_DELAY,
&bgp->gr_info[afi][safi].t_route_select);
}
@ -3585,11 +3585,11 @@ void bgp_add_eoiu_mark(struct bgp *bgp)
work_queue_add(bgp->process_queue, pqnode);
}
static void bgp_maximum_prefix_restart_timer(struct thread *thread)
static void bgp_maximum_prefix_restart_timer(struct event *thread)
{
struct peer *peer;
peer = THREAD_ARG(thread);
peer = EVENT_ARG(thread);
peer->t_pmax_restart = NULL;
if (bgp_debug_neighbor_events(peer))
@ -5046,7 +5046,7 @@ void bgp_stop_announce_route_timer(struct peer_af *paf)
if (!paf->t_announce_route)
return;
THREAD_OFF(paf->t_announce_route);
EVENT_OFF(paf->t_announce_route);
}
/*
@ -5055,12 +5055,12 @@ void bgp_stop_announce_route_timer(struct peer_af *paf)
* Callback that is invoked when the route announcement timer for a
* peer_af expires.
*/
static void bgp_announce_route_timer_expired(struct thread *t)
static void bgp_announce_route_timer_expired(struct event *t)
{
struct peer_af *paf;
struct peer *peer;
paf = THREAD_ARG(t);
paf = EVENT_ARG(t);
peer = paf->peer;
if (!peer_established(peer))
@ -5109,11 +5109,11 @@ void bgp_announce_route(struct peer *peer, afi_t afi, safi_t safi, bool force)
* multiple peers and the announcement doesn't happen in the
* vty context.
*/
thread_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
(subgrp->peer_count == 1)
? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
: BGP_ANNOUNCE_ROUTE_DELAY_MS,
&paf->t_announce_route);
event_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
(subgrp->peer_count == 1)
? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
: BGP_ANNOUNCE_ROUTE_DELAY_MS,
&paf->t_announce_route);
}
/*
@ -5215,7 +5215,7 @@ static void bgp_soft_reconfig_table(struct peer *peer, afi_t afi, safi_t safi,
* Without splitting the full job into several part,
* vtysh waits for the job to finish before responding to a BGP command
*/
static void bgp_soft_reconfig_table_task(struct thread *thread)
static void bgp_soft_reconfig_table_task(struct event *thread)
{
uint32_t iter, max_iter;
struct bgp_dest *dest;
@ -5225,7 +5225,7 @@ static void bgp_soft_reconfig_table_task(struct thread *thread)
struct prefix_rd *prd;
struct listnode *node, *nnode;
table = THREAD_ARG(thread);
table = EVENT_ARG(thread);
prd = NULL;
max_iter = SOFT_RECONFIG_TASK_MAX_PREFIX;
@ -5263,8 +5263,8 @@ static void bgp_soft_reconfig_table_task(struct thread *thread)
*/
if (dest || table->soft_reconfig_init) {
table->soft_reconfig_init = false;
thread_add_event(bm->master, bgp_soft_reconfig_table_task,
table, 0, &table->soft_reconfig_thread);
event_add_event(bm->master, bgp_soft_reconfig_table_task, table,
0, &table->soft_reconfig_thread);
return;
}
/* we're done, clean up the background iteration context info and
@ -5319,7 +5319,7 @@ void bgp_soft_reconfig_table_task_cancel(const struct bgp *bgp,
list_delete(&ntable->soft_reconfig_peers);
bgp_soft_reconfig_table_flag(ntable, false);
THREAD_OFF(ntable->soft_reconfig_thread);
EVENT_OFF(ntable->soft_reconfig_thread);
}
}
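bgp_soft_reconfig_table_task() above is the cooperative-chunking pattern: process a bounded number of prefixes per invocation, then re-queue the task as an immediate event so vtysh commands are not starved; the stored handle doubles as the in-progress flag tested in bgp_soft_reconfig_in() below. A stripped-down sketch, where struct work, process_one() and MAX_ITER are placeholders rather than FRR symbols:

#define MAX_ITER 25000 /* bound per slice, cf. SOFT_RECONFIG_TASK_MAX_PREFIX */

struct work {
	struct bgp_dest *cursor; /* where the previous slice stopped */
	struct event *t_task;    /* non-NULL while a slice is queued */
};

static struct bgp_dest *process_one(struct bgp_dest *dest); /* placeholder */

static void chunked_task_sketch(struct event *thread)
{
	struct work *w = EVENT_ARG(thread);
	uint32_t iter = 0;

	while (w->cursor && iter++ < MAX_ITER)
		w->cursor = process_one(w->cursor);

	if (w->cursor) /* more left: yield, resume where we stopped */
		event_add_event(bm->master, chunked_task_sketch, w, 0,
				&w->t_task);
}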
@ -5365,9 +5365,9 @@ bool bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi)
bgp_soft_reconfig_table_flag(table, true);
if (!table->soft_reconfig_thread)
thread_add_event(bm->master,
bgp_soft_reconfig_table_task, table, 0,
&table->soft_reconfig_thread);
event_add_event(bm->master,
bgp_soft_reconfig_table_task, table, 0,
&table->soft_reconfig_thread);
/* Cancel bgp_announce_route_timer_expired threads.
* bgp_announce_route_timer_expired threads have been scheduled
* to announce routes as soon as the soft_reconfigure process
@ -11105,7 +11105,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
if (path->peer->t_gr_restart &&
CHECK_FLAG(path->flags, BGP_PATH_STALE)) {
unsigned long gr_remaining =
thread_timer_remain_second(path->peer->t_gr_restart);
event_timer_remain_second(path->peer->t_gr_restart);
if (json_paths) {
json_object_int_add(json_path,
@ -11121,7 +11121,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
bgp_attr_get_community(attr) &&
community_include(bgp_attr_get_community(attr),
COMMUNITY_LLGR_STALE)) {
unsigned long llgr_remaining = thread_timer_remain_second(
unsigned long llgr_remaining = event_timer_remain_second(
path->peer->t_llgr_stale[afi][safi]);
if (json_paths) {
@ -13342,11 +13342,11 @@ static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top,
}
}
static void bgp_table_stats_walker(struct thread *t)
static void bgp_table_stats_walker(struct event *t)
{
struct bgp_dest *dest, *ndest;
struct bgp_dest *top;
struct bgp_table_stats *ts = THREAD_ARG(t);
struct bgp_table_stats *ts = EVENT_ARG(t);
unsigned int space = 0;
if (!(top = bgp_table_top(ts->table)))
@ -13441,7 +13441,7 @@ static int bgp_table_stats_single(struct vty *vty, struct bgp *bgp, afi_t afi,
memset(&ts, 0, sizeof(ts));
ts.table = bgp->rib[afi][safi];
thread_execute(bm->master, bgp_table_stats_walker, &ts, 0);
event_execute(bm->master, bgp_table_stats_walker, &ts, 0);
for (i = 0; i < BGP_STATS_MAX; i++) {
if ((!json && !table_stats_strs[i][TABLE_STATS_IDX_VTY])
@ -13739,11 +13739,11 @@ static void bgp_peer_count_proc(struct bgp_dest *rn, struct peer_pcounts *pc)
}
}
static void bgp_peer_count_walker(struct thread *t)
static void bgp_peer_count_walker(struct event *t)
{
struct bgp_dest *rn, *rm;
const struct bgp_table *table;
struct peer_pcounts *pc = THREAD_ARG(t);
struct peer_pcounts *pc = EVENT_ARG(t);
if (pc->safi == SAFI_MPLS_VPN || pc->safi == SAFI_ENCAP
|| pc->safi == SAFI_EVPN) {
@ -13798,7 +13798,7 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi,
* stats for the thread-walk (i.e. ensure this can't be blamed
* on just vty_read()).
*/
thread_execute(bm->master, bgp_peer_count_walker, &pcounts, 0);
event_execute(bm->master, bgp_peer_count_walker, &pcounts, 0);
if (use_json) {
json_object_string_add(json, "prefixCountsFor", peer->host);
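Both table walkers above move from thread_execute() to event_execute(), which runs the handler synchronously in the caller's context rather than queueing it; the vty handler can therefore print the gathered statistics immediately afterwards. A sketch mirroring bgp_table_stats_single():

static void stats_sketch(struct bgp *bgp, afi_t afi, safi_t safi)
{
	struct bgp_table_stats ts;

	memset(&ts, 0, sizeof(ts));
	ts.table = bgp->rib[afi][safi];

	/* synchronous: returns only after the walker has finished */
	event_execute(bm->master, bgp_table_stats_walker, &ts, 0);
	/* ts is fully populated here and safe to format for the vty */
}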

View File

@ -182,7 +182,7 @@ struct bgp_path_info_extra {
} export;
struct {
struct thread *timer;
struct event *timer;
void *hme; /* encap monitor, if this is a VPN route */
struct prefix_rd
rd; /* import: route's route-distinguisher */

View File

@ -4344,7 +4344,7 @@ static void bgp_route_map_process_update_cb(char *rmap_name)
vpn_policy_routemap_event(rmap_name);
}
void bgp_route_map_update_timer(struct thread *thread)
void bgp_route_map_update_timer(struct event *thread)
{
route_map_walk_update_list(bgp_route_map_process_update_cb);
}
@ -4357,13 +4357,12 @@ static void bgp_route_map_mark_update(const char *rmap_name)
/* If new update is received before the current timer timed out,
* turn it off and start a new timer.
*/
THREAD_OFF(bm->t_rmap_update);
EVENT_OFF(bm->t_rmap_update);
/* rmap_update_timer of 0 means don't do route updates */
if (bm->rmap_update_timer) {
thread_add_timer(bm->master, bgp_route_map_update_timer,
NULL, bm->rmap_update_timer,
&bm->t_rmap_update);
event_add_timer(bm->master, bgp_route_map_update_timer, NULL,
bm->rmap_update_timer, &bm->t_rmap_update);
/* Signal the groups that a route-map update event has
* started */

View File

@ -23,7 +23,7 @@
#include "command.h"
#include "linklist.h"
#include "memory.h"
#include "thread.h"
#include "frrevent.h"
#include "filter.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
@ -36,7 +36,6 @@
#include "northbound_cli.h"
#include "lib/network.h"
#include "lib/thread.h"
#include "rtrlib/rtrlib.h"
#include "hook.h"
#include "libfrr.h"
@ -54,7 +53,7 @@ DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_REVALIDATE, "BGP RPKI Revalidation");
#define RETRY_INTERVAL_DEFAULT 600
#define BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT 3
static struct thread *t_rpki_sync;
static struct event *t_rpki_sync;
#define RPKI_DEBUG(...) \
if (rpki_debug) { \
@ -382,9 +381,9 @@ struct rpki_revalidate_prefix {
safi_t safi;
};
static void rpki_revalidate_prefix(struct thread *thread)
static void rpki_revalidate_prefix(struct event *thread)
{
struct rpki_revalidate_prefix *rrp = THREAD_ARG(thread);
struct rpki_revalidate_prefix *rrp = EVENT_ARG(thread);
struct bgp_dest *match, *node;
match = bgp_table_subtree_lookup(rrp->bgp->rib[rrp->afi][rrp->safi],
@ -403,15 +402,15 @@ static void rpki_revalidate_prefix(struct thread *thread)
XFREE(MTYPE_BGP_RPKI_REVALIDATE, rrp);
}
static void bgpd_sync_callback(struct thread *thread)
static void bgpd_sync_callback(struct event *thread)
{
struct bgp *bgp;
struct listnode *node;
struct prefix prefix;
struct pfx_record rec;
thread_add_read(bm->master, bgpd_sync_callback, NULL,
rpki_sync_socket_bgpd, NULL);
event_add_read(bm->master, bgpd_sync_callback, NULL,
rpki_sync_socket_bgpd, NULL);
if (atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) {
while (read(rpki_sync_socket_bgpd, &rec,
@ -449,8 +448,8 @@ static void bgpd_sync_callback(struct thread *thread)
rrp->prefix = prefix;
rrp->afi = afi;
rrp->safi = safi;
thread_add_event(bm->master, rpki_revalidate_prefix,
rrp, 0, &bgp->t_revalidate[afi][safi]);
event_add_event(bm->master, rpki_revalidate_prefix, rrp,
0, &bgp->t_revalidate[afi][safi]);
}
}
}
@ -490,9 +489,9 @@ struct rpki_revalidate_peer {
struct peer *peer;
};
static void bgp_rpki_revalidate_peer(struct thread *thread)
static void bgp_rpki_revalidate_peer(struct event *thread)
{
struct rpki_revalidate_peer *rvp = THREAD_ARG(thread);
struct rpki_revalidate_peer *rvp = EVENT_ARG(thread);
/*
* Here's the expensive bit of gnomish deviousness
@ -530,7 +529,7 @@ static void revalidate_all_routes(void)
rvp->afi = afi;
rvp->safi = safi;
thread_add_event(
event_add_event(
bm->master, bgp_rpki_revalidate_peer,
rvp, 0,
&peer->t_revalidate_all[afi][safi]);
@ -581,8 +580,8 @@ static void rpki_init_sync_socket(void)
}
thread_add_read(bm->master, bgpd_sync_callback, NULL,
rpki_sync_socket_bgpd, NULL);
event_add_read(bm->master, bgpd_sync_callback, NULL,
rpki_sync_socket_bgpd, NULL);
return;
@ -592,7 +591,7 @@ err:
}
static int bgp_rpki_init(struct thread_master *master)
static int bgp_rpki_init(struct event_loop *master)
{
rpki_debug = false;
rtr_is_running = false;
@ -632,13 +631,13 @@ static int bgp_rpki_module_init(void)
return 0;
}
static void sync_expired(struct thread *thread)
static void sync_expired(struct event *thread)
{
if (!rtr_mgr_conf_in_sync(rtr_config)) {
RPKI_DEBUG("rtr_mgr is not synced, retrying.");
thread_add_timer(bm->master, sync_expired, NULL,
BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
&t_rpki_sync);
event_add_timer(bm->master, sync_expired, NULL,
BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
&t_rpki_sync);
return;
}
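sync_expired() above is the polling-retry shape: the callback checks a condition and, if it is not met yet, re-arms itself; start() below kicks it off with a 0-second delay so the first check runs immediately. A generic sketch, with poll_ready() as a placeholder condition:

static struct event *t_poll;

static bool poll_ready(void); /* placeholder condition */

static void poll_cb_sketch(struct event *thread)
{
	if (!poll_ready()) {
		/* not ready yet: try again in 3 seconds */
		event_add_timer(bm->master, poll_cb_sketch, NULL, 3, &t_poll);
		return;
	}
	/* condition met: proceed and stop rescheduling */
}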
@ -681,7 +680,7 @@ static int start(void)
return ERROR;
}
thread_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
event_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
XFREE(MTYPE_BGP_RPKI_CACHE_GROUP, groups);
@ -694,7 +693,7 @@ static void stop(void)
{
rtr_is_stopping = true;
if (is_running()) {
THREAD_OFF(t_rpki_sync);
EVENT_OFF(t_rpki_sync);
rtr_mgr_stop(rtr_config);
rtr_mgr_free(rtr_config);
rtr_is_running = false;

View File

@ -12,7 +12,7 @@
#include "log.h"
#include "prefix.h"
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"
@ -30,7 +30,7 @@
#include "bgpd/bgp_snmp_bgp4v2.h"
#include "bgpd/bgp_mplsvpn_snmp.h"
static int bgp_snmp_init(struct thread_master *tm)
static int bgp_snmp_init(struct event_loop *tm)
{
smux_init(tm);
bgp_snmp_bgp4_init(tm);

View File

@ -12,7 +12,7 @@
#include "log.h"
#include "prefix.h"
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"
@ -791,7 +791,7 @@ int bgpTrapBackwardTransition(struct peer *peer)
return 0;
}
int bgp_snmp_bgp4_init(struct thread_master *tm)
int bgp_snmp_bgp4_init(struct event_loop *tm)
{
REGISTER_MIB("mibII/bgp", bgp_variables, variable, bgp_oid);
return 0;

View File

@ -71,6 +71,6 @@
extern int bgpTrapEstablished(struct peer *peer);
extern int bgpTrapBackwardTransition(struct peer *peer);
extern int bgp_snmp_bgp4_init(struct thread_master *tm);
extern int bgp_snmp_bgp4_init(struct event_loop *tm);
#endif /* _FRR_BGP_SNMP_BGP4_H_ */

View File

@ -13,7 +13,7 @@
#include "log.h"
#include "prefix.h"
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"
@ -1394,7 +1394,7 @@ static struct variable bgpv2_variables[] = {
{1, 9, 1, BGP4V2_NLRI_PATH_ATTR_UNKNOWN, 2, 16}},
};
int bgp_snmp_bgp4v2_init(struct thread_master *tm)
int bgp_snmp_bgp4v2_init(struct event_loop *tm)
{
REGISTER_MIB("mibII/bgpv2", bgpv2_variables, variable, bgpv2_oid);
return 0;

View File

@ -81,6 +81,6 @@
#define BGP4V2_ESTABLISHED_NOTIFICATION 1
#define BGP4V2_BACKWARD_TRANSITION_NOTIFICATION 2
extern int bgp_snmp_bgp4v2_init(struct thread_master *tm);
extern int bgp_snmp_bgp4v2_init(struct event_loop *tm);
#endif /* _FRR_BGP_SNMP_BGP4V2_H_ */

View File

@ -29,7 +29,7 @@ struct bgp_table {
/* soft_reconfig_table in progress */
bool soft_reconfig_init;
struct thread *soft_reconfig_thread;
struct event *soft_reconfig_thread;
/* list of peers on which soft_reconfig_table has to run */
struct list *soft_reconfig_peers;

View File

@ -12,7 +12,7 @@
#include <zebra.h>
#include "prefix.h"
#include "thread.h"
#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "command.h"
@ -1099,8 +1099,8 @@ static void update_subgroup_delete(struct update_subgroup *subgrp)
if (subgrp->update_group)
UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
THREAD_OFF(subgrp->t_merge_check);
THREAD_OFF(subgrp->t_coalesce);
EVENT_OFF(subgrp->t_merge_check);
EVENT_OFF(subgrp->t_coalesce);
bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
subgroup_clear_table(subgrp);
@ -1418,11 +1418,11 @@ bool update_subgroup_check_merge(struct update_subgroup *subgrp,
/*
* update_subgroup_merge_check_thread_cb
*/
static void update_subgroup_merge_check_thread_cb(struct thread *thread)
static void update_subgroup_merge_check_thread_cb(struct event *thread)
{
struct update_subgroup *subgrp;
subgrp = THREAD_ARG(thread);
subgrp = EVENT_ARG(thread);
subgrp->t_merge_check = NULL;
@ -1449,8 +1449,8 @@ bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
return false;
subgrp->t_merge_check = NULL;
thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
subgrp, 0, &subgrp->t_merge_check);
event_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
subgrp, 0, &subgrp->t_merge_check);
SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
@ -2107,15 +2107,15 @@ update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
return UPDWALK_CONTINUE;
}
void update_group_refresh_default_originate_route_map(struct thread *thread)
void update_group_refresh_default_originate_route_map(struct event *thread)
{
struct bgp *bgp;
char reason[] = "refresh default-originate route-map";
bgp = THREAD_ARG(thread);
bgp = EVENT_ARG(thread);
update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
reason);
THREAD_OFF(bgp->t_rmap_def_originate_eval);
EVENT_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp);
}
@ -2215,7 +2215,7 @@ void subgroup_trigger_write(struct update_subgroup *subgrp)
*/
SUBGRP_FOREACH_PEER (subgrp, paf)
if (peer_established(paf->peer))
thread_add_timer_msec(
event_add_timer_msec(
bm->master, bgp_generate_updgrp_packets,
paf->peer, 0,
&paf->peer->t_generate_updgrp_packets);

View File

@ -197,10 +197,10 @@ struct update_subgroup {
/* announcement attribute hash */
struct hash *hash;
struct thread *t_coalesce;
struct event *t_coalesce;
uint32_t v_coalesce;
struct thread *t_merge_check;
struct event *t_merge_check;
/* table version that the subgroup has caught up to. */
uint64_t version;
@ -373,7 +373,7 @@ extern void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
extern void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx);
extern void update_group_periodic_merge(struct bgp *bgp);
extern void
update_group_refresh_default_originate_route_map(struct thread *thread);
update_group_refresh_default_originate_route_map(struct event *thread);
extern void update_group_start_advtimer(struct bgp *bgp);
extern void update_subgroup_inherit_info(struct update_subgroup *to,

View File

@ -17,7 +17,7 @@
#include "memory.h"
#include "prefix.h"
#include "hash.h"
#include "thread.h"
#include "frrevent.h"
#include "queue.h"
#include "routemap.h"
#include "filter.h"
@ -298,12 +298,12 @@ static void updgrp_show_adj(struct bgp *bgp, afi_t afi, safi_t safi,
update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx);
}
static void subgroup_coalesce_timer(struct thread *thread)
static void subgroup_coalesce_timer(struct event *thread)
{
struct update_subgroup *subgrp;
struct bgp *bgp;
subgrp = THREAD_ARG(thread);
subgrp = EVENT_ARG(thread);
if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes upon coalesce timer expiry(%u ms)",
(SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
@ -329,7 +329,7 @@ static void subgroup_coalesce_timer(struct thread *thread)
SUBGRP_FOREACH_PEER (subgrp, paf) {
peer = PAF_PEER(paf);
THREAD_OFF(peer->t_routeadv);
EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
}
@ -1021,9 +1021,9 @@ void subgroup_announce_all(struct update_subgroup *subgrp)
* We should wait for the coalesce timer. Arm the timer if not done.
*/
if (!subgrp->t_coalesce) {
thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
subgrp, subgrp->v_coalesce,
&subgrp->t_coalesce);
event_add_timer_msec(bm->master, subgroup_coalesce_timer,
subgrp, subgrp->v_coalesce,
&subgrp->t_coalesce);
}
}
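subgroup_announce_all() leans on the handle-as-state convention seen throughout this commit: the event library clears the stored pointer when the timer fires or is cancelled, so a non-NULL subgrp->t_coalesce reliably means "already pending" and double-arming is avoided. The idiom in isolation (names illustrative; the pointer-clearing behaviour is an assumption about the library):

static struct event *t_flush;

static void flush_cb(struct event *thread)
{
	/* by the time this runs, t_flush has been reset to NULL */
}

static void request_flush(struct event_loop *loop)
{
	if (!t_flush) /* the handle doubles as "already pending" state */
		event_add_timer_msec(loop, flush_cb, NULL, 100, &t_flush);
}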

View File

@ -12,7 +12,7 @@
#include <zebra.h>
#include "prefix.h"
#include "thread.h"
#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "command.h"

View File

@ -16,7 +16,7 @@
#include "buffer.h"
#include "linklist.h"
#include "stream.h"
#include "thread.h"
#include "frrevent.h"
#include "log.h"
#include "memory.h"
#include "lib_vty.h"
@ -2194,7 +2194,7 @@ DEFUN (no_bgp_maxmed_onstartup,
/* Cancel max-med onstartup if its on */
if (bgp->t_maxmed_onstartup) {
THREAD_OFF(bgp->t_maxmed_onstartup);
EVENT_OFF(bgp->t_maxmed_onstartup);
bgp->maxmed_onstartup_over = 1;
}
@ -7518,9 +7518,9 @@ DEFUN (bgp_set_route_map_delay_timer,
* fired.
*/
if (!rmap_delay_timer && bm->t_rmap_update) {
THREAD_OFF(bm->t_rmap_update);
thread_execute(bm->master, bgp_route_map_update_timer,
NULL, 0);
EVENT_OFF(bm->t_rmap_update);
event_execute(bm->master, bgp_route_map_update_timer,
NULL, 0);
}
return CMD_SUCCESS;
} else {
@ -12330,7 +12330,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
if (peer->t_gr_stale != NULL) {
json_object_int_add(json_timer,
"stalePathTimerRemaining",
thread_timer_remain_second(
event_timer_remain_second(
peer->t_gr_stale));
}
@ -12351,7 +12351,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
json_object_int_add(
json_timer,
"selectionDeferralTimerRemaining",
thread_timer_remain_second(
event_timer_remain_second(
peer->bgp->gr_info[afi][safi]
.t_select_deferral));
}
@ -12364,7 +12364,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
if (peer->t_gr_stale != NULL)
vty_out(vty,
" Stale Path Remaining(sec): %ld\n",
thread_timer_remain_second(
event_timer_remain_second(
peer->t_gr_stale));
/* Display Configured Selection
* Deferral only when
@ -12379,7 +12379,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
NULL)
vty_out(vty,
" Selection Deferral Time Remaining(sec): %ld\n",
thread_timer_remain_second(
event_timer_remain_second(
peer->bgp->gr_info[afi][safi]
.t_select_deferral));
}
@ -12413,7 +12413,7 @@ static void bgp_show_neighbor_graceful_restart_time(struct vty *vty,
if (p->t_gr_restart != NULL)
json_object_int_add(
json_timer, "restartTimerRemaining",
thread_timer_remain_second(p->t_gr_restart));
event_timer_remain_second(p->t_gr_restart));
json_object_object_add(json, "timers", json_timer);
} else {
@ -12426,10 +12426,10 @@ static void bgp_show_neighbor_graceful_restart_time(struct vty *vty,
p->v_gr_restart);
if (p->t_gr_restart != NULL)
vty_out(vty, " Restart Time Remaining(sec): %ld\n",
thread_timer_remain_second(p->t_gr_restart));
event_timer_remain_second(p->t_gr_restart));
if (p->t_gr_restart != NULL) {
vty_out(vty, " Restart Time Remaining(sec): %ld\n",
thread_timer_remain_second(p->t_gr_restart));
event_timer_remain_second(p->t_gr_restart));
}
}
}
@ -13460,11 +13460,11 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_neigh,
"bgpTimerConfiguredConditionalAdvertisementsSec",
bgp->condition_check_period);
if (thread_is_scheduled(bgp->t_condition_check))
if (event_is_scheduled(bgp->t_condition_check))
json_object_int_add(
json_neigh,
"bgpTimerUntilConditionalAdvertisementsSec",
thread_timer_remain_second(
event_timer_remain_second(
bgp->t_condition_check));
} else {
/* Administrative shutdown. */
@ -13541,10 +13541,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
vty_out(vty,
" Configured conditional advertisements interval is %d seconds\n",
bgp->condition_check_period);
if (thread_is_scheduled(bgp->t_condition_check))
if (event_is_scheduled(bgp->t_condition_check))
vty_out(vty,
" Time until conditional advertisements begin is %lu seconds\n",
thread_timer_remain_second(
event_timer_remain_second(
bgp->t_condition_check));
}
/* Capability. */
@ -14481,13 +14481,13 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (p->t_gr_restart)
json_object_int_add(
json_grace, "gracefulRestartTimerMsecs",
thread_timer_remain_second(p->t_gr_restart) *
event_timer_remain_second(p->t_gr_restart) *
1000);
if (p->t_gr_stale)
json_object_int_add(
json_grace, "gracefulStalepathTimerMsecs",
thread_timer_remain_second(p->t_gr_stale) *
event_timer_remain_second(p->t_gr_stale) *
1000);
/* more gr info in new format */
BGP_SHOW_PEER_GR_CAPABILITY(vty, p, json_grace);
@ -14528,12 +14528,12 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (p->t_gr_restart)
vty_out(vty,
" The remaining time of restart timer is %ld\n",
thread_timer_remain_second(p->t_gr_restart));
event_timer_remain_second(p->t_gr_restart));
if (p->t_gr_stale)
vty_out(vty,
" The remaining time of stalepath timer is %ld\n",
thread_timer_remain_second(p->t_gr_stale));
event_timer_remain_second(p->t_gr_stale));
/* more gr info in new format */
BGP_SHOW_PEER_GR_CAPABILITY(vty, p, NULL);
@ -14767,14 +14767,15 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_neigh, "reducePrefixNumFrom");
json_object_int_add(json_neigh,
"restartInTimerMsec",
thread_timer_remain_second(
p->t_pmax_restart)
* 1000);
event_timer_remain_second(
p->t_pmax_restart) *
1000);
} else
vty_out(vty,
" Reduce the no. of prefix from %s, will restart in %ld seconds\n",
p->host, thread_timer_remain_second(
p->t_pmax_restart));
p->host,
event_timer_remain_second(
p->t_pmax_restart));
} else {
if (use_json)
json_object_boolean_true_add(
@ -14926,19 +14927,18 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (p->t_start)
json_object_int_add(
json_neigh, "nextStartTimerDueInMsecs",
thread_timer_remain_second(p->t_start) * 1000);
event_timer_remain_second(p->t_start) * 1000);
if (p->t_connect)
json_object_int_add(
json_neigh, "nextConnectTimerDueInMsecs",
thread_timer_remain_second(p->t_connect)
* 1000);
event_timer_remain_second(p->t_connect) * 1000);
if (p->t_routeadv) {
json_object_int_add(json_neigh, "mraiInterval",
p->v_routeadv);
json_object_int_add(
json_neigh, "mraiTimerExpireInMsecs",
thread_timer_remain_second(p->t_routeadv)
* 1000);
event_timer_remain_second(p->t_routeadv) *
1000);
}
if (p->password)
json_object_int_add(json_neigh, "authenticationEnabled",
@ -14967,15 +14967,15 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
}
if (p->t_start)
vty_out(vty, "Next start timer due in %ld seconds\n",
thread_timer_remain_second(p->t_start));
event_timer_remain_second(p->t_start));
if (p->t_connect)
vty_out(vty, "Next connect timer due in %ld seconds\n",
thread_timer_remain_second(p->t_connect));
event_timer_remain_second(p->t_connect));
if (p->t_routeadv)
vty_out(vty,
"MRAI (interval %u) timer expires in %ld seconds\n",
p->v_routeadv,
thread_timer_remain_second(p->t_routeadv));
event_timer_remain_second(p->t_routeadv));
if (p->password)
vty_out(vty, "Peer Authentication Enabled\n");
@ -18764,14 +18764,14 @@ static const struct cmd_variable_handler bgp_var_peergroup[] = {
DEFINE_HOOK(bgp_config_end, (struct bgp *bgp), (bgp));
static struct thread *t_bgp_cfg;
static struct event *t_bgp_cfg;
bool bgp_config_inprocess(void)
{
return thread_is_scheduled(t_bgp_cfg);
return event_is_scheduled(t_bgp_cfg);
}
static void bgp_config_finish(struct thread *t)
static void bgp_config_finish(struct event *t)
{
struct listnode *node;
struct bgp *bgp;
@ -18783,9 +18783,9 @@ static void bgp_config_finish(struct thread *t)
static void bgp_config_start(void)
{
#define BGP_PRE_CONFIG_MAX_WAIT_SECONDS 600
THREAD_OFF(t_bgp_cfg);
thread_add_timer(bm->master, bgp_config_finish, NULL,
BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
EVENT_OFF(t_bgp_cfg);
event_add_timer(bm->master, bgp_config_finish, NULL,
BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
}
/* When we receive a hook the configuration is read,
@ -18797,8 +18797,8 @@ static void bgp_config_end(void)
{
#define BGP_POST_CONFIG_DELAY_SECONDS 1
uint32_t bgp_post_config_delay =
thread_is_scheduled(bm->t_rmap_update)
? thread_timer_remain_second(bm->t_rmap_update)
event_is_scheduled(bm->t_rmap_update)
? event_timer_remain_second(bm->t_rmap_update)
: BGP_POST_CONFIG_DELAY_SECONDS;
/* If BGP config processing thread isn't running, then
@ -18807,13 +18807,13 @@ static void bgp_config_end(void)
if (!bgp_config_inprocess())
return;
THREAD_OFF(t_bgp_cfg);
EVENT_OFF(t_bgp_cfg);
/* Start a new timer to make sure we don't send EoR
* before route-maps are processed.
*/
thread_add_timer(bm->master, bgp_config_finish, NULL,
bgp_post_config_delay, &t_bgp_cfg);
event_add_timer(bm->master, bgp_config_finish, NULL,
bgp_post_config_delay, &t_bgp_cfg);
}
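bgp_config_start()/bgp_config_end() form a debounce: calling EVENT_OFF() immediately before event_add_timer() guarantees at most one pending t_bgp_cfg, so every call simply pushes the deadline out. The two-line idiom generalized, as a sketch with the handle passed by reference:

static void rearm_deadline(struct event_loop *loop,
			   void (*cb)(struct event *), long delay_secs,
			   struct event **handle)
{
	EVENT_OFF(*handle); /* drop any earlier deadline */
	event_add_timer(loop, cb, NULL, delay_secs, handle);
}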
static int config_write_interface_one(struct vty *vty, struct vrf *vrf)

View File

@ -13,7 +13,7 @@
#include "sockunion.h"
#include "zclient.h"
#include "routemap.h"
#include "thread.h"
#include "frrevent.h"
#include "queue.h"
#include "memory.h"
#include "lib/json.h"
@ -1046,19 +1046,19 @@ static bool bgp_table_map_apply(struct route_map *map, const struct prefix *p,
return false;
}
static struct thread *bgp_tm_thread_connect;
static struct event *bgp_tm_thread_connect;
static bool bgp_tm_status_connected;
static bool bgp_tm_chunk_obtained;
#define BGP_FLOWSPEC_TABLE_CHUNK 100000
static uint32_t bgp_tm_min, bgp_tm_max, bgp_tm_chunk_size;
struct bgp *bgp_tm_bgp;
static void bgp_zebra_tm_connect(struct thread *t)
static void bgp_zebra_tm_connect(struct event *t)
{
struct zclient *zclient;
int delay = 10, ret = 0;
zclient = THREAD_ARG(t);
zclient = EVENT_ARG(t);
if (bgp_tm_status_connected && zclient->sock > 0)
delay = 60;
else {
@ -1082,8 +1082,8 @@ static void bgp_zebra_tm_connect(struct thread *t)
}
}
}
thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect);
event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect);
}
bool bgp_zebra_tm_chunk_obtained(void)
@ -1113,8 +1113,8 @@ void bgp_zebra_init_tm_connect(struct bgp *bgp)
bgp_tm_min = bgp_tm_max = 0;
bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
bgp_tm_bgp = bgp;
thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect);
event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect);
}
int bgp_zebra_get_table_range(uint32_t chunk_size,
@ -3443,7 +3443,7 @@ void bgp_if_init(void)
hook_register_prio(if_del, 0, bgp_if_delete_hook);
}
void bgp_zebra_init(struct thread_master *master, unsigned short instance)
void bgp_zebra_init(struct event_loop *master, unsigned short instance)
{
zclient_num_connects = 0;

View File

@ -18,8 +18,7 @@
/* Default weight for next hop, if doing weighted ECMP. */
#define BGP_ZEBRA_DEFAULT_NHOP_WEIGHT 1
extern void bgp_zebra_init(struct thread_master *master,
unsigned short instance);
extern void bgp_zebra_init(struct event_loop *master, unsigned short instance);
extern void bgp_if_init(void);
extern void bgp_zebra_init_tm_connect(struct bgp *bgp);
extern uint32_t bgp_zebra_tm_get_id(void);

View File

@ -6,7 +6,7 @@
#include <zebra.h>
#include "prefix.h"
#include "thread.h"
#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "ringbuf.h"
@ -1126,9 +1126,9 @@ static void peer_free(struct peer *peer)
bgp_timer_set(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
thread_cancel_event_ready(bm->master, peer);
event_cancel_event_ready(bm->master, peer);
FOREACH_AFI_SAFI (afi, safi)
THREAD_OFF(peer->t_revalidate_all[afi][safi]);
EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!peer->t_write);
assert(!peer->t_read);
BGP_EVENT_FLUSH(peer);
@ -2467,16 +2467,16 @@ void peer_nsf_stop(struct peer *peer)
FOREACH_AFI_SAFI_NSF (afi, safi) {
peer->nsf[afi][safi] = 0;
THREAD_OFF(peer->t_llgr_stale[afi][safi]);
EVENT_OFF(peer->t_llgr_stale[afi][safi]);
}
if (peer->t_gr_restart) {
THREAD_OFF(peer->t_gr_restart);
EVENT_OFF(peer->t_gr_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart timer stopped", peer);
}
if (peer->t_gr_stale) {
THREAD_OFF(peer->t_gr_stale);
EVENT_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
@ -2516,9 +2516,9 @@ int peer_delete(struct peer *peer)
bgp_keepalives_off(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
thread_cancel_event_ready(bm->master, peer);
event_cancel_event_ready(bm->master, peer);
FOREACH_AFI_SAFI (afi, safi)
THREAD_OFF(peer->t_revalidate_all[afi][safi]);
EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON));
@ -3197,11 +3197,11 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer,
return 0;
}
static void bgp_startup_timer_expire(struct thread *thread)
static void bgp_startup_timer_expire(struct event *thread)
{
struct bgp *bgp;
bgp = THREAD_ARG(thread);
bgp = EVENT_ARG(thread);
bgp->t_startup = NULL;
}
@ -3355,8 +3355,8 @@ static struct bgp *bgp_create(as_t *as, const char *name,
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
thread_add_timer(bm->master, bgp_startup_timer_expire, bgp,
bgp->restart_time, &bgp->t_startup);
event_add_timer(bm->master, bgp_startup_timer_expire, bgp,
bgp->restart_time, &bgp->t_startup);
/* printable name we can use in debug messages */
if (inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
@ -3696,7 +3696,7 @@ void bgp_instance_down(struct bgp *bgp)
/* Stop timers. */
if (bgp->t_rmap_def_originate_eval) {
THREAD_OFF(bgp->t_rmap_def_originate_eval);
EVENT_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp); /* TODO - This timer is started with a lock -
why? */
}
@ -3748,39 +3748,39 @@ int bgp_delete(struct bgp *bgp)
hook_call(bgp_inst_delete, bgp);
FOREACH_AFI_SAFI (afi, safi)
THREAD_OFF(bgp->t_revalidate[afi][safi]);
EVENT_OFF(bgp->t_revalidate[afi][safi]);
THREAD_OFF(bgp->t_condition_check);
THREAD_OFF(bgp->t_startup);
THREAD_OFF(bgp->t_maxmed_onstartup);
THREAD_OFF(bgp->t_update_delay);
THREAD_OFF(bgp->t_establish_wait);
EVENT_OFF(bgp->t_condition_check);
EVENT_OFF(bgp->t_startup);
EVENT_OFF(bgp->t_maxmed_onstartup);
EVENT_OFF(bgp->t_update_delay);
EVENT_OFF(bgp->t_establish_wait);
/* Set flag indicating bgp instance delete in progress */
SET_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS);
/* Delete the graceful restart info */
FOREACH_AFI_SAFI (afi, safi) {
struct thread *t;
struct event *t;
gr_info = &bgp->gr_info[afi][safi];
if (!gr_info)
continue;
t = gr_info->t_select_deferral;
if (t) {
void *info = THREAD_ARG(t);
void *info = EVENT_ARG(t);
XFREE(MTYPE_TMP, info);
}
THREAD_OFF(gr_info->t_select_deferral);
EVENT_OFF(gr_info->t_select_deferral);
t = gr_info->t_route_select;
if (t) {
void *info = THREAD_ARG(t);
void *info = EVENT_ARG(t);
XFREE(MTYPE_TMP, info);
}
THREAD_OFF(gr_info->t_route_select);
EVENT_OFF(gr_info->t_route_select);
}
if (BGP_DEBUG(zebra, ZEBRA)) {
@ -3803,7 +3803,7 @@ int bgp_delete(struct bgp *bgp)
/* Stop timers. */
if (bgp->t_rmap_def_originate_eval) {
THREAD_OFF(bgp->t_rmap_def_originate_eval);
EVENT_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp); /* TODO - This timer is started with a lock -
why? */
}
@ -3897,7 +3897,7 @@ int bgp_delete(struct bgp *bgp)
if (bgp->process_queue)
work_queue_free_and_null(&bgp->process_queue);
thread_master_free_unused(bm->master);
event_master_free_unused(bm->master);
bgp_unlock(bgp); /* initial reference */
return 0;
@ -4528,7 +4528,7 @@ static void peer_flag_modify_action(struct peer *peer, uint64_t flag)
UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
if (peer->t_pmax_restart) {
THREAD_OFF(peer->t_pmax_restart);
EVENT_OFF(peer->t_pmax_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Maximum-prefix restart timer canceled",
@ -7398,7 +7398,7 @@ static bool peer_maximum_prefix_clear_overflow(struct peer *peer)
UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
if (peer->t_pmax_restart) {
THREAD_OFF(peer->t_pmax_restart);
EVENT_OFF(peer->t_pmax_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Maximum-prefix restart timer cancelled",
@ -8010,7 +8010,7 @@ char *peer_uptime(time_t uptime2, char *buf, size_t len, bool use_json,
return buf;
}
void bgp_master_init(struct thread_master *master, const int buffer_size,
void bgp_master_init(struct event_loop *master, const int buffer_size,
struct list *addresses)
{
qobj_init();
@ -8276,7 +8276,7 @@ void bgp_terminate(void)
if (bm->listen_sockets)
list_delete(&bm->listen_sockets);
THREAD_OFF(bm->t_rmap_update);
EVENT_OFF(bm->t_rmap_update);
bgp_mac_finish();
}

View File

@ -98,7 +98,7 @@ struct bgp_master {
struct list *bgp;
/* BGP thread master. */
struct thread_master *master;
struct event_loop *master;
/* Listening sockets */
struct list *listen_sockets;
@ -126,7 +126,7 @@ struct bgp_master {
uint64_t subgrp_idspace;
/* timer to dampen route map changes */
struct thread *t_rmap_update; /* Handle route map updates */
struct event *t_rmap_update; /* Handle route map updates */
uint32_t rmap_update_timer; /* Route map update timer */
#define RMAP_DEFAULT_UPDATE_TIMER 5 /* disabled by default */
@ -266,11 +266,11 @@ struct graceful_restart_info {
/* Count of EOR received */
uint32_t eor_received;
/* Deferral Timer */
struct thread *t_select_deferral;
struct event *t_select_deferral;
/* Routes Deferred */
uint32_t gr_deferred;
/* Best route select */
struct thread *t_route_select;
struct event *t_route_select;
/* AFI, SAFI enabled */
bool af_enabled[AFI_MAX][SAFI_MAX];
/* Route update completed */
@ -406,15 +406,16 @@ struct bgp {
struct as_confed *confed_peers;
int confed_peers_cnt;
struct thread
*t_startup; /* start-up timer on only once at the beginning */
/* start-up timer on only once at the beginning */
struct event *t_startup;
uint32_t v_maxmed_onstartup; /* Duration of max-med on start-up */
#define BGP_MAXMED_ONSTARTUP_UNCONFIGURED 0 /* 0 means off, its the default */
uint32_t maxmed_onstartup_value; /* Max-med value when active on
start-up */
struct thread
*t_maxmed_onstartup; /* non-null when max-med onstartup is on */
/* non-null when max-med onstartup is on */
struct event *t_maxmed_onstartup;
uint8_t maxmed_onstartup_over; /* Flag to make it effective only once */
bool v_maxmed_admin; /* true/false if max-med administrative is on/off
@ -428,9 +429,9 @@ struct bgp {
uint32_t maxmed_value; /* Max-med value when its active */
/* BGP update delay on startup */
struct thread *t_update_delay;
struct thread *t_establish_wait;
struct thread *t_revalidate[AFI_MAX][SAFI_MAX];
struct event *t_update_delay;
struct event *t_establish_wait;
struct event *t_revalidate[AFI_MAX][SAFI_MAX];
uint8_t update_delay_over;
uint8_t main_zebra_update_hold;
@ -590,7 +591,7 @@ struct bgp {
struct hash *pbr_action_hash;
/* timer to re-evaluate neighbor default-originate route-maps */
struct thread *t_rmap_def_originate_eval;
struct event *t_rmap_def_originate_eval;
#define RMAP_DEFAULT_ORIGINATE_EVAL_TIMER 5
/* BGP distance configuration. */
@ -769,7 +770,7 @@ struct bgp {
/* BGP Conditional advertisement */
uint32_t condition_check_period;
uint32_t condition_filter_count;
struct thread *t_condition_check;
struct event *t_condition_check;
/* BGP VPN SRv6 backend */
bool srv6_enabled;
@ -978,7 +979,7 @@ struct peer_af {
/*
* Trigger timer for bgp_announce_route().
*/
struct thread *t_announce_route;
struct event *t_announce_route;
afi_t afi;
safi_t safi;
@ -1509,24 +1510,24 @@ struct peer {
_Atomic uint32_t v_gr_restart;
/* Threads. */
struct thread *t_read;
struct thread *t_write;
struct thread *t_start;
struct thread *t_connect_check_r;
struct thread *t_connect_check_w;
struct thread *t_connect;
struct thread *t_holdtime;
struct thread *t_routeadv;
struct thread *t_delayopen;
struct thread *t_pmax_restart;
struct thread *t_gr_restart;
struct thread *t_gr_stale;
struct thread *t_llgr_stale[AFI_MAX][SAFI_MAX];
struct thread *t_revalidate_all[AFI_MAX][SAFI_MAX];
struct thread *t_generate_updgrp_packets;
struct thread *t_process_packet;
struct thread *t_process_packet_error;
struct thread *t_refresh_stalepath;
struct event *t_read;
struct event *t_write;
struct event *t_start;
struct event *t_connect_check_r;
struct event *t_connect_check_w;
struct event *t_connect;
struct event *t_holdtime;
struct event *t_routeadv;
struct event *t_delayopen;
struct event *t_pmax_restart;
struct event *t_gr_restart;
struct event *t_gr_stale;
struct event *t_llgr_stale[AFI_MAX][SAFI_MAX];
struct event *t_revalidate_all[AFI_MAX][SAFI_MAX];
struct event *t_generate_updgrp_packets;
struct event *t_process_packet;
struct event *t_process_packet_error;
struct event *t_refresh_stalepath;
/* Thread flags. */
_Atomic uint32_t thread_flags;
@ -2155,7 +2156,7 @@ extern char *peer_uptime(time_t uptime2, char *buf, size_t len, bool use_json,
extern int bgp_config_write(struct vty *);
extern void bgp_master_init(struct thread_master *master, const int buffer_size,
extern void bgp_master_init(struct event_loop *master, const int buffer_size,
struct list *addresses);
extern void bgp_init(unsigned short instance);
@ -2363,7 +2364,7 @@ extern int peer_ttl_security_hops_unset(struct peer *);
extern void peer_tx_shutdown_message_set(struct peer *, const char *msg);
extern void peer_tx_shutdown_message_unset(struct peer *);
extern void bgp_route_map_update_timer(struct thread *thread);
extern void bgp_route_map_update_timer(struct event *thread);
extern const char *bgp_get_name_by_role(uint8_t role);
extern enum asnotation_mode bgp_get_asnotation(struct bgp *bgp);

View File

@ -335,8 +335,7 @@ struct rfapi_rfp_cfg {
* return value:
* rfp_start_val rfp returned value passed on rfp_stop and other rfapi calls
--------------------------------------------*/
extern void *rfp_start(struct thread_master *master,
struct rfapi_rfp_cfg **cfgp,
extern void *rfp_start(struct event_loop *master, struct rfapi_rfp_cfg **cfgp,
struct rfapi_rfp_cb_methods **cbmp);
/*------------------------------------------

View File

@ -14,7 +14,7 @@
#include "bgpd/bgp_nexthop.h"
extern void rfapi_init(void);
extern void vnc_zebra_init(struct thread_master *master);
extern void vnc_zebra_init(struct event_loop *master);
extern void vnc_zebra_destroy(void);
extern void rfapi_delete(struct bgp *);

View File

@ -15,7 +15,7 @@
#include "lib/memory.h"
#include "lib/log.h"
#include "lib/skiplist.h"
#include "lib/thread.h"
#include "frrevent.h"
#include "lib/stream.h"
#include "lib/lib_errors.h"
@ -848,10 +848,10 @@ static void rfapiBgpInfoChainFree(struct bgp_path_info *bpi)
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
THREAD_ARG(bpi->extra->vnc.import.timer);
EVENT_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
THREAD_OFF(bpi->extra->vnc.import.timer);
EVENT_OFF(bpi->extra->vnc.import.timer);
}
next = bpi->next;
@ -2345,9 +2345,9 @@ static void rfapiMonitorEncapDelete(struct bgp_path_info *vpn_bpi)
/*
* Timer callback for withdraw
*/
static void rfapiWithdrawTimerVPN(struct thread *t)
static void rfapiWithdrawTimerVPN(struct event *t)
{
struct rfapi_withdraw *wcb = THREAD_ARG(t);
struct rfapi_withdraw *wcb = EVENT_ARG(t);
struct bgp_path_info *bpi = wcb->info;
struct bgp *bgp = bgp_get_default();
const struct prefix *p;
@ -2654,9 +2654,9 @@ rfapiWithdrawEncapUpdateCachedUn(struct rfapi_import_table *import_table,
return 0;
}
static void rfapiWithdrawTimerEncap(struct thread *t)
static void rfapiWithdrawTimerEncap(struct event *t)
{
struct rfapi_withdraw *wcb = THREAD_ARG(t);
struct rfapi_withdraw *wcb = EVENT_ARG(t);
struct bgp_path_info *bpi = wcb->info;
int was_first_route = 0;
struct rfapi_monitor_encap *em;
@ -2739,7 +2739,7 @@ static void
rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
struct agg_node *rn, struct bgp_path_info *bpi,
afi_t afi, safi_t safi,
void (*timer_service_func)(struct thread *))
void (*timer_service_func)(struct event *))
{
uint32_t lifetime;
struct rfapi_withdraw *wcb;
@ -2789,8 +2789,8 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
if (lifetime > UINT32_MAX / 1001) {
/* sub-optimal case, but will probably never happen */
bpi->extra->vnc.import.timer = NULL;
thread_add_timer(bm->master, timer_service_func, wcb, lifetime,
&bpi->extra->vnc.import.timer);
event_add_timer(bm->master, timer_service_func, wcb, lifetime,
&bpi->extra->vnc.import.timer);
} else {
static uint32_t jitter;
uint32_t lifetime_msec;
@ -2805,9 +2805,9 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
lifetime_msec = (lifetime * 1000) + jitter;
bpi->extra->vnc.import.timer = NULL;
thread_add_timer_msec(bm->master, timer_service_func, wcb,
lifetime_msec,
&bpi->extra->vnc.import.timer);
event_add_timer_msec(bm->master, timer_service_func, wcb,
lifetime_msec,
&bpi->extra->vnc.import.timer);
}
/* re-sort route list (BGP_PATH_REMOVED routes are last) */
@ -2831,7 +2831,7 @@ static void rfapiExpireEncapNow(struct rfapi_import_table *it,
struct agg_node *rn, struct bgp_path_info *bpi)
{
struct rfapi_withdraw *wcb;
struct thread t;
struct event t;
/*
* pretend we're an expiring timer
@ -3076,12 +3076,11 @@ static void rfapiBgpInfoFilteredImportEncap(
*/
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb = THREAD_ARG(
struct rfapi_withdraw *wcb = EVENT_ARG(
bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
THREAD_OFF(
bpi->extra->vnc.import.timer);
EVENT_OFF(bpi->extra->vnc.import.timer);
}
if (action == FIF_ACTION_UPDATE) {
@ -3094,7 +3093,7 @@ static void rfapiBgpInfoFilteredImportEncap(
* bpi
*/
struct rfapi_withdraw *wcb;
struct thread t;
struct event t;
/*
* pretend we're an expiring timer
@ -3169,10 +3168,10 @@ static void rfapiBgpInfoFilteredImportEncap(
__func__);
if (bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
THREAD_ARG(bpi->extra->vnc.import.timer);
EVENT_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
THREAD_OFF(bpi->extra->vnc.import.timer);
EVENT_OFF(bpi->extra->vnc.import.timer);
}
rfapiExpireEncapNow(import_table, rn, bpi);
}
@ -3305,7 +3304,7 @@ static void rfapiExpireVpnNow(struct rfapi_import_table *it,
int lockoffset)
{
struct rfapi_withdraw *wcb;
struct thread t;
struct event t;
/*
* pretend we're an expiring timer
@ -3529,12 +3528,11 @@ void rfapiBgpInfoFilteredImportVPN(
*/
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb = THREAD_ARG(
struct rfapi_withdraw *wcb = EVENT_ARG(
bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
THREAD_OFF(
bpi->extra->vnc.import.timer);
EVENT_OFF(bpi->extra->vnc.import.timer);
import_table->holddown_count[afi] -= 1;
RFAPI_UPDATE_ITABLE_COUNT(
@ -3748,10 +3746,10 @@ void rfapiBgpInfoFilteredImportVPN(
__func__);
if (bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
THREAD_ARG(bpi->extra->vnc.import.timer);
EVENT_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
THREAD_OFF(bpi->extra->vnc.import.timer);
EVENT_OFF(bpi->extra->vnc.import.timer);
}
rfapiExpireVpnNow(import_table, rn, bpi, 0);
}
@ -4046,7 +4044,7 @@ static void rfapiProcessPeerDownRt(struct peer *peer,
struct agg_node *rn;
struct bgp_path_info *bpi;
struct agg_table *rt = NULL;
void (*timer_service_func)(struct thread *) = NULL;
void (*timer_service_func)(struct event *) = NULL;
assert(afi == AFI_IP || afi == AFI_IP6);
@ -4485,7 +4483,7 @@ static void rfapiDeleteRemotePrefixesIt(
continue;
if (bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
THREAD_ARG(
EVENT_ARG(
bpi->extra->vnc
.import
.timer);
@ -4498,9 +4496,8 @@ static void rfapiDeleteRemotePrefixesIt(
afi, 1);
XFREE(MTYPE_RFAPI_WITHDRAW,
wcb);
THREAD_OFF(
bpi->extra->vnc.import
.timer);
EVENT_OFF(bpi->extra->vnc.import
.timer);
}
} else {
if (!delete_active)

View File

@ -13,7 +13,7 @@
#ifndef QUAGGA_HGP_RFAPI_IMPORT_H
#define QUAGGA_HGP_RFAPI_IMPORT_H
#include "lib/thread.h"
#include "frrevent.h"
/*
* These are per-rt-import-list

View File

@ -619,7 +619,7 @@ void rfapiMonitorDel(struct bgp *bgp, struct rfapi_descriptor *rfd,
rfapiMonitorDetachImport(m);
}
THREAD_OFF(m->timer);
EVENT_OFF(m->timer);
/*
* remove from rfd list
@ -656,7 +656,7 @@ int rfapiMonitorDelHd(struct rfapi_descriptor *rfd)
rfapiMonitorDetachImport(m);
}
THREAD_OFF(m->timer);
EVENT_OFF(m->timer);
XFREE(MTYPE_RFAPI_MONITOR, m);
rn->info = NULL;
@ -690,7 +690,7 @@ int rfapiMonitorDelHd(struct rfapi_descriptor *rfd)
#endif
}
THREAD_OFF(mon_eth->timer);
EVENT_OFF(mon_eth->timer);
/*
* remove from rfd list
@ -730,9 +730,9 @@ void rfapiMonitorResponseRemovalOn(struct bgp *bgp)
bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE;
}
static void rfapiMonitorTimerExpire(struct thread *t)
static void rfapiMonitorTimerExpire(struct event *t)
{
struct rfapi_monitor_vpn *m = THREAD_ARG(t);
struct rfapi_monitor_vpn *m = EVENT_ARG(t);
/* forget reference to thread, it's gone */
m->timer = NULL;
@ -743,7 +743,7 @@ static void rfapiMonitorTimerExpire(struct thread *t)
static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
{
unsigned long remain = thread_timer_remain_second(m->timer);
unsigned long remain = event_timer_remain_second(m->timer);
/* unexpected case, but avoid wraparound problems below */
if (remain > m->rfd->response_lifetime)
@ -753,7 +753,7 @@ static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
if (m->rfd->response_lifetime - remain < 2)
return;
THREAD_OFF(m->timer);
EVENT_OFF(m->timer);
{
char buf[BUFSIZ];
@ -764,8 +764,8 @@ static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
m->rfd->response_lifetime);
}
thread_add_timer(bm->master, rfapiMonitorTimerExpire, m,
m->rfd->response_lifetime, &m->timer);
event_add_timer(bm->master, rfapiMonitorTimerExpire, m,
m->rfd->response_lifetime, &m->timer);
}
/*
@ -1036,9 +1036,9 @@ void rfapiMonitorMovedUp(struct rfapi_import_table *import_table,
}
}
static void rfapiMonitorEthTimerExpire(struct thread *t)
static void rfapiMonitorEthTimerExpire(struct event *t)
{
struct rfapi_monitor_eth *m = THREAD_ARG(t);
struct rfapi_monitor_eth *m = EVENT_ARG(t);
/* forget reference to thread, it's gone */
m->timer = NULL;
@ -1051,7 +1051,7 @@ static void rfapiMonitorEthTimerExpire(struct thread *t)
static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
{
unsigned long remain = thread_timer_remain_second(m->timer);
unsigned long remain = event_timer_remain_second(m->timer);
/* unexpected case, but avoid wraparound problems below */
if (remain > m->rfd->response_lifetime)
@ -1061,7 +1061,7 @@ static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
if (m->rfd->response_lifetime - remain < 2)
return;
THREAD_OFF(m->timer);
EVENT_OFF(m->timer);
{
char buf[BUFSIZ];
@ -1072,8 +1072,8 @@ static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
m->rfd->response_lifetime);
}
thread_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
m->rfd->response_lifetime, &m->timer);
event_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
m->rfd->response_lifetime, &m->timer);
}
static int mon_eth_cmp(const void *a, const void *b)
@ -1399,7 +1399,7 @@ void rfapiMonitorEthDel(struct bgp *bgp, struct rfapi_descriptor *rfd,
rfapiMonitorEthDetachImport(bgp, val);
}
THREAD_OFF(val->timer);
EVENT_OFF(val->timer);
/*
* remove from rfd list

View File

@ -25,7 +25,7 @@ struct rfapi_monitor_vpn {
#define RFAPI_MON_FLAG_NEEDCALLBACK 0x00000001 /* deferred callback */
// int dcount; /* debugging counter */
struct thread *timer;
struct event *timer;
};
struct rfapi_monitor_encap {
@ -41,7 +41,7 @@ struct rfapi_monitor_eth {
struct rfapi_descriptor *rfd; /* which NVE requested the route */
struct ethaddr macaddr;
uint32_t logical_net_id;
struct thread *timer;
struct event *timer;
};
/*

View File

@ -255,8 +255,8 @@ static void rfapi_info_free(struct rfapi_info *goner)
if (goner->timer) {
struct rfapi_rib_tcb *tcb;
tcb = THREAD_ARG(goner->timer);
THREAD_OFF(goner->timer);
tcb = EVENT_ARG(goner->timer);
EVENT_OFF(goner->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
XFREE(MTYPE_RFAPI_INFO, goner);
@ -278,9 +278,9 @@ struct rfapi_rib_tcb {
/*
* remove route from rib
*/
static void rfapiRibExpireTimer(struct thread *t)
static void rfapiRibExpireTimer(struct event *t)
{
struct rfapi_rib_tcb *tcb = THREAD_ARG(t);
struct rfapi_rib_tcb *tcb = EVENT_ARG(t);
RFAPI_RIB_CHECK_COUNTS(1, 0);
@ -325,8 +325,8 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
struct rfapi_rib_tcb *tcb = NULL;
if (ri->timer) {
tcb = THREAD_ARG(ri->timer);
THREAD_OFF(ri->timer);
tcb = EVENT_ARG(ri->timer);
EVENT_OFF(ri->timer);
} else {
tcb = XCALLOC(MTYPE_RFAPI_RECENT_DELETE,
sizeof(struct rfapi_rib_tcb));
@ -345,8 +345,8 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
vnc_zlog_debug_verbose("%s: rfd %p pfx %pRN life %u", __func__, rfd, rn,
ri->lifetime);
thread_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
&ri->timer);
event_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
&ri->timer);
}
extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */
@ -900,8 +900,8 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
if (ri->timer) {
struct rfapi_rib_tcb *tcb;
tcb = THREAD_ARG(ri->timer);
THREAD_OFF(ri->timer);
tcb = EVENT_ARG(ri->timer);
EVENT_OFF(ri->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
@ -985,8 +985,8 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
if (ori->timer) {
struct rfapi_rib_tcb *tcb;
tcb = THREAD_ARG(ori->timer);
THREAD_OFF(ori->timer);
tcb = EVENT_ARG(ori->timer);
EVENT_OFF(ori->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
@ -1319,8 +1319,8 @@ callback:
if (ri->timer) {
struct rfapi_rib_tcb *tcb;
tcb = THREAD_ARG(ri->timer);
THREAD_OFF(ri->timer);
tcb = EVENT_ARG(ri->timer);
EVENT_OFF(ri->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
RFAPI_RIB_CHECK_COUNTS(0, delete_list->count);

View File

@ -61,7 +61,7 @@ struct rfapi_info {
struct bgp_tea_options *tea_options;
struct rfapi_un_option *un_options;
struct rfapi_vn_option *vn_options;
struct thread *timer;
struct event *timer;
};
/*

View File

@ -516,10 +516,10 @@ void rfapiPrintBi(void *stream, struct bgp_path_info *bpi)
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED) && bpi->extra
&& bpi->extra->vnc.import.timer) {
struct thread *t =
(struct thread *)bpi->extra->vnc.import.timer;
struct event *t = (struct event *)bpi->extra->vnc.import.timer;
r = snprintf(p, REMAIN, " [%4lu] ",
thread_timer_remain_second(t));
event_timer_remain_second(t));
INCP;
} else {
@ -910,7 +910,7 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match)
fp(out, "%-15s %-15s", "", "");
buf_remain[0] = 0;
rfapiFormatSeconds(
thread_timer_remain_second(m->timer),
event_timer_remain_second(m->timer),
buf_remain, BUFSIZ);
fp(out, " %-15s %-10s\n",
inet_ntop(m->p.family, &m->p.u.prefix,
@ -983,7 +983,7 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match)
} else
fp(out, "%-15s %-15s", "", "");
buf_remain[0] = 0;
rfapiFormatSeconds(thread_timer_remain_second(
rfapiFormatSeconds(event_timer_remain_second(
mon_eth->timer),
buf_remain, BUFSIZ);
fp(out, " %-17s %10d %-10s\n",
@ -1114,9 +1114,8 @@ static int rfapiPrintRemoteRegBi(struct bgp *bgp, void *stream,
time_t age;
char buf_age[BUFSIZ];
struct thread *t =
(struct thread *)bpi->extra->vnc.import.timer;
remaining = thread_timer_remain_second(t);
struct event *t = (struct event *)bpi->extra->vnc.import.timer;
remaining = event_timer_remain_second(t);
#ifdef RFAPI_REGISTRATIONS_REPORT_AGE
/*
@ -1174,6 +1173,7 @@ static int rfapiPrintRemoteRegBi(struct bgp *bgp, void *stream,
}
if (tun_type != BGP_ENCAP_TYPE_MPLS && bpi->extra) {
uint32_t l = decode_label(&bpi->extra->label[0]);
if (!MPLS_LABEL_IS_NULL(l)) {
fp(out, " Label: %d", l);
if (nlines == 1)

View File

@ -1692,7 +1692,7 @@ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi,
* export expiration timer is already running on
* this route: cancel it
*/
THREAD_OFF(eti->timer);
EVENT_OFF(eti->timer);
bgp_update(peer, prefix, /* prefix */
0, /* addpath_id */
@ -1704,9 +1704,9 @@ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi,
bgp_attr_unintern(&iattr);
}
static void vncExportWithdrawTimer(struct thread *t)
static void vncExportWithdrawTimer(struct event *t)
{
struct vnc_export_info *eti = THREAD_ARG(t);
struct vnc_export_info *eti = EVENT_ARG(t);
const struct prefix *p = agg_node_get_prefix(eti->node);
/*
@ -1765,8 +1765,8 @@ void vnc_direct_bgp_rh_del_route(struct bgp *bgp, afi_t afi,
if (!eti->timer && eti->lifetime <= INT32_MAX) {
eti->timer = NULL;
thread_add_timer(bm->master, vncExportWithdrawTimer, eti,
eti->lifetime, &eti->timer);
event_add_timer(bm->master, vncExportWithdrawTimer, eti,
eti->lifetime, &eti->timer);
vnc_zlog_debug_verbose(
"%s: set expiration timer for %u seconds", __func__,
eti->lifetime);
@ -1922,7 +1922,7 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi)
* already running on
* this route: cancel it
*/
THREAD_OFF(eti->timer);
EVENT_OFF(eti->timer);
vnc_zlog_debug_verbose(
"%s: calling bgp_update",
@ -1991,7 +1991,7 @@ void vnc_direct_bgp_rh_vpn_disable(struct bgp *bgp, afi_t afi)
ZEBRA_ROUTE_VNC_DIRECT_RH,
BGP_ROUTE_REDISTRIBUTE);
if (eti) {
THREAD_OFF(eti->timer);
EVENT_OFF(eti->timer);
vnc_eti_delete(eti);
}

View File

@ -9,7 +9,7 @@
#define _QUAGGA_VNC_VNC_EXPORT_TABLE_H_
#include "lib/table.h"
#include "lib/thread.h"
#include "frrevent.h"
#include "lib/vty.h"
#include "bgpd/bgpd.h"
@ -29,7 +29,7 @@ struct vnc_export_info {
uint8_t type;
uint8_t subtype;
uint32_t lifetime;
struct thread *timer;
struct event *timer;
};
extern struct agg_node *vnc_etn_get(struct bgp *bgp, vnc_export_type_t type,

View File

@ -890,7 +890,7 @@ static zclient_handler *const vnc_handlers[] = {
* Modeled after bgp_zebra.c's bgp_zebra_init()
* Charriere asks, "Is it possible to carry two?"
*/
void vnc_zebra_init(struct thread_master *master)
void vnc_zebra_init(struct event_loop *master)
{
/* Set default values. */
zclient_vnc = zclient_new(master, &zclient_options_default,

View File

@ -17,7 +17,7 @@
struct rfp_instance_t {
struct rfapi_rfp_cfg rfapi_config;
struct rfapi_rfp_cb_methods rfapi_callbacks;
struct thread_master *master;
struct event_loop *master;
uint32_t config_var;
};
@ -271,7 +271,7 @@ static int rfp_cfg_write_cb(struct vty *vty, void *rfp_start_val)
* rfp_start_val rfp returned value passed on rfp_stop and rfp_cfg_write
*
--------------------------------------------*/
void *rfp_start(struct thread_master *master, struct rfapi_rfp_cfg **cfgp,
void *rfp_start(struct event_loop *master, struct rfapi_rfp_cfg **cfgp,
struct rfapi_rfp_cb_methods **cbmp)
{
memset(&global_rfi, 0, sizeof(global_rfi));

View File

@ -335,16 +335,16 @@ Time/interval formats
FRR library helper formats
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. frrfmt:: %pTH (struct thread *)
.. frrfmt:: %pTH (struct event *)
Print remaining time on timer thread. Interval-printing flag characters
Print remaining time on timer event. Interval-printing flag characters
listed above for ``%pTV`` can be added, e.g. ``%pTHtx``.
``NULL`` pointers are printed as ``-``.
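
A minimal usage sketch (``peer->t_holdtime`` here is just an assumed
``struct event *`` timer reference, not part of this format's API):

.. code-block:: c

   /* prints the remaining run time, or "-" if the timer is NULL */
   zlog_debug("holdtime expires in %pTH", peer->t_holdtime);
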
.. frrfmt:: %pTHD (struct thread *)
.. frrfmt:: %pTHD (struct event *)
Print debugging information for given thread. Sample output:
Print debugging information for the given event. Sample output:
.. code-block:: none

View File

@ -56,9 +56,9 @@ Basic boilerplate:
#include "hook.h"
#include "module.h"
#include "libfrr.h"
#include "thread.h"
#include "frrevent.h"
static int module_late_init(struct thread_master *master)
static int module_late_init(struct event_loop *master)
{
/* Do initialization stuff here */
return 0;

View File

@ -182,7 +182,7 @@ The controller is defined and implemented in `path_pcep_controller.[hc]`.
Part of the controller code runs in the FRR main thread and part runs in its own
FRR pthread started to isolate the main thread from the PCCs' event loop.
To communicate between the threads it uses FRR events, timers and
`thread_execute` calls.
`event_execute` calls.
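
As a rough sketch of that pattern (the handler and argument names are
hypothetical; the call shape follows the `event_execute` uses elsewhere in
this change):

```c
/* run a handler synchronously on the target event loop; the trailing
 * int is passed through to the handler as its value argument */
event_execute(master, pcep_ctrl_event_cb, ctrl_state, 0);
```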
PCC

View File

@ -28,7 +28,7 @@ within the event system are variations on the term "thread". The primary
datastructure that holds the state of an event loop in this system is called a
"threadmaster". Events scheduled on the event loop - what would today be called
an 'event' or 'task' in systems such as libevent - are called "threads" and the
datastructure for them is ``struct thread``. To add to the confusion, these
datastructure for them is ``struct event``. To add to the confusion, these
"threads" have various types, one of which is "event". To hopefully avoid some
of this confusion, this document refers to these "threads" as a 'task' except
where the datastructures are explicitly named. When they are explicitly named,
@ -47,7 +47,7 @@ section. For now it provides basic information necessary to understand the
interplay between the event system and kernel threads.
The core event system is implemented in :file:`lib/thread.[ch]`. The primary
structure is ``struct thread_master``, hereafter referred to as a
structure is ``struct event_loop``, hereafter referred to as a
``threadmaster``. A ``threadmaster`` is a global state object, or context, that
holds all the tasks currently pending execution as well as statistics on tasks
that have already executed. The event system is driven by adding tasks to this
@ -57,7 +57,7 @@ execute. At initialization, a daemon will typically create one
fetch each task and execute it.
These tasks have various types corresponding to their general action. The types
are given by integer macros in :file:`thread.h` and are:
are given by integer macros in :file:`event.h` and are:
``THREAD_READ``
Task which waits for a file descriptor to become ready for reading and then
@ -80,8 +80,8 @@ are given by integer macros in :file:`thread.h` and are:
Type used internally for tasks on the ready queue.
``THREAD_UNUSED``
Type used internally for ``struct thread`` objects that aren't being used.
The event system pools ``struct thread`` to avoid heap allocations; this is
Type used internally for ``struct event`` objects that aren't being used.
The event system pools ``struct event`` to avoid heap allocations; this is
the type they have when they're in the pool.
``THREAD_EXECUTE``
@ -95,9 +95,9 @@ irrelevant for the time being) for the specific type. For example, to add a
::
thread_add_read(struct thread_master *master, int (*handler)(struct thread *), void *arg, int fd, struct thread **ref);
event_add_read(struct event_loop *master, int (*handler)(struct event *), void *arg, int fd, struct event **ref);
The ``struct thread`` is then created and added to the appropriate internal
The ``struct event`` is then created and added to the appropriate internal
datastructure within the ``threadmaster``. Note that the ``READ`` and
``WRITE`` tasks are independent - a ``READ`` task only tests for
readability, for example.
@ -111,13 +111,13 @@ program. When no more tasks are available, the program dies. Typically at
startup the first task added is an I/O task for VTYSH as well as any network
sockets needed for peerings or IPC.
To retrieve the next task to run the program calls ``thread_fetch()``.
``thread_fetch()`` internally computes which task to execute next based on
To retrieve the next task to run the program calls ``event_fetch()``.
``event_fetch()`` internally computes which task to execute next based on
rudimentary priority logic. Events (type ``THREAD_EVENT``) execute with the
highest priority, followed by expired timers and finally I/O tasks (type
``THREAD_READ`` and ``THREAD_WRITE``). When scheduling a task a function and an
arbitrary argument are provided. The task returned from ``thread_fetch()`` is
then executed with ``thread_call()``.
arbitrary argument are provided. The task returned from ``event_fetch()`` is
then executed with ``event_call()``.
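
In sketch form, the resulting main loop looks like this (the general shape
of a daemon's run loop, not the verbatim library code)::

   struct event task;

   while (event_fetch(master, &task))
           event_call(&task);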
The following diagram illustrates a simplified version of this infrastructure.
@ -133,18 +133,18 @@ illustrated at the bottom.
Mapping the general names used in the figure to specific FRR functions:
- ``task`` is ``struct thread *``
- ``fetch`` is ``thread_fetch()``
- ``exec()`` is ``thread_call``
- ``cancel()`` is ``thread_cancel()``
- ``schedule()`` is any of the various task-specific ``thread_add_*`` functions
- ``task`` is ``struct event *``
- ``fetch`` is ``event_fetch()``
- ``exec()`` is ``event_call``
- ``cancel()`` is ``event_cancel()``
- ``schedule()`` is any of the various task-specific ``event_add_*`` functions
Adding tasks is done with various task-specific function-like macros. These
macros wrap underlying functions in :file:`thread.c` to provide additional
information added at compile time, such as the line number the task was
scheduled from, that can be accessed at runtime for debugging, logging and
informational purposes. Each task type has its own specific scheduling function
that follow the naming convention ``thread_add_<type>``; see :file:`thread.h`
that follows the naming convention ``event_add_<type>``; see :file:`event.h`
for details.
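
For instance, arming a one-shot timer follows the same shape as the
``event_add_read()`` prototype above (the callback, ``struct peer`` and the
``t_resend`` reference field are all hypothetical here)::

   static void resend_cb(struct event *ev)
   {
           struct peer *peer = EVENT_ARG(ev);

           resend_updates(peer); /* hypothetical work function */
   }

   /* fires once after 5 seconds; the ref lets the owner cancel it */
   event_add_timer(master, resend_cb, peer, 5, &peer->t_resend);
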
There are some gotchas to keep in mind:
@ -228,7 +228,7 @@ well as *any other pthread*. This serves as the basis for inter-thread
communication and boils down to a slightly more complicated method of message
passing, where the messages are the regular task events as used in the
event-driven model. The only difference is thread cancellation, which requires
calling ``thread_cancel_async()`` instead of ``thread_cancel`` to cancel a task
calling ``event_cancel_async()`` instead of ``event_cancel`` to cancel a task
currently scheduled on a ``threadmaster`` belonging to a different pthread.
This is necessary to avoid race conditions in the specific case where one
pthread wants to guarantee that a task on another pthread is cancelled before

View File

@ -120,7 +120,7 @@ atomic ops & datastructures with other types of locking, e.g. rwlocks.
The ``thread_master`` code currently always holds RCU everywhere, except
while doing the actual ``poll()`` syscall. This is both an optimization as
well as an "easement" into getting RCU going. The current implementation
contract is that any ``struct thread *`` callback is called with a RCU
contract is that any ``struct event *`` callback is called with a RCU
holding depth of 1, and that this is owned by the thread so it may (should)
drop and reacquire it when doing some longer-running work.
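
A sketch of what that contract permits inside a callback
(``rcu_read_lock()``/``rcu_read_unlock()`` are from :file:`lib/frrcu.h`;
the work function is hypothetical)::

   static void long_work_cb(struct event *ev)
   {
           /* entered with the loop's RCU reference held (depth 1) */
           rcu_read_unlock();
           do_expensive_work(EVENT_ARG(ev));
           rcu_read_lock(); /* must be re-held before returning */
   }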

View File

@ -150,8 +150,8 @@ Example::
frr_libfrr:frr_pthread_stop (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
frr_libfrr:frr_pthread_run (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
frr_libfrr:thread_call (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:thread_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:thread_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:event_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:event_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_write (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_read (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_event (loglevel: TRACE_INFO (6)) (type: tracepoint)

View File

@ -13,7 +13,7 @@
#include <zebra.h>
#include "linklist.h"
#include "thread.h"
#include "frrevent.h"
#include "prefix.h"
#include "command.h"
#include "stream.h"
@ -192,7 +192,7 @@ void show_ip_eigrp_neighbor_sub(struct vty *vty, struct eigrp_neighbor *nbr,
vty_out(vty, "%-3u %-17pI4 %-21s", 0, &nbr->src, IF_NAME(nbr->ei));
if (nbr->t_holddown)
vty_out(vty, "%-7lu",
thread_timer_remain_second(nbr->t_holddown));
event_timer_remain_second(nbr->t_holddown));
else
vty_out(vty, "- ");
vty_out(vty, "%-8u %-6u %-5u", 0, 0, EIGRP_PACKET_RETRANS_TIME);

View File

@ -21,7 +21,7 @@
#include "command.h"
#include "prefix.h"
#include "table.h"
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "log.h"
#include "stream.h"
@ -111,11 +111,11 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
// TODO: check Graceful restart after 10sec
/* cancel GR scheduled */
thread_cancel(&(e->t_distribute));
event_cancel(&(e->t_distribute));
/* schedule Graceful restart for whole process in 10sec */
thread_add_timer(master, eigrp_distribute_timer_process, e,
(10), &e->t_distribute);
event_add_timer(master, eigrp_distribute_timer_process, e, (10),
&e->t_distribute);
return;
}
@ -186,10 +186,10 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
// TODO: check Graceful restart after 10sec
/* Cancel GR scheduled */
thread_cancel(&(ei->t_distribute));
event_cancel(&(ei->t_distribute));
/* schedule Graceful restart for interface in 10sec */
thread_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
&ei->t_distribute);
event_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
&ei->t_distribute);
}
/*
@ -242,11 +242,11 @@ void eigrp_distribute_update_all_wrapper(struct access_list *notused)
* Called when the 10sec waiting time expires and
* executes Graceful restart for the whole process
*/
void eigrp_distribute_timer_process(struct thread *thread)
void eigrp_distribute_timer_process(struct event *thread)
{
struct eigrp *eigrp;
eigrp = THREAD_ARG(thread);
eigrp = EVENT_ARG(thread);
/* execute GR for whole process */
eigrp_update_send_process_GR(eigrp, EIGRP_GR_FILTER, NULL);
@ -263,11 +263,11 @@ void eigrp_distribute_timer_process(struct thread *thread)
* Called when the 10sec waiting time expires and
* executes Graceful restart for the interface
*/
void eigrp_distribute_timer_interface(struct thread *thread)
void eigrp_distribute_timer_interface(struct event *thread)
{
struct eigrp_interface *ei;
ei = THREAD_ARG(thread);
ei = EVENT_ARG(thread);
ei->t_distribute = NULL;
/* execute GR for interface */

View File

@ -23,7 +23,7 @@ extern void eigrp_distribute_update(struct distribute_ctx *ctx,
extern void eigrp_distribute_update_interface(struct interface *ifp);
extern void eigrp_distribute_update_all(struct prefix_list *plist);
extern void eigrp_distribute_update_all_wrapper(struct access_list *alist);
extern void eigrp_distribute_timer_process(struct thread *thread);
extern void eigrp_distribute_timer_interface(struct thread *thread);
extern void eigrp_distribute_timer_process(struct event *thread);
extern void eigrp_distribute_timer_interface(struct event *thread);
#endif /* EIGRPD_EIGRP_FILTER_H_ */

View File

@ -53,8 +53,8 @@
*/
#include <zebra.h>
#include <thread.h>
#include "frrevent.h"
#include "prefix.h"
#include "table.h"
#include "memory.h"

View File

@ -16,7 +16,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
@ -66,11 +66,11 @@ static const struct message eigrp_general_tlv_type_str[] = {
* Sends hello packet via multicast for all interfaces eigrp
* is configured for
*/
void eigrp_hello_timer(struct thread *thread)
void eigrp_hello_timer(struct event *thread)
{
struct eigrp_interface *ei;
ei = THREAD_ARG(thread);
ei = EVENT_ARG(thread);
if (IS_DEBUG_EIGRP(0, TIMERS))
zlog_debug("Start Hello Timer (%s) Expire [%u]", IF_NAME(ei),
@ -80,8 +80,8 @@ void eigrp_hello_timer(struct thread *thread)
eigrp_hello_send(ei, EIGRP_HELLO_NORMAL, NULL);
/* Hello timer set. */
thread_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
&ei->t_hello);
event_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
&ei->t_hello);
}
/**
@ -726,8 +726,8 @@ void eigrp_hello_send_ack(struct eigrp_neighbor *nbr)
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
thread_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
event_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
@ -771,12 +771,12 @@ void eigrp_hello_send(struct eigrp_interface *ei, uint8_t flags,
if (ei->eigrp->t_write == NULL) {
if (flags & EIGRP_HELLO_GRACEFUL_SHUTDOWN) {
thread_execute(master, eigrp_write, ei->eigrp,
ei->eigrp->fd);
event_execute(master, eigrp_write, ei->eigrp,
ei->eigrp->fd);
} else {
thread_add_write(master, eigrp_write, ei->eigrp,
ei->eigrp->fd,
&ei->eigrp->t_write);
event_add_write(master, eigrp_write, ei->eigrp,
ei->eigrp->fd,
&ei->eigrp->t_write);
}
}
}

View File

@ -16,7 +16,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "if.h"
@ -251,7 +251,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
/* Set multicast memberships appropriately for new state. */
eigrp_if_set_multicast(ei);
thread_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);
event_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);
/*Prepare metrics*/
metric.bandwidth = eigrp_bandwidth_to_scaled(ei->params.bandwidth);
@ -333,7 +333,7 @@ int eigrp_if_down(struct eigrp_interface *ei)
return 0;
/* Shutdown packet reception and sending */
THREAD_OFF(ei->t_hello);
EVENT_OFF(ei->t_hello);
eigrp_if_stream_unset(ei);
@ -360,7 +360,7 @@ void eigrp_if_stream_unset(struct eigrp_interface *ei)
if (ei->on_write_q) {
listnode_delete(eigrp->oi_write_q, ei);
if (list_isempty(eigrp->oi_write_q))
thread_cancel(&(eigrp->t_write));
event_cancel(&(eigrp->t_write));
ei->on_write_q = 0;
}
}
@ -422,7 +422,7 @@ void eigrp_if_free(struct eigrp_interface *ei, int source)
struct eigrp *eigrp = ei->eigrp;
if (source == INTERFACE_DOWN_BY_VTY) {
thread_cancel(&ei->t_hello);
event_cancel(&ei->t_hello);
eigrp_hello_send(ei, EIGRP_HELLO_GRACEFUL_SHUTDOWN, NULL);
}

View File

@ -27,6 +27,6 @@
/* FSM macros*/
#define EIGRP_FSM_EVENT_SCHEDULE(I, E) \
thread_add_event(master, eigrp_fsm_event, (I), (E))
event_add_event(master, eigrp_fsm_event, (I), (E))
#endif /* _ZEBRA_EIGRP_MACROS_H_ */

View File

@ -17,7 +17,7 @@
#include <lib/version.h>
#include "getopt.h"
#include "thread.h"
#include "frrevent.h"
#include "prefix.h"
#include "linklist.h"
#include "if.h"
@ -76,7 +76,7 @@ struct zebra_privs_t eigrpd_privs = {
struct option longopts[] = {{0}};
/* Master of threads. */
struct thread_master *master;
struct event_loop *master;
/* Forward declaration of daemon info structure. */
static struct frr_daemon_info eigrpd_di;

View File

@ -20,7 +20,7 @@
#include "prefix.h"
#include "memory.h"
#include "command.h"
#include "thread.h"
#include "frrevent.h"
#include "stream.h"
#include "table.h"
#include "log.h"
@ -164,19 +164,19 @@ void eigrp_nbr_delete(struct eigrp_neighbor *nbr)
eigrp_topology_neighbor_down(nbr->ei->eigrp, nbr);
/* Cancel all events. */ /* Thread lookup cost would be negligible. */
thread_cancel_event(master, nbr);
event_cancel_event(master, nbr);
eigrp_fifo_free(nbr->multicast_queue);
eigrp_fifo_free(nbr->retrans_queue);
THREAD_OFF(nbr->t_holddown);
EVENT_OFF(nbr->t_holddown);
if (nbr->ei)
listnode_delete(nbr->ei->nbrs, nbr);
XFREE(MTYPE_EIGRP_NEIGHBOR, nbr);
}
void holddown_timer_expired(struct thread *thread)
void holddown_timer_expired(struct event *thread)
{
struct eigrp_neighbor *nbr = THREAD_ARG(thread);
struct eigrp_neighbor *nbr = EVENT_ARG(thread);
struct eigrp *eigrp = nbr->ei->eigrp;
zlog_info("Neighbor %pI4 (%s) is down: holding time expired", &nbr->src,
@ -210,7 +210,7 @@ void eigrp_nbr_state_set(struct eigrp_neighbor *nbr, uint8_t state)
// hold time..
nbr->v_holddown = EIGRP_HOLD_INTERVAL_DEFAULT;
THREAD_OFF(nbr->t_holddown);
EVENT_OFF(nbr->t_holddown);
/* out with the old */
if (nbr->multicast_queue)
@ -252,24 +252,24 @@ void eigrp_nbr_state_update(struct eigrp_neighbor *nbr)
switch (nbr->state) {
case EIGRP_NEIGHBOR_DOWN: {
/*Start Hold Down Timer for neighbor*/
// THREAD_OFF(nbr->t_holddown);
// THREAD_TIMER_ON(master, nbr->t_holddown,
// EVENT_OFF(nbr->t_holddown);
// EVENT_TIMER_ON(master, nbr->t_holddown,
// holddown_timer_expired,
// nbr, nbr->v_holddown);
break;
}
case EIGRP_NEIGHBOR_PENDING: {
/*Reset Hold Down Timer for neighbor*/
THREAD_OFF(nbr->t_holddown);
thread_add_timer(master, holddown_timer_expired, nbr,
nbr->v_holddown, &nbr->t_holddown);
EVENT_OFF(nbr->t_holddown);
event_add_timer(master, holddown_timer_expired, nbr,
nbr->v_holddown, &nbr->t_holddown);
break;
}
case EIGRP_NEIGHBOR_UP: {
/*Reset Hold Down Timer for neighbor*/
THREAD_OFF(nbr->t_holddown);
thread_add_timer(master, holddown_timer_expired, nbr,
nbr->v_holddown, &nbr->t_holddown);
EVENT_OFF(nbr->t_holddown);
event_add_timer(master, holddown_timer_expired, nbr,
nbr->v_holddown, &nbr->t_holddown);
break;
}
}

View File

@ -24,7 +24,7 @@ extern struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *ei,
extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *ei);
extern void eigrp_nbr_delete(struct eigrp_neighbor *neigh);
extern void holddown_timer_expired(struct thread *thread);
extern void holddown_timer_expired(struct event *thread);
extern int eigrp_neighborship_check(struct eigrp_neighbor *neigh,
struct TLV_Parameter_Type *tlv);

View File

@ -12,7 +12,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "if.h"

View File

@ -20,7 +20,7 @@ extern int eigrp_if_ipmulticast(struct eigrp *, struct prefix *, unsigned int);
extern int eigrp_network_set(struct eigrp *eigrp, struct prefix *p);
extern int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p);
extern void eigrp_hello_timer(struct thread *thread);
extern void eigrp_hello_timer(struct event *thread);
extern void eigrp_if_update(struct interface *);
extern int eigrp_if_add_allspfrouters(struct eigrp *, struct prefix *,
unsigned int);

View File

@ -12,7 +12,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "vty.h"
@ -305,9 +305,9 @@ int eigrp_check_sha256_digest(struct stream *s,
return 1;
}
void eigrp_write(struct thread *thread)
void eigrp_write(struct event *thread)
{
struct eigrp *eigrp = THREAD_ARG(thread);
struct eigrp *eigrp = EVENT_ARG(thread);
struct eigrp_header *eigrph;
struct eigrp_interface *ei;
struct eigrp_packet *ep;
@ -453,13 +453,13 @@ out:
/* If packets still remain in queue, call write thread. */
if (!list_isempty(eigrp->oi_write_q)) {
thread_add_write(master, eigrp_write, eigrp, eigrp->fd,
&eigrp->t_write);
event_add_write(master, eigrp_write, eigrp, eigrp->fd,
&eigrp->t_write);
}
}
/* Starting point of packet process function. */
void eigrp_read(struct thread *thread)
void eigrp_read(struct event *thread)
{
int ret;
struct stream *ibuf;
@ -474,10 +474,10 @@ void eigrp_read(struct thread *thread)
uint16_t length = 0;
/* first of all get interface pointer. */
eigrp = THREAD_ARG(thread);
eigrp = EVENT_ARG(thread);
/* prepare for next packet. */
thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
event_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
stream_reset(eigrp->ibuf);
if (!(ibuf = eigrp_recv_packet(eigrp, eigrp->fd, &ifp, eigrp->ibuf))) {
@ -828,9 +828,9 @@ void eigrp_send_packet_reliably(struct eigrp_neighbor *nbr)
eigrp_fifo_push(nbr->ei->obuf, duplicate);
/*Start retransmission timer*/
thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
EIGRP_PACKET_RETRANS_TIME,
&ep->t_retrans_timer);
event_add_timer(master, eigrp_unack_packet_retrans, nbr,
EIGRP_PACKET_RETRANS_TIME,
&ep->t_retrans_timer);
/*Increment sequence number counter*/
nbr->ei->eigrp->sequence_number++;
@ -840,8 +840,8 @@ void eigrp_send_packet_reliably(struct eigrp_neighbor *nbr)
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
thread_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
event_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
@ -923,7 +923,7 @@ void eigrp_packet_free(struct eigrp_packet *ep)
if (ep->s)
stream_free(ep->s);
THREAD_OFF(ep->t_retrans_timer);
EVENT_OFF(ep->t_retrans_timer);
XFREE(MTYPE_EIGRP_PACKET, ep);
}
@ -970,10 +970,10 @@ static int eigrp_check_network_mask(struct eigrp_interface *ei,
return 0;
}
void eigrp_unack_packet_retrans(struct thread *thread)
void eigrp_unack_packet_retrans(struct event *thread)
{
struct eigrp_neighbor *nbr;
nbr = (struct eigrp_neighbor *)THREAD_ARG(thread);
nbr = (struct eigrp_neighbor *)EVENT_ARG(thread);
struct eigrp_packet *ep;
ep = eigrp_fifo_next(nbr->retrans_queue);
@ -992,24 +992,24 @@ void eigrp_unack_packet_retrans(struct thread *thread)
}
/*Start retransmission timer*/
thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
EIGRP_PACKET_RETRANS_TIME,
&ep->t_retrans_timer);
event_add_timer(master, eigrp_unack_packet_retrans, nbr,
EIGRP_PACKET_RETRANS_TIME,
&ep->t_retrans_timer);
/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
thread_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
event_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
void eigrp_unack_multicast_packet_retrans(struct thread *thread)
void eigrp_unack_multicast_packet_retrans(struct event *thread)
{
struct eigrp_neighbor *nbr;
nbr = (struct eigrp_neighbor *)THREAD_ARG(thread);
nbr = (struct eigrp_neighbor *)EVENT_ARG(thread);
struct eigrp_packet *ep;
ep = eigrp_fifo_next(nbr->multicast_queue);
@ -1027,17 +1027,17 @@ void eigrp_unack_multicast_packet_retrans(struct thread *thread)
}
/*Start retransmission timer*/
thread_add_timer(master, eigrp_unack_multicast_packet_retrans,
nbr, EIGRP_PACKET_RETRANS_TIME,
&ep->t_retrans_timer);
event_add_timer(master, eigrp_unack_multicast_packet_retrans,
nbr, EIGRP_PACKET_RETRANS_TIME,
&ep->t_retrans_timer);
/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
thread_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
event_add_write(master, eigrp_write, nbr->ei->eigrp,
nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}

View File

@ -18,8 +18,8 @@
#define _ZEBRA_EIGRP_PACKET_H
/*Prototypes*/
extern void eigrp_read(struct thread *thread);
extern void eigrp_write(struct thread *thread);
extern void eigrp_read(struct event *thread);
extern void eigrp_write(struct event *thread);
extern struct eigrp_packet *eigrp_packet_new(size_t size,
struct eigrp_neighbor *nbr);
@ -51,8 +51,8 @@ extern uint16_t eigrp_add_authTLV_MD5_to_stream(struct stream *s,
extern uint16_t eigrp_add_authTLV_SHA256_to_stream(struct stream *s,
struct eigrp_interface *ei);
extern void eigrp_unack_packet_retrans(struct thread *thread);
extern void eigrp_unack_multicast_packet_retrans(struct thread *thread);
extern void eigrp_unack_packet_retrans(struct event *thread);
extern void eigrp_unack_multicast_packet_retrans(struct event *thread);
/*
* until there is reason to have their own header, these externs are found in
@ -65,7 +65,7 @@ extern void eigrp_hello_send_ack(struct eigrp_neighbor *nbr);
extern void eigrp_hello_receive(struct eigrp *eigrp, struct ip *iph,
struct eigrp_header *eigrph, struct stream *s,
struct eigrp_interface *ei, int size);
extern void eigrp_hello_timer(struct thread *thread);
extern void eigrp_hello_timer(struct event *thread);
/*
* These externs are found in eigrp_update.c
@ -81,7 +81,7 @@ extern void eigrp_update_send_all(struct eigrp *eigrp,
struct eigrp_interface *exception);
extern void eigrp_update_send_init(struct eigrp_neighbor *nbr);
extern void eigrp_update_send_EOT(struct eigrp_neighbor *nbr);
extern void eigrp_update_send_GR_thread(struct thread *thread);
extern void eigrp_update_send_GR_thread(struct event *thread);
extern void eigrp_update_send_GR(struct eigrp_neighbor *nbr,
enum GR_type gr_type, struct vty *vty);
extern void eigrp_update_send_interface_GR(struct eigrp_interface *ei,

View File

@ -12,7 +12,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"

View File

@ -16,7 +16,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"

View File

@ -12,7 +12,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"

View File

@ -11,7 +11,7 @@
*/
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"

View File

@ -16,7 +16,7 @@
#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-includes.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"

View File

@ -71,9 +71,9 @@ struct eigrp {
struct list *oi_write_q;
/*Threads*/
struct thread *t_write;
struct thread *t_read;
struct thread *t_distribute; /* timer for distribute list */
struct event *t_write;
struct event *t_read;
struct event *t_distribute; /* timer for distribute list */
struct route_table *networks; /* EIGRP config networks. */
@ -165,8 +165,8 @@ struct eigrp_interface {
struct list *nbrs; /* EIGRP Neighbor List */
/* Threads. */
struct thread *t_hello; /* timer */
struct thread *t_distribute; /* timer for distribute list */
struct event *t_hello; /* timer */
struct event *t_distribute; /* timer for distribute list */
int on_write_q;
@ -240,8 +240,8 @@ struct eigrp_neighbor {
uint16_t v_holddown;
/* Threads. */
struct thread *t_holddown;
struct thread *t_nbr_send_gr; /* thread for sending multiple GR packet
struct event *t_holddown;
struct event *t_nbr_send_gr; /* thread for sending multiple GR packet
chunks */
struct eigrp_fifo *retrans_queue;
@ -271,7 +271,7 @@ struct eigrp_packet {
struct in_addr dst;
/*Packet retransmission thread*/
struct thread *t_retrans_timer;
struct event *t_retrans_timer;
/*Packet retransmission counter*/
uint8_t retrans_counter;

View File

@ -16,7 +16,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
@ -895,19 +895,19 @@ static void eigrp_update_send_GR_part(struct eigrp_neighbor *nbr)
*
* Uses nbr_gr_packet_type and t_nbr_send_gr from neighbor.
*/
void eigrp_update_send_GR_thread(struct thread *thread)
void eigrp_update_send_GR_thread(struct event *thread)
{
struct eigrp_neighbor *nbr;
/* get argument from thread */
nbr = THREAD_ARG(thread);
nbr = EVENT_ARG(thread);
/* remove this thread pointer */
/* if there is packet waiting in queue,
* schedule this thread again with small delay */
if (nbr->retrans_queue->count > 0) {
thread_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
10, &nbr->t_nbr_send_gr);
event_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
10, &nbr->t_nbr_send_gr);
return;
}
@ -916,7 +916,7 @@ void eigrp_update_send_GR_thread(struct thread *thread)
/* if it wasn't last chunk, schedule this thread again */
if (nbr->nbr_gr_packet_type != EIGRP_PACKET_PART_LAST) {
thread_execute(master, eigrp_update_send_GR_thread, nbr, 0);
event_execute(master, eigrp_update_send_GR_thread, nbr, 0);
}
}
@ -982,7 +982,7 @@ void eigrp_update_send_GR(struct eigrp_neighbor *nbr, enum GR_type gr_type,
/* indicate, that this is first GR Update packet chunk */
nbr->nbr_gr_packet_type = EIGRP_PACKET_PART_FIRST;
/* execute packet sending in thread */
thread_execute(master, eigrp_update_send_GR_thread, nbr, 0);
event_execute(master, eigrp_update_send_GR_thread, nbr, 0);
}
/**

View File

@ -17,7 +17,7 @@
#include <zebra.h>
#include "memory.h"
#include "thread.h"
#include "frrevent.h"
#include "prefix.h"
#include "table.h"
#include "vty.h"

View File

@ -12,7 +12,7 @@
#include <zebra.h>
#include "thread.h"
#include "frrevent.h"
#include "command.h"
#include "network.h"
#include "prefix.h"
@ -49,7 +49,7 @@ static int eigrp_zebra_read_route(ZAPI_CALLBACK_ARGS);
struct zclient *zclient = NULL;
/* For registering threads. */
extern struct thread_master *master;
extern struct event_loop *master;
struct in_addr router_id_zebra;
/* Router-id update message from zebra. */

Some files were not shown because too many files have changed in this diff.