*: Convert thread_add_XXX functions to event_add_XXX
Signed-off-by: Donald Sharp <sharpd@nvidia.com>
commit 907a2395f4, parent e6685141aa
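
The change is mechanical: every scheduling call keeps its arguments (event loop, handler, callback argument, fd or delay, and the struct event ** slot used for tracking and cancellation) and only the function name changes from thread_add_* to event_add_*. A minimal before/after sketch, with a hypothetical callback and slot name used purely for illustration:

::

	/* before */
	thread_add_read(master, my_read_cb, arg, fd, &t_my_read);
	thread_add_timer_msec(master, my_timer_cb, arg, 200, &t_my_timer);

	/* after: same semantics, new naming */
	event_add_read(master, my_read_cb, arg, fd, &t_my_read);
	event_add_timer_msec(master, my_timer_cb, arg, 200, &t_my_timer);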
@@ -148,9 +148,11 @@ babel_create_routing_process (void)
}

/* Threads. */
-thread_add_read(master, babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
+event_add_read(master, babel_read_protocol, NULL, protocol_socket,
+	&babel_routing_process->t_read);
/* wait a little: zebra will announce interfaces, addresses, routes... */
-thread_add_timer_msec(master, babel_init_routing_process, NULL, 200L, &babel_routing_process->t_update);
+event_add_timer_msec(master, babel_init_routing_process, NULL, 200L,
+	&babel_routing_process->t_update);

/* Distribute list install. */
babel_routing_process->distribute_ctx = distribute_list_ctx_create (vrf_lookup_by_id(VRF_DEFAULT));

@@ -193,7 +195,8 @@ static void babel_read_protocol(struct event *thread)
}

/* re-add thread */
-thread_add_read(master, &babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
+event_add_read(master, &babel_read_protocol, NULL, protocol_socket,
+	&babel_routing_process->t_read);
}

/* Zebra will give some information, especially about interfaces. This function

@@ -483,7 +486,8 @@ babel_set_timer(struct timeval *timeout)
{
long msecs = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
thread_cancel(&(babel_routing_process->t_update));
-thread_add_timer_msec(master, babel_main_loop, NULL, msecs, &babel_routing_process->t_update);
+event_add_timer_msec(master, babel_main_loop, NULL, msecs,
+	&babel_routing_process->t_update);
}

void
bfdd/bfd.c (24 changed lines)
@@ -1957,23 +1957,23 @@ static int bfd_vrf_enable(struct vrf *vrf)
bvrf->bg_echov6 = bp_echov6_socket(vrf);

if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
-		&bvrf->bg_ev[0]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+		&bvrf->bg_ev[0]);
if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
-		&bvrf->bg_ev[1]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+		&bvrf->bg_ev[1]);
if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
-		&bvrf->bg_ev[2]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+		&bvrf->bg_ev[2]);
if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
-		&bvrf->bg_ev[3]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+		&bvrf->bg_ev[3]);
if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
-		&bvrf->bg_ev[4]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+		&bvrf->bg_ev[4]);
if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
-		&bvrf->bg_ev[5]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+		&bvrf->bg_ev[5]);

if (vrf->vrf_id != VRF_DEFAULT) {
	bfdd_zclient_register(vrf->vrf_id);
@@ -702,28 +702,28 @@ static void bfd_sd_reschedule(struct bfd_vrf_global *bvrf, int sd)
{
if (sd == bvrf->bg_shop) {
	THREAD_OFF(bvrf->bg_ev[0]);
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
-		&bvrf->bg_ev[0]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+		&bvrf->bg_ev[0]);
} else if (sd == bvrf->bg_mhop) {
	THREAD_OFF(bvrf->bg_ev[1]);
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
-		&bvrf->bg_ev[1]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+		&bvrf->bg_ev[1]);
} else if (sd == bvrf->bg_shop6) {
	THREAD_OFF(bvrf->bg_ev[2]);
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
-		&bvrf->bg_ev[2]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+		&bvrf->bg_ev[2]);
} else if (sd == bvrf->bg_mhop6) {
	THREAD_OFF(bvrf->bg_ev[3]);
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
-		&bvrf->bg_ev[3]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+		&bvrf->bg_ev[3]);
} else if (sd == bvrf->bg_echo) {
	THREAD_OFF(bvrf->bg_ev[4]);
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
-		&bvrf->bg_ev[4]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+		&bvrf->bg_ev[4]);
} else if (sd == bvrf->bg_echov6) {
	THREAD_OFF(bvrf->bg_ev[5]);
-	thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
-		&bvrf->bg_ev[5]);
+	event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+		&bvrf->bg_ev[5]);
}
}
@@ -375,8 +375,8 @@ int main(int argc, char *argv[])
/* Initialize zebra connection. */
bfdd_zclient_init(&bglobal.bfdd_privs);

-thread_add_read(master, control_accept, NULL, bglobal.bg_csock,
-	&bglobal.bg_csockev);
+event_add_read(master, control_accept, NULL, bglobal.bg_csock,
+	&bglobal.bg_csockev);

/* Install commands. */
bfdd_vty_init();

@@ -154,7 +154,7 @@ void control_accept(struct event *t)

control_new(csock);

-thread_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
+event_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
}

@@ -171,7 +171,7 @@ struct bfd_control_socket *control_new(int sd)
bcs->bcs_notify = 0;

bcs->bcs_sd = sd;
-thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
+event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);

TAILQ_INIT(&bcs->bcs_bcqueue);
TAILQ_INIT(&bcs->bcs_bnplist);

@@ -286,8 +286,8 @@ static int control_queue_dequeue(struct bfd_control_socket *bcs)
bcs->bcs_bout = &bcq->bcq_bcb;

bcs->bcs_outev = NULL;
-thread_add_write(master, control_write, bcs, bcs->bcs_sd,
-	&bcs->bcs_outev);
+event_add_write(master, control_write, bcs, bcs->bcs_sd,
+	&bcs->bcs_outev);

return 1;

@@ -315,8 +315,8 @@ static int control_queue_enqueue(struct bfd_control_socket *bcs,
bcs->bcs_bout = bcb;

/* New messages, active write events. */
-thread_add_write(master, control_write, bcs, bcs->bcs_sd,
-	&bcs->bcs_outev);
+event_add_write(master, control_write, bcs, bcs->bcs_sd,
+	&bcs->bcs_outev);
}

return 0;

@@ -511,7 +511,7 @@ skip_header:

schedule_next_read:
bcs->bcs_ev = NULL;
-thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
+event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
}

static void control_write(struct event *t)

@@ -529,8 +529,8 @@ static void control_write(struct event *t)
if (bwrite < 0) {
	if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
		bcs->bcs_outev = NULL;
-		thread_add_write(master, control_write, bcs,
-			bcs->bcs_sd, &bcs->bcs_outev);
+		event_add_write(master, control_write, bcs, bcs->bcs_sd,
+			&bcs->bcs_outev);
		return;
	}

@@ -543,8 +543,8 @@ static void control_write(struct event *t)
bcb->bcb_left -= bwrite;
if (bcb->bcb_left > 0) {
	bcs->bcs_outev = NULL;
-	thread_add_write(master, control_write, bcs, bcs->bcs_sd,
-		&bcs->bcs_outev);
+	event_add_write(master, control_write, bcs, bcs->bcs_sd,
+		&bcs->bcs_outev);
	return;
}

@@ -429,8 +429,8 @@ static int bfd_dplane_enqueue(struct bfd_dplane_ctx *bdc, const void *buf,

/* Schedule if it is not yet. */
if (bdc->outbufev == NULL)
-	thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
-		&bdc->outbufev);
+	event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
+		&bdc->outbufev);

return 0;
}

@@ -609,7 +609,7 @@ static void bfd_dplane_read(struct event *t)
	return;

stream_pulldown(bdc->inbuf);
-thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
+event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
}

static void _bfd_session_register_dplane(struct hash_bucket *hb, void *arg)

@@ -641,7 +641,7 @@ static struct bfd_dplane_ctx *bfd_dplane_ctx_new(int sock)
if (sock == -1)
	return bdc;

-thread_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);
+event_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);

/* Register all unattached sessions. */
bfd_key_iterate(_bfd_session_register_dplane, bdc);

@@ -682,8 +682,8 @@ static void bfd_dplane_ctx_free(struct bfd_dplane_ctx *bdc)
socket_close(&bdc->sock);
THREAD_OFF(bdc->inbufev);
THREAD_OFF(bdc->outbufev);
-thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
-	&bdc->connectev);
+event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
+	&bdc->connectev);
return;
}

@@ -840,8 +840,8 @@ static void bfd_dplane_accept(struct event *t)
	zlog_debug("%s: new data plane client connected", __func__);

reschedule_and_return:
-thread_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
-	&bglobal.bg_dplane_sockev);
+event_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
+	&bglobal.bg_dplane_sockev);
}

/*

@@ -856,7 +856,7 @@ static void _bfd_dplane_client_bootstrap(struct bfd_dplane_ctx *bdc)
stream_reset(bdc->outbuf);

/* Ask for read notifications. */
-thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
+event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);

/* Remove all sessions then register again to send them all. */
bfd_key_iterate(_bfd_session_unregister_dplane, bdc);

@@ -938,8 +938,8 @@ static void bfd_dplane_client_connect(struct event *t)

/* If we are not connected yet, ask for write notifications. */
bdc->connecting = true;
-thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
-	&bdc->outbufev);
+event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
+	&bdc->outbufev);
} else {
	if (bglobal.debug_dplane)
		zlog_debug("%s: server connection: %d", __func__, sock);

@@ -952,8 +952,8 @@ reschedule_connect:
THREAD_OFF(bdc->inbufev);
THREAD_OFF(bdc->outbufev);
socket_close(&sock);
-thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
-	&bdc->connectev);
+event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
-	&bdc->connectev);
}

static void bfd_dplane_client_init(const struct sockaddr *sa, socklen_t salen)

@@ -974,8 +974,8 @@ static void bfd_dplane_client_init(const struct sockaddr *sa, socklen_t salen)

bdc->client = true;

-thread_add_timer(master, bfd_dplane_client_connect, bdc, 0,
-	&bdc->connectev);
+event_add_timer(master, bfd_dplane_client_connect, bdc, 0,
+	&bdc->connectev);

/* Insert into data plane lists. */
TAILQ_INSERT_TAIL(&bglobal.bg_dplaneq, bdc, entry);

@@ -1067,8 +1067,8 @@ void bfd_dplane_init(const struct sockaddr *sa, socklen_t salen, bool client)
}

bglobal.bg_dplane_sock = sock;
-thread_add_read(master, bfd_dplane_accept, &bglobal, sock,
-	&bglobal.bg_dplane_sockev);
+event_add_read(master, bfd_dplane_accept, &bglobal, sock,
+	&bglobal.bg_dplane_sockev);
}

int bfd_dplane_add_session(struct bfd_session *bs)
bfdd/event.c (14 changed lines)
@@ -36,8 +36,8 @@ void bfd_recvtimer_update(struct bfd_session *bs)

tv_normalize(&tv);

-thread_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
-	&bs->recvtimer_ev);
+event_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
+	&bs->recvtimer_ev);
}

void bfd_echo_recvtimer_update(struct bfd_session *bs)

@@ -54,8 +54,8 @@ void bfd_echo_recvtimer_update(struct bfd_session *bs)

tv_normalize(&tv);

-thread_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
-	&bs->echo_recvtimer_ev);
+event_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
+	&bs->echo_recvtimer_ev);
}

void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter)

@@ -72,7 +72,7 @@ void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter)

tv_normalize(&tv);

-thread_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
+event_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
}

void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter)

@@ -89,8 +89,8 @@ void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter)

tv_normalize(&tv);

-thread_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
-	&bs->echo_xmttimer_ev);
+event_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
+	&bs->echo_xmttimer_ev);
}

void bfd_recvtimer_delete(struct bfd_session *bs)
@@ -1344,8 +1344,8 @@ static void bmp_stats(struct event *thread)
struct timeval tv;

if (bt->stat_msec)
-	thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
-		&bt->t_stats);
+	event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+		&bt->t_stats);

gettimeofday(&tv, NULL);

@@ -1409,7 +1409,7 @@ static void bmp_read(struct event *t)
	return;
}

-thread_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
+event_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
}

static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)

@@ -1485,7 +1485,7 @@ static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
bmp->state = BMP_PeerUp;
bmp->pullwr = pullwr_new(bm->master, bmp_sock, bmp, bmp_wrfill,
	bmp_wrerr);
-thread_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
+event_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
bmp_send_initiation(bmp);

return bmp;

@@ -1499,7 +1499,7 @@ static void bmp_accept(struct event *thread)
int bmp_sock;

/* We continue hearing BMP socket. */
-thread_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
+event_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);

memset(&su, 0, sizeof(union sockunion));

@@ -1721,7 +1721,7 @@ static void bmp_listener_start(struct bmp_listener *bl)
	goto out_sock;

bl->sock = sock;
-thread_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
+event_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
return;
out_sock:
close(sock);

@@ -1980,12 +1980,12 @@ static void bmp_active_setup(struct bmp_active *ba)
ba->curretry = ba->maxretry;

if (ba->socket == -1)
-	thread_add_timer_msec(bm->master, bmp_active_thread, ba,
-		ba->curretry, &ba->t_timer);
+	event_add_timer_msec(bm->master, bmp_active_thread, ba,
+		ba->curretry, &ba->t_timer);
else {
-	thread_add_read(bm->master, bmp_active_thread, ba, ba->socket,
-		&ba->t_read);
-	thread_add_write(bm->master, bmp_active_thread, ba, ba->socket,
+	event_add_read(bm->master, bmp_active_thread, ba, ba->socket,
+		&ba->t_read);
+	event_add_write(bm->master, bmp_active_thread, ba, ba->socket,
		&ba->t_write);
}
}

@@ -2199,8 +2199,8 @@ DEFPY(bmp_stats_cfg,
bt->stat_msec = BMP_STAT_DEFAULT_TIMER;

if (bt->stat_msec)
-	thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
-		&bt->t_stats);
+	event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+		&bt->t_stats);
return CMD_SUCCESS;
}
@@ -168,8 +168,8 @@ static void bgp_conditional_adv_timer(struct event *t)
bgp = THREAD_ARG(t);
assert(bgp);

-thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
-	bgp->condition_check_period, &bgp->t_condition_check);
+event_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
+	bgp->condition_check_period, &bgp->t_condition_check);

/* loop through each peer and check if we have peers with
 * advmap_table_change attribute set, to make sure we send

@@ -329,8 +329,8 @@ void bgp_conditional_adv_enable(struct peer *peer, afi_t afi, safi_t safi)

/* Register for conditional routes polling timer */
if (!thread_is_scheduled(bgp->t_condition_check))
-	thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
-		&bgp->t_condition_check);
+	event_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
+		&bgp->t_condition_check);
}

void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
@@ -107,8 +107,8 @@ static void bgp_reuse_timer(struct event *t)
struct bgp_damp_config *bdc = THREAD_ARG(t);

bdc->t_reuse = NULL;
-thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
-	&bdc->t_reuse);
+event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
+	&bdc->t_reuse);

t_now = monotime(NULL);

@@ -395,8 +395,8 @@ int bgp_damp_enable(struct bgp *bgp, afi_t afi, safi_t safi, time_t half,
bgp_damp_parameter_set(half, reuse, suppress, max, bdc);

/* Register reuse timer. */
-thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
-	&bdc->t_reuse);
+event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
+	&bdc->t_reuse);

return 0;
}
@@ -154,13 +154,13 @@ static int bgp_dump_interval_add(struct bgp_dump *bgp_dump, int interval)
	interval = interval
		- secs_into_day % interval; /* always > 0 */
}
-thread_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
-	interval, &bgp_dump->t_interval);
+event_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
+	interval, &bgp_dump->t_interval);
} else {
	/* One-off dump: execute immediately, don't affect any scheduled
	 * dumps */
-	thread_add_event(bm->master, bgp_dump_interval_func, bgp_dump,
-		0, &bgp_dump->t_interval);
+	event_add_event(bm->master, bgp_dump_interval_func, bgp_dump, 0,
+		&bgp_dump->t_interval);
}

return 0;
@@ -4174,9 +4174,9 @@ static void bgp_evpn_es_cons_checks_timer_start(void)
if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
	zlog_debug("periodic consistency checking started");

-thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
-	BGP_EVPN_CONS_CHECK_INTERVAL,
-	&bgp_mh_info->t_cons_check);
+event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
+	BGP_EVPN_CONS_CHECK_INTERVAL,
+	&bgp_mh_info->t_cons_check);
}

/* queue up the es for background consistency checks */

@@ -4380,7 +4380,7 @@ static void bgp_evpn_run_consistency_checks(struct event *t)
}

/* restart the timer */
-thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
+event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
	BGP_EVPN_CONS_CHECK_INTERVAL,
	&bgp_mh_info->t_cons_check);
}
@@ -343,8 +343,8 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)

bgp_reads_on(peer);
bgp_writes_on(peer);
-thread_add_event(bm->master, bgp_process_packet, peer, 0,
-	&peer->t_process_packet);
+event_add_event(bm->master, bgp_process_packet, peer, 0,
+	&peer->t_process_packet);

return (peer);
}

@@ -583,8 +583,8 @@ void bgp_routeadv_timer(struct event *thread)

peer->synctime = monotime(NULL);

-thread_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
-	&peer->t_generate_updgrp_packets);
+event_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
+	&peer->t_generate_updgrp_packets);

/* MRAI timer will be started again when FIFO is built, no need to
 * do it here.

@@ -820,10 +820,9 @@ static void bgp_graceful_restart_timer_expire(struct event *thread)
bgp_set_llgr_stale(peer, afi, safi);
bgp_clear_stale_route(peer, afi, safi);

-thread_add_timer(bm->master,
-	bgp_llgr_stale_timer_expire, paf,
-	peer->llgr[afi][safi].stale_time,
-	&peer->t_llgr_stale[afi][safi]);
+event_add_timer(bm->master, bgp_llgr_stale_timer_expire,
+	paf, peer->llgr[afi][safi].stale_time,
+	&peer->t_llgr_stale[afi][safi]);

for (ALL_LIST_ELEMENTS(peer->bgp->peer, node, nnode,
	tmp_peer))

@@ -1147,8 +1146,8 @@ static void bgp_maxmed_onstartup_begin(struct bgp *bgp)
zlog_info("Begin maxmed onstartup mode - timer %d seconds",
	bgp->v_maxmed_onstartup);

-thread_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
-	bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);
+event_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
+	bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);

if (!bgp->v_maxmed_admin) {
	bgp->maxmed_active = 1;

@@ -1206,12 +1205,12 @@ static void bgp_update_delay_begin(struct bgp *bgp)
	peer->update_delay_over = 0;

/* Start the update-delay timer */
-thread_add_timer(bm->master, bgp_update_delay_timer, bgp,
-	bgp->v_update_delay, &bgp->t_update_delay);
+event_add_timer(bm->master, bgp_update_delay_timer, bgp,
+	bgp->v_update_delay, &bgp->t_update_delay);

if (bgp->v_establish_wait != bgp->v_update_delay)
-	thread_add_timer(bm->master, bgp_establish_wait_timer, bgp,
-		bgp->v_establish_wait, &bgp->t_establish_wait);
+	event_add_timer(bm->master, bgp_establish_wait_timer, bgp,
+		bgp->v_establish_wait, &bgp->t_establish_wait);

frr_timestamp(3, bgp->update_delay_begin_time,
	sizeof(bgp->update_delay_begin_time));

@@ -1941,10 +1940,10 @@ enum bgp_fsm_state_progress bgp_start(struct peer *peer)
 * bgp_connect_check() as the handler for each and cancel the
 * unused event in that function.
 */
-thread_add_read(bm->master, bgp_connect_check, peer, peer->fd,
-	&peer->t_connect_check_r);
-thread_add_write(bm->master, bgp_connect_check, peer, peer->fd,
-	&peer->t_connect_check_w);
+event_add_read(bm->master, bgp_connect_check, peer, peer->fd,
+	&peer->t_connect_check_r);
+event_add_write(bm->master, bgp_connect_check, peer, peer->fd,
+	&peer->t_connect_check_w);
break;
}
return BGP_FSM_SUCCESS;

@@ -2046,9 +2045,9 @@ static int bgp_start_deferral_timer(struct bgp *bgp, afi_t afi, safi_t safi,
thread_info->safi = safi;
thread_info->bgp = bgp;

-thread_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
-	thread_info, bgp->select_defer_time,
-	&gr_info->t_select_deferral);
+event_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
+	thread_info, bgp->select_defer_time,
+	&gr_info->t_select_deferral);
}
gr_info->eor_required++;
/* Send message to RIB indicating route update pending */
@@ -11,14 +11,14 @@
#define BGP_TIMER_ON(T, F, V)                                                  \
	do {                                                                   \
		if ((peer->status != Deleted))                                 \
-			thread_add_timer(bm->master, (F), peer, (V), &(T));    \
+			event_add_timer(bm->master, (F), peer, (V), &(T));     \
	} while (0)

#define BGP_EVENT_ADD(P, E)                                                    \
	do {                                                                   \
		if ((P)->status != Deleted)                                    \
-			thread_add_event(bm->master, bgp_event, (P), (E),      \
-					 NULL);                                \
+			event_add_event(bm->master, bgp_event, (P), (E),       \
+					NULL);                                 \
	} while (0)

#define BGP_EVENT_FLUSH(P)                                                     \

@@ -27,17 +27,18 @@
		thread_cancel_event_ready(bm->master, (P));                    \
	} while (0)

-#define BGP_UPDATE_GROUP_TIMER_ON(T, F)                                        \
-	do {                                                                   \
-		if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) &&                     \
-		    PEER_ROUTE_ADV_DELAY(peer))                                \
-			thread_add_timer_msec(bm->master, (F), peer,           \
-				(BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * 1000),\
-				(T));                                          \
-		else                                                           \
-			thread_add_timer_msec(bm->master, (F), peer,           \
-					      0, (T));                         \
-	} while (0)                                                            \
+#define BGP_UPDATE_GROUP_TIMER_ON(T, F)                                        \
+	do {                                                                   \
+		if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) &&                     \
+		    PEER_ROUTE_ADV_DELAY(peer))                                \
+			event_add_timer_msec(                                  \
+				bm->master, (F), peer,                         \
+				(BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME *       \
+				 1000),                                        \
+				(T));                                          \
+		else                                                           \
+			event_add_timer_msec(bm->master, (F), peer, 0, (T));   \
+	} while (0)

#define BGP_MSEC_JITTER 10
@@ -55,8 +55,8 @@ void bgp_writes_on(struct peer *peer)
assert(!peer->t_connect_check_w);
assert(peer->fd);

-thread_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
-	&peer->t_write);
+event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
+	&peer->t_write);
SET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}

@@ -85,8 +85,8 @@ void bgp_reads_on(struct peer *peer)
assert(!peer->t_connect_check_w);
assert(peer->fd);

-thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
-	&peer->t_read);
+event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
+	&peer->t_read);

SET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}

@@ -142,8 +142,8 @@ static void bgp_process_writes(struct event *thread)
 * sent in the update message
 */
if (reschedule) {
-	thread_add_write(fpt->master, bgp_process_writes, peer,
-		peer->fd, &peer->t_write);
+	event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
+		&peer->t_write);
} else if (!fatal) {
	BGP_UPDATE_GROUP_TIMER_ON(&peer->t_generate_updgrp_packets,
		bgp_generate_updgrp_packets);

@@ -247,8 +247,8 @@ static void bgp_process_reads(struct event *thread)
/* Handle the error in the main pthread, include the
 * specific state change from 'bgp_read'.
 */
-thread_add_event(bm->master, bgp_packet_process_error,
-	peer, code, &peer->t_process_packet_error);
+event_add_event(bm->master, bgp_packet_process_error, peer,
-	code, &peer->t_process_packet_error);
goto done;
}

@@ -292,11 +292,11 @@ done:
if (!ibuf_full)
	assert(ringbuf_space(peer->ibuf_work) >= peer->max_packet_size);

-thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
-	&peer->t_read);
+event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
+	&peer->t_read);
if (added_pkt)
-	thread_add_event(bm->master, bgp_process_packet, peer, 0,
-		&peer->t_process_packet);
+	event_add_event(bm->master, bgp_process_packet, peer, 0,
+		&peer->t_process_packet);
}

/*
@@ -362,8 +362,8 @@ static void bgp_accept(struct event *thread)
	return;
}

-thread_add_read(bm->master, bgp_accept, listener, accept_sock,
-	&listener->thread);
+event_add_read(bm->master, bgp_accept, listener, accept_sock,
+	&listener->thread);

/* Accept client connection. */
bgp_sock = sockunion_accept(accept_sock, &su);

@@ -861,8 +861,8 @@ static int bgp_listener(int sock, struct sockaddr *sa, socklen_t salen,
listener->bgp = bgp;

memcpy(&listener->su, sa, salen);
-thread_add_read(bm->master, bgp_accept, listener, sock,
-	&listener->thread);
+event_add_read(bm->master, bgp_accept, listener, sock,
+	&listener->thread);
listnode_add(bm->listen_sockets, listener);

return 0;
@@ -811,8 +811,8 @@ void bgp_nht_interface_events(struct peer *peer)
	return;

if (bnc->ifindex)
-	thread_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
-		bnc->ifindex, NULL);
+	event_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
+		bnc->ifindex, NULL);
}

void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
@@ -2595,10 +2595,10 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
}

if (peer_established(peer))
-	thread_add_timer(bm->master,
-		bgp_refresh_stalepath_timer_expire,
-		paf, peer->bgp->stalepath_time,
-		&peer->t_refresh_stalepath);
+	event_add_timer(bm->master,
+		bgp_refresh_stalepath_timer_expire, paf,
+		peer->bgp->stalepath_time,
+		&peer->t_refresh_stalepath);

if (bgp_debug_neighbor_events(peer))
	zlog_debug(

@@ -3021,9 +3021,9 @@ void bgp_process_packet(struct event *thread)
frr_with_mutex (&peer->io_mtx) {
	// more work to do, come back later
	if (peer->ibuf->count > 0)
-		thread_add_event(
-			bm->master, bgp_process_packet, peer, 0,
-			&peer->t_process_packet);
+		event_add_event(bm->master, bgp_process_packet,
+			peer, 0,
+			&peer->t_process_packet);
	}
}
}
@@ -3281,7 +3281,7 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,

if (!bgp->t_rmap_def_originate_eval) {
	bgp_lock(bgp);
-	thread_add_timer(
+	event_add_timer(
		bm->master,
		update_group_refresh_default_originate_route_map,
		bgp, RMAP_DEFAULT_ORIGINATE_EVAL_TIMER,

@@ -3433,7 +3433,7 @@ void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
/* If there are more routes to be processed, start the
 * selection timer
 */
-thread_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
+event_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
	BGP_ROUTE_SELECT_DELAY,
	&bgp->gr_info[afi][safi].t_route_select);
}

@@ -5109,11 +5109,11 @@ void bgp_announce_route(struct peer *peer, afi_t afi, safi_t safi, bool force)
 * multiple peers and the announcement doesn't happen in the
 * vty context.
 */
-thread_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
-	(subgrp->peer_count == 1)
-		? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
-		: BGP_ANNOUNCE_ROUTE_DELAY_MS,
-	&paf->t_announce_route);
+event_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
+	(subgrp->peer_count == 1)
+		? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
+		: BGP_ANNOUNCE_ROUTE_DELAY_MS,
+	&paf->t_announce_route);
}

/*

@@ -5263,8 +5263,8 @@ static void bgp_soft_reconfig_table_task(struct event *thread)
 */
if (dest || table->soft_reconfig_init) {
	table->soft_reconfig_init = false;
-	thread_add_event(bm->master, bgp_soft_reconfig_table_task,
-		table, 0, &table->soft_reconfig_thread);
+	event_add_event(bm->master, bgp_soft_reconfig_table_task, table,
+		0, &table->soft_reconfig_thread);
	return;
}
/* we're done, clean up the background iteration context info and

@@ -5365,9 +5365,9 @@ bool bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi)
bgp_soft_reconfig_table_flag(table, true);

if (!table->soft_reconfig_thread)
-	thread_add_event(bm->master,
-		bgp_soft_reconfig_table_task, table, 0,
-		&table->soft_reconfig_thread);
+	event_add_event(bm->master,
+		bgp_soft_reconfig_table_task, table, 0,
+		&table->soft_reconfig_thread);
/* Cancel bgp_announce_route_timer_expired threads.
 * bgp_announce_route_timer_expired threads have been scheduled
 * to announce routes as soon as the soft_reconfigure process
@@ -4361,9 +4361,8 @@ static void bgp_route_map_mark_update(const char *rmap_name)

/* rmap_update_timer of 0 means don't do route updates */
if (bm->rmap_update_timer) {
-	thread_add_timer(bm->master, bgp_route_map_update_timer,
-		NULL, bm->rmap_update_timer,
-		&bm->t_rmap_update);
+	event_add_timer(bm->master, bgp_route_map_update_timer, NULL,
+		bm->rmap_update_timer, &bm->t_rmap_update);

	/* Signal the groups that a route-map update event has
	 * started */
@@ -409,8 +409,8 @@ static void bgpd_sync_callback(struct event *thread)
struct prefix prefix;
struct pfx_record rec;

-thread_add_read(bm->master, bgpd_sync_callback, NULL,
-	rpki_sync_socket_bgpd, NULL);
+event_add_read(bm->master, bgpd_sync_callback, NULL,
+	rpki_sync_socket_bgpd, NULL);

if (atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) {
	while (read(rpki_sync_socket_bgpd, &rec,

@@ -448,8 +448,8 @@ static void bgpd_sync_callback(struct event *thread)
rrp->prefix = prefix;
rrp->afi = afi;
rrp->safi = safi;
-thread_add_event(bm->master, rpki_revalidate_prefix,
-	rrp, 0, &bgp->t_revalidate[afi][safi]);
+event_add_event(bm->master, rpki_revalidate_prefix, rrp,
+	0, &bgp->t_revalidate[afi][safi]);
}
}
}

@@ -529,7 +529,7 @@ static void revalidate_all_routes(void)
rvp->afi = afi;
rvp->safi = safi;

-thread_add_event(
+event_add_event(
	bm->master, bgp_rpki_revalidate_peer,
	rvp, 0,
	&peer->t_revalidate_all[afi][safi]);

@@ -580,8 +580,8 @@ static void rpki_init_sync_socket(void)
}

-thread_add_read(bm->master, bgpd_sync_callback, NULL,
-	rpki_sync_socket_bgpd, NULL);
+event_add_read(bm->master, bgpd_sync_callback, NULL,
+	rpki_sync_socket_bgpd, NULL);

return;

@@ -635,9 +635,9 @@ static void sync_expired(struct event *thread)
{
if (!rtr_mgr_conf_in_sync(rtr_config)) {
	RPKI_DEBUG("rtr_mgr is not synced, retrying.");
-	thread_add_timer(bm->master, sync_expired, NULL,
-		BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
-		&t_rpki_sync);
+	event_add_timer(bm->master, sync_expired, NULL,
+		BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
+		&t_rpki_sync);
	return;
}

@@ -680,7 +680,7 @@ static int start(void)
	return ERROR;
}

-thread_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
+event_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);

XFREE(MTYPE_BGP_RPKI_CACHE_GROUP, groups);
@@ -1449,8 +1449,8 @@ bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
	return false;

subgrp->t_merge_check = NULL;
-thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
-	subgrp, 0, &subgrp->t_merge_check);
+event_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
+	subgrp, 0, &subgrp->t_merge_check);

SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);

@@ -2215,7 +2215,7 @@ void subgroup_trigger_write(struct update_subgroup *subgrp)
 */
SUBGRP_FOREACH_PEER (subgrp, paf)
	if (peer_established(paf->peer))
-		thread_add_timer_msec(
+		event_add_timer_msec(
			bm->master, bgp_generate_updgrp_packets,
			paf->peer, 0,
			&paf->peer->t_generate_updgrp_packets);
@@ -1021,9 +1021,9 @@ void subgroup_announce_all(struct update_subgroup *subgrp)
 * We should wait for the coalesce timer. Arm the timer if not done.
 */
if (!subgrp->t_coalesce) {
-	thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
-		subgrp, subgrp->v_coalesce,
-		&subgrp->t_coalesce);
+	event_add_timer_msec(bm->master, subgroup_coalesce_timer,
+		subgrp, subgrp->v_coalesce,
+		&subgrp->t_coalesce);
}
}
@@ -18784,8 +18784,8 @@ static void bgp_config_start(void)
{
#define BGP_PRE_CONFIG_MAX_WAIT_SECONDS 600
THREAD_OFF(t_bgp_cfg);
-thread_add_timer(bm->master, bgp_config_finish, NULL,
-	BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
+event_add_timer(bm->master, bgp_config_finish, NULL,
+	BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
}

/* When we receive a hook the configuration is read,

@@ -18812,8 +18812,8 @@ static void bgp_config_end(void)
/* Start a new timer to make sure we don't send EoR
 * before route-maps are processed.
 */
-thread_add_timer(bm->master, bgp_config_finish, NULL,
-	bgp_post_config_delay, &t_bgp_cfg);
+event_add_timer(bm->master, bgp_config_finish, NULL,
+	bgp_post_config_delay, &t_bgp_cfg);
}

static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
@@ -1082,8 +1082,8 @@ static void bgp_zebra_tm_connect(struct event *t)
		}
	}
}
-thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
-	&bgp_tm_thread_connect);
+event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
+	&bgp_tm_thread_connect);
}

bool bgp_zebra_tm_chunk_obtained(void)

@@ -1113,8 +1113,8 @@ void bgp_zebra_init_tm_connect(struct bgp *bgp)
bgp_tm_min = bgp_tm_max = 0;
bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
bgp_tm_bgp = bgp;
-thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
-	&bgp_tm_thread_connect);
+event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
+	&bgp_tm_thread_connect);
}

int bgp_zebra_get_table_range(uint32_t chunk_size,
@@ -3355,8 +3355,8 @@ static struct bgp *bgp_create(as_t *as, const char *name,
if (name)
	bgp->name = XSTRDUP(MTYPE_BGP, name);

-thread_add_timer(bm->master, bgp_startup_timer_expire, bgp,
-	bgp->restart_time, &bgp->t_startup);
+event_add_timer(bm->master, bgp_startup_timer_expire, bgp,
+	bgp->restart_time, &bgp->t_startup);

/* printable name we can use in debug messages */
if (inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
@@ -2789,8 +2789,8 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
if (lifetime > UINT32_MAX / 1001) {
	/* sub-optimal case, but will probably never happen */
	bpi->extra->vnc.import.timer = NULL;
-	thread_add_timer(bm->master, timer_service_func, wcb, lifetime,
-		&bpi->extra->vnc.import.timer);
+	event_add_timer(bm->master, timer_service_func, wcb, lifetime,
+		&bpi->extra->vnc.import.timer);
} else {
	static uint32_t jitter;
	uint32_t lifetime_msec;

@@ -2805,9 +2805,9 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
	lifetime_msec = (lifetime * 1000) + jitter;

	bpi->extra->vnc.import.timer = NULL;
-	thread_add_timer_msec(bm->master, timer_service_func, wcb,
-		lifetime_msec,
-		&bpi->extra->vnc.import.timer);
+	event_add_timer_msec(bm->master, timer_service_func, wcb,
+		lifetime_msec,
+		&bpi->extra->vnc.import.timer);
}

/* re-sort route list (BGP_PATH_REMOVED routes are last) */

@@ -764,8 +764,8 @@ static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
	m->rfd->response_lifetime);
}

-thread_add_timer(bm->master, rfapiMonitorTimerExpire, m,
-	m->rfd->response_lifetime, &m->timer);
+event_add_timer(bm->master, rfapiMonitorTimerExpire, m,
+	m->rfd->response_lifetime, &m->timer);
}

/*

@@ -1072,8 +1072,8 @@ static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
	m->rfd->response_lifetime);
}

-thread_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
-	m->rfd->response_lifetime, &m->timer);
+event_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
+	m->rfd->response_lifetime, &m->timer);
}

static int mon_eth_cmp(const void *a, const void *b)
@@ -345,8 +345,8 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
vnc_zlog_debug_verbose("%s: rfd %p pfx %pRN life %u", __func__, rfd, rn,
	ri->lifetime);

-thread_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
-	&ri->timer);
+event_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
+	&ri->timer);
}

extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */

@@ -1765,8 +1765,8 @@ void vnc_direct_bgp_rh_del_route(struct bgp *bgp, afi_t afi,

if (!eti->timer && eti->lifetime <= INT32_MAX) {
	eti->timer = NULL;
-	thread_add_timer(bm->master, vncExportWithdrawTimer, eti,
-		eti->lifetime, &eti->timer);
+	event_add_timer(bm->master, vncExportWithdrawTimer, eti,
+		eti->lifetime, &eti->timer);
	vnc_zlog_debug_verbose(
		"%s: set expiration timer for %u seconds", __func__,
		eti->lifetime);
@@ -95,7 +95,7 @@ irrelevant for the time being) for the specific type. For example, to add a

::

-   thread_add_read(struct thread_master *master, int (*handler)(struct event *), void *arg, int fd, struct event **ref);
+   event_add_read(struct thread_master *master, int (*handler)(struct event *), void *arg, int fd, struct event **ref);

The ``struct event`` is then created and added to the appropriate internal
datastructure within the ``threadmaster``. Note that the ``READ`` and

@@ -137,14 +137,14 @@ Mapping the general names used in the figure to specific FRR functions:
- ``fetch`` is ``thread_fetch()``
- ``exec()`` is ``thread_call``
- ``cancel()`` is ``thread_cancel()``
-- ``schedule()`` is any of the various task-specific ``thread_add_*`` functions
+- ``schedule()`` is any of the various task-specific ``event_add_*`` functions

Adding tasks is done with various task-specific function-like macros. These
macros wrap underlying functions in :file:`thread.c` to provide additional
information added at compile time, such as the line number the task was
scheduled from, that can be accessed at runtime for debugging, logging and
informational purposes. Each task type has its own specific scheduling function
-that follow the naming convention ``thread_add_<type>``; see :file:`event.h`
+that follow the naming convention ``event_add_<type>``; see :file:`event.h`
for details.

There are some gotchas to keep in mind:
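
Putting the renamed API together with the pattern this commit touches everywhere: a handler receives the ``struct event *``, pulls its argument with ``THREAD_ARG()``, does its work, and re-arms itself through the same ``struct event **`` slot; cancellation still goes through ``THREAD_OFF()``/``thread_cancel()``, as the hunks above show. A sketch of that pattern follows, with ``my_ctx``, ``my_sock_read`` and ``t_my_read`` invented purely for illustration::

	/* illustrative only; these names do not exist in the FRR tree */
	extern struct thread_master *master;	/* the daemon's event loop */

	struct my_ctx {
		int fd;
	};

	static struct event *t_my_read;	/* slot used to track/cancel the task */

	static void my_sock_read(struct event *thread)
	{
		struct my_ctx *ctx = THREAD_ARG(thread);

		/* ... read from ctx->fd and process the data ... */

		/* re-arm through the same slot, as babel_read_protocol()
		 * and bmp_read() do in the hunks above */
		event_add_read(master, my_sock_read, ctx, ctx->fd, &t_my_read);
	}

	static void my_sock_start(struct my_ctx *ctx)
	{
		event_add_read(master, my_sock_read, ctx, ctx->fd, &t_my_read);
	}

	static void my_sock_stop(void)
	{
		/* cancellation macros are unchanged by this commit */
		THREAD_OFF(t_my_read);
	}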
@@ -114,8 +114,8 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
thread_cancel(&(e->t_distribute));

/* schedule Graceful restart for whole process in 10sec */
-thread_add_timer(master, eigrp_distribute_timer_process, e,
-	(10), &e->t_distribute);
+event_add_timer(master, eigrp_distribute_timer_process, e, (10),
+	&e->t_distribute);

return;
}

@@ -188,8 +188,8 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
/* Cancel GR scheduled */
thread_cancel(&(ei->t_distribute));
/* schedule Graceful restart for interface in 10sec */
-thread_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
-	&ei->t_distribute);
+event_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
+	&ei->t_distribute);
}

/*
@@ -80,8 +80,8 @@ void eigrp_hello_timer(struct event *thread)
eigrp_hello_send(ei, EIGRP_HELLO_NORMAL, NULL);

/* Hello timer set. */
-thread_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
-	&ei->t_hello);
+event_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
+	&ei->t_hello);
}

/**

@@ -726,8 +726,8 @@ void eigrp_hello_send_ack(struct eigrp_neighbor *nbr)
	listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
	nbr->ei->on_write_q = 1;
}
-thread_add_write(master, eigrp_write, nbr->ei->eigrp,
-	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+event_add_write(master, eigrp_write, nbr->ei->eigrp,
+	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}

@@ -774,9 +774,9 @@ void eigrp_hello_send(struct eigrp_interface *ei, uint8_t flags,
	thread_execute(master, eigrp_write, ei->eigrp,
		ei->eigrp->fd);
} else {
-	thread_add_write(master, eigrp_write, ei->eigrp,
-		ei->eigrp->fd,
-		&ei->eigrp->t_write);
+	event_add_write(master, eigrp_write, ei->eigrp,
+		ei->eigrp->fd,
+		&ei->eigrp->t_write);
}
}
}
@@ -251,7 +251,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
/* Set multicast memberships appropriately for new state. */
eigrp_if_set_multicast(ei);

-thread_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);
+event_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);

/*Prepare metrics*/
metric.bandwidth = eigrp_bandwidth_to_scaled(ei->params.bandwidth);
@@ -27,6 +27,6 @@

/* FSM macros*/
#define EIGRP_FSM_EVENT_SCHEDULE(I, E)                                         \
-	thread_add_event(master, eigrp_fsm_event, (I), (E))
+	event_add_event(master, eigrp_fsm_event, (I), (E))

#endif /* _ZEBRA_EIGRP_MACROS_H_ */
@@ -261,15 +261,15 @@ void eigrp_nbr_state_update(struct eigrp_neighbor *nbr)
case EIGRP_NEIGHBOR_PENDING: {
	/*Reset Hold Down Timer for neighbor*/
	THREAD_OFF(nbr->t_holddown);
-	thread_add_timer(master, holddown_timer_expired, nbr,
-		nbr->v_holddown, &nbr->t_holddown);
+	event_add_timer(master, holddown_timer_expired, nbr,
+		nbr->v_holddown, &nbr->t_holddown);
	break;
}
case EIGRP_NEIGHBOR_UP: {
	/*Reset Hold Down Timer for neighbor*/
	THREAD_OFF(nbr->t_holddown);
-	thread_add_timer(master, holddown_timer_expired, nbr,
-		nbr->v_holddown, &nbr->t_holddown);
+	event_add_timer(master, holddown_timer_expired, nbr,
+		nbr->v_holddown, &nbr->t_holddown);
	break;
}
}
@@ -453,8 +453,8 @@ out:

/* If packets still remain in queue, call write thread. */
if (!list_isempty(eigrp->oi_write_q)) {
-	thread_add_write(master, eigrp_write, eigrp, eigrp->fd,
-		&eigrp->t_write);
+	event_add_write(master, eigrp_write, eigrp, eigrp->fd,
+		&eigrp->t_write);
}
}

@@ -477,7 +477,7 @@ void eigrp_read(struct event *thread)
eigrp = THREAD_ARG(thread);

/* prepare for next packet. */
-thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
+event_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);

stream_reset(eigrp->ibuf);
if (!(ibuf = eigrp_recv_packet(eigrp, eigrp->fd, &ifp, eigrp->ibuf))) {

@@ -828,9 +828,9 @@ void eigrp_send_packet_reliably(struct eigrp_neighbor *nbr)
eigrp_fifo_push(nbr->ei->obuf, duplicate);

/*Start retransmission timer*/
-thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
-	EIGRP_PACKET_RETRANS_TIME,
-	&ep->t_retrans_timer);
+event_add_timer(master, eigrp_unack_packet_retrans, nbr,
+	EIGRP_PACKET_RETRANS_TIME,
+	&ep->t_retrans_timer);

/*Increment sequence number counter*/
nbr->ei->eigrp->sequence_number++;

@@ -840,8 +840,8 @@ void eigrp_send_packet_reliably(struct eigrp_neighbor *nbr)
	listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
	nbr->ei->on_write_q = 1;
}
-thread_add_write(master, eigrp_write, nbr->ei->eigrp,
-	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+event_add_write(master, eigrp_write, nbr->ei->eigrp,
+	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}

@@ -992,17 +992,17 @@ void eigrp_unack_packet_retrans(struct event *thread)
}

/*Start retransmission timer*/
-thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
-	EIGRP_PACKET_RETRANS_TIME,
-	&ep->t_retrans_timer);
+event_add_timer(master, eigrp_unack_packet_retrans, nbr,
+	EIGRP_PACKET_RETRANS_TIME,
+	&ep->t_retrans_timer);

/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
	listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
	nbr->ei->on_write_q = 1;
}
-thread_add_write(master, eigrp_write, nbr->ei->eigrp,
-	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+event_add_write(master, eigrp_write, nbr->ei->eigrp,
+	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}

@@ -1027,17 +1027,17 @@ void eigrp_unack_multicast_packet_retrans(struct event *thread)
}

/*Start retransmission timer*/
-thread_add_timer(master, eigrp_unack_multicast_packet_retrans,
-	nbr, EIGRP_PACKET_RETRANS_TIME,
-	&ep->t_retrans_timer);
+event_add_timer(master, eigrp_unack_multicast_packet_retrans,
+	nbr, EIGRP_PACKET_RETRANS_TIME,
+	&ep->t_retrans_timer);

/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
	listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
	nbr->ei->on_write_q = 1;
}
-thread_add_write(master, eigrp_write, nbr->ei->eigrp,
-	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+event_add_write(master, eigrp_write, nbr->ei->eigrp,
+	nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
@@ -906,8 +906,8 @@ void eigrp_update_send_GR_thread(struct event *thread)
/* if there is packet waiting in queue,
 * schedule this thread again with small delay */
if (nbr->retrans_queue->count > 0) {
-	thread_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
-		10, &nbr->t_nbr_send_gr);
+	event_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
+		10, &nbr->t_nbr_send_gr);
	return;
}
@@ -157,7 +157,7 @@ static struct eigrp *eigrp_new(uint16_t as, vrf_id_t vrf_id)

eigrp->ibuf = stream_new(EIGRP_PACKET_MAX_LEN + 1);

-thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
+event_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
eigrp->oi_write_q = list_new();

eigrp->topology_table = route_table_init();
@@ -267,8 +267,8 @@ void fabricd_initial_sync_hello(struct isis_circuit *circuit)
if (f->initial_sync_timeout)
	return;

-thread_add_timer(master, fabricd_initial_sync_timeout, f,
-	timeout, &f->initial_sync_timeout);
+event_add_timer(master, fabricd_initial_sync_timeout, f, timeout,
+	&f->initial_sync_timeout);
f->initial_sync_start = monotime(NULL);

if (IS_DEBUG_ADJ_PACKETS)

@@ -408,10 +408,9 @@ static void fabricd_tier_calculation_cb(struct event *thread)
zlog_info("OpenFabric: Got tier %hhu from algorithm. Arming timer.",
	tier);
f->tier_pending = tier;
-thread_add_timer(master, fabricd_tier_set_timer, f,
-	f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
-	&f->tier_set_timer);
-
+event_add_timer(master, fabricd_tier_set_timer, f,
+	f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
+	&f->tier_set_timer);
}

static void fabricd_bump_tier_calculation_timer(struct fabricd *f)

@@ -427,9 +426,9 @@ static void fabricd_bump_tier_calculation_timer(struct fabricd *f)
 * the calculation */
THREAD_OFF(f->tier_calculation_timer);

-thread_add_timer(master, fabricd_tier_calculation_cb, f,
-	2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
-	&f->tier_calculation_timer);
+event_add_timer(master, fabricd_tier_calculation_cb, f,
+	2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
+	&f->tier_calculation_timer);
}

static void fabricd_set_tier(struct fabricd *f, uint8_t tier)

@@ -711,9 +710,9 @@ void fabricd_trigger_csnp(struct isis_area *area, bool circuit_scoped)
	continue;

THREAD_OFF(circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
-thread_add_timer_msec(master, send_l2_csnp, circuit,
-	isis_jitter(f->csnp_delay, CSNP_JITTER),
-	&circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
+event_add_timer_msec(master, send_l2_csnp, circuit,
+	isis_jitter(f->csnp_delay, CSNP_JITTER),
+	&circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
}
}
@@ -396,13 +396,13 @@ void isis_adj_state_change(struct isis_adjacency **padj,
adj->flaps++;

if (level == IS_LEVEL_1) {
-	thread_add_timer(master, send_l1_csnp,
-		circuit, 0,
-		&circuit->t_send_csnp[0]);
+	event_add_timer(
+		master, send_l1_csnp, circuit,
+		0, &circuit->t_send_csnp[0]);
} else {
-	thread_add_timer(master, send_l2_csnp,
-		circuit, 0,
-		&circuit->t_send_csnp[1]);
+	event_add_timer(
+		master, send_l2_csnp, circuit,
+		0, &circuit->t_send_csnp[1]);
}
} else if (old_state == ISIS_ADJ_UP) {
	circuit->upadjcount[level - 1]--;
@@ -627,12 +627,12 @@ void isis_circuit_stream(struct isis_circuit *circuit, struct stream **stream)
void isis_circuit_prepare(struct isis_circuit *circuit)
{
#if ISIS_METHOD != ISIS_METHOD_DLPI
-thread_add_read(master, isis_receive, circuit, circuit->fd,
-	&circuit->t_read);
+event_add_read(master, isis_receive, circuit, circuit->fd,
+	&circuit->t_read);
#else
-thread_add_timer_msec(master, isis_receive, circuit,
-	listcount(circuit->area->circuit_list) * 100,
-	&circuit->t_read);
+event_add_timer_msec(master, isis_receive, circuit,
+	listcount(circuit->area->circuit_list) * 100,
+	&circuit->t_read);
#endif
}

@@ -722,10 +722,10 @@ int isis_circuit_up(struct isis_circuit *circuit)
send_hello_sched(circuit, level, TRIGGERED_IIH_DELAY);
circuit->u.bc.lan_neighs[level - 1] = list_new();

-thread_add_timer(master, isis_run_dr,
-	&circuit->level_arg[level - 1],
-	2 * circuit->hello_interval[level - 1],
-	&circuit->u.bc.t_run_dr[level - 1]);
+event_add_timer(master, isis_run_dr,
+	&circuit->level_arg[level - 1],
+	2 * circuit->hello_interval[level - 1],
+	&circuit->u.bc.t_run_dr[level - 1]);
}

/* 8.4.1 b) FIXME: solicit ES - 8.4.6 */

@@ -740,13 +740,13 @@ int isis_circuit_up(struct isis_circuit *circuit)

/* initializing PSNP timers */
if (circuit->is_type & IS_LEVEL_1)
-	thread_add_timer(
+	event_add_timer(
		master, send_l1_psnp, circuit,
		isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
		&circuit->t_send_psnp[0]);

if (circuit->is_type & IS_LEVEL_2)
-	thread_add_timer(
+	event_add_timer(
		master, send_l2_psnp, circuit,
		isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
		&circuit->t_send_psnp[1]);
@@ -223,29 +223,27 @@ int isis_dr_resign(struct isis_circuit *circuit, int level)
if (level == 1) {
	memset(circuit->u.bc.l1_desig_is, 0, ISIS_SYS_ID_LEN + 1);

-	thread_add_timer(master, send_l1_psnp, circuit,
-		isis_jitter(circuit->psnp_interval[level - 1],
-			PSNP_JITTER),
-		&circuit->t_send_psnp[0]);
+	event_add_timer(master, send_l1_psnp, circuit,
+		isis_jitter(circuit->psnp_interval[level - 1],
+			PSNP_JITTER),
+		&circuit->t_send_psnp[0]);
} else {
	memset(circuit->u.bc.l2_desig_is, 0, ISIS_SYS_ID_LEN + 1);

-	thread_add_timer(master, send_l2_psnp, circuit,
-		isis_jitter(circuit->psnp_interval[level - 1],
-			PSNP_JITTER),
-		&circuit->t_send_psnp[1]);
+	event_add_timer(master, send_l2_psnp, circuit,
+		isis_jitter(circuit->psnp_interval[level - 1],
+			PSNP_JITTER),
+		&circuit->t_send_psnp[1]);
}

THREAD_OFF(circuit->t_send_csnp[level - 1]);

-thread_add_timer(master, isis_run_dr,
-	&circuit->level_arg[level - 1],
-	2 * circuit->hello_interval[level - 1],
-	&circuit->u.bc.t_run_dr[level - 1]);
+event_add_timer(master, isis_run_dr, &circuit->level_arg[level - 1],
+	2 * circuit->hello_interval[level - 1],
+	&circuit->u.bc.t_run_dr[level - 1]);

-thread_add_event(master, isis_event_dis_status_change, circuit, 0,
-	NULL);
+event_add_event(master, isis_event_dis_status_change, circuit, 0, NULL);

return ISIS_OK;
}

@@ -276,10 +274,10 @@ int isis_dr_commence(struct isis_circuit *circuit, int level)
assert(circuit->circuit_id); /* must be non-zero */
lsp_generate_pseudo(circuit, 1);

-thread_add_timer(master, send_l1_csnp, circuit,
-	isis_jitter(circuit->csnp_interval[level - 1],
-		CSNP_JITTER),
-	&circuit->t_send_csnp[0]);
+event_add_timer(master, send_l1_csnp, circuit,
+	isis_jitter(circuit->csnp_interval[level - 1],
+		CSNP_JITTER),
+	&circuit->t_send_csnp[0]);

} else {
	memcpy(old_dr, circuit->u.bc.l2_desig_is, ISIS_SYS_ID_LEN + 1);

@@ -296,18 +294,16 @@ int isis_dr_commence(struct isis_circuit *circuit, int level)
assert(circuit->circuit_id); /* must be non-zero */
lsp_generate_pseudo(circuit, 2);

-thread_add_timer(master, send_l2_csnp, circuit,
-	isis_jitter(circuit->csnp_interval[level - 1],
-		CSNP_JITTER),
-	&circuit->t_send_csnp[1]);
+event_add_timer(master, send_l2_csnp, circuit,
+	isis_jitter(circuit->csnp_interval[level - 1],
+		CSNP_JITTER),
+	&circuit->t_send_csnp[1]);
}

-thread_add_timer(master, isis_run_dr,
-	&circuit->level_arg[level - 1],
-	2 * circuit->hello_interval[level - 1],
-	&circuit->u.bc.t_run_dr[level - 1]);
-thread_add_event(master, isis_event_dis_status_change, circuit, 0,
-	NULL);
+event_add_timer(master, isis_run_dr, &circuit->level_arg[level - 1],
+	2 * circuit->hello_interval[level - 1],
+	&circuit->u.bc.t_run_dr[level - 1]);
+event_add_event(master, isis_event_dis_status_change, circuit, 0, NULL);

return ISIS_OK;
}
@ -35,8 +35,8 @@ void dyn_cache_init(struct isis *isis)
{
isis->dyn_cache = list_new();
if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
thread_add_timer(master, dyn_cache_cleanup, isis, 120,
&isis->t_dync_clean);
event_add_timer(master, dyn_cache_cleanup, isis, 120,
&isis->t_dync_clean);
}

void dyn_cache_finish(struct isis *isis)
@ -72,7 +72,7 @@ static void dyn_cache_cleanup(struct event *thread)
XFREE(MTYPE_ISIS_DYNHN, dyn);
}

thread_add_timer(master, dyn_cache_cleanup, isis, 120,
event_add_timer(master, dyn_cache_cleanup, isis, 120,
&isis->t_dync_clean);
}

@ -63,23 +63,23 @@ static void circuit_commence_level(struct isis_circuit *circuit, int level)

if (!circuit->is_passive) {
if (level == 1) {
thread_add_timer(master, send_l1_psnp, circuit,
isis_jitter(circuit->psnp_interval[0],
PSNP_JITTER),
&circuit->t_send_psnp[0]);
event_add_timer(master, send_l1_psnp, circuit,
isis_jitter(circuit->psnp_interval[0],
PSNP_JITTER),
&circuit->t_send_psnp[0]);
} else {
thread_add_timer(master, send_l2_psnp, circuit,
isis_jitter(circuit->psnp_interval[1],
PSNP_JITTER),
&circuit->t_send_psnp[1]);
event_add_timer(master, send_l2_psnp, circuit,
isis_jitter(circuit->psnp_interval[1],
PSNP_JITTER),
&circuit->t_send_psnp[1]);
}
}

if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
thread_add_timer(master, isis_run_dr,
&circuit->level_arg[level - 1],
2 * circuit->hello_interval[level - 1],
&circuit->u.bc.t_run_dr[level - 1]);
event_add_timer(master, isis_run_dr,
&circuit->level_arg[level - 1],
2 * circuit->hello_interval[level - 1],
&circuit->u.bc.t_run_dr[level - 1]);

send_hello_sched(circuit, level, TRIGGERED_IIH_DELAY);
circuit->u.bc.lan_neighs[level - 1] = list_new();

@ -373,9 +373,8 @@ void isis_ldp_sync_holddown_timer_add(struct isis_circuit *circuit)
ils_debug("%s: start holddown timer for %s time %d", __func__,
circuit->interface->name, ldp_sync_info->holddown);

thread_add_timer(master, isis_ldp_sync_holddown_timer,
circuit, ldp_sync_info->holddown,
&ldp_sync_info->t_holddown);
event_add_timer(master, isis_ldp_sync_holddown_timer, circuit,
ldp_sync_info->holddown, &ldp_sync_info->t_holddown);
}

/*

@ -1507,8 +1507,8 @@ int isis_rlfa_activate(struct isis_spftree *spftree, struct rlfa *rlfa,
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] += 1;

THREAD_OFF(area->t_rlfa_rib_update);
thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
&area->t_rlfa_rib_update);
event_add_timer(master, isis_area_verify_routes_cb, area, 2,
&area->t_rlfa_rib_update);

return 0;
}
@ -1526,8 +1526,8 @@ void isis_rlfa_deactivate(struct isis_spftree *spftree, struct rlfa *rlfa)
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] -= 1;

THREAD_OFF(area->t_rlfa_rib_update);
thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
&area->t_rlfa_rib_update);
event_add_timer(master, isis_area_verify_routes_cb, area, 2,
&area->t_rlfa_rib_update);
}

void isis_rlfa_list_init(struct isis_spftree *spftree)

@ -1384,9 +1384,9 @@ int lsp_generate(struct isis_area *area, int level)
overload_time = isis_restart_read_overload_time(area);
if (overload_time > 0) {
isis_area_overload_bit_set(area, true);
thread_add_timer(master, set_overload_on_start_timer,
area, overload_time,
&area->t_overload_on_startup_timer);
event_add_timer(master, set_overload_on_start_timer,
area, overload_time,
&area->t_overload_on_startup_timer);
}
device_startup = false;
}
@ -1420,9 +1420,8 @@ int lsp_generate(struct isis_area *area, int level)

THREAD_OFF(area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
thread_add_timer(master, lsp_refresh,
&area->lsp_refresh_arg[level - 1], refresh_time,
&area->t_lsp_refresh[level - 1]);
event_add_timer(master, lsp_refresh, &area->lsp_refresh_arg[level - 1],
refresh_time, &area->t_lsp_refresh[level - 1]);

if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug("ISIS-Upd (%s): Building L%d LSP %s, len %hu, seq 0x%08x, cksum 0x%04hx, lifetime %hus refresh %hus",
@ -1501,9 +1500,8 @@ static int lsp_regenerate(struct isis_area *area, int level)
lsp_seqno_update(lsp);

refresh_time = lsp_refresh_time(lsp, rem_lifetime);
thread_add_timer(master, lsp_refresh,
&area->lsp_refresh_arg[level - 1], refresh_time,
&area->t_lsp_refresh[level - 1]);
event_add_timer(master, lsp_refresh, &area->lsp_refresh_arg[level - 1],
refresh_time, &area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;

if (IS_DEBUG_UPDATE_PACKETS) {
@ -1668,10 +1666,9 @@ int _lsp_regenerate_schedule(struct isis_area *area, int level,
}

area->lsp_regenerate_pending[lvl - 1] = 1;
thread_add_timer_msec(master, lsp_refresh,
&area->lsp_refresh_arg[lvl - 1],
timeout,
&area->t_lsp_refresh[lvl - 1]);
event_add_timer_msec(master, lsp_refresh,
&area->lsp_refresh_arg[lvl - 1], timeout,
&area->t_lsp_refresh[lvl - 1]);
}

if (all_pseudo) {
@ -1825,13 +1822,13 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level)
THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
circuit->lsp_regenerate_pending[level - 1] = 0;
if (level == IS_LEVEL_1)
thread_add_timer(
master, lsp_l1_refresh_pseudo, circuit, refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
event_add_timer(master, lsp_l1_refresh_pseudo, circuit,
refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
else if (level == IS_LEVEL_2)
thread_add_timer(
master, lsp_l2_refresh_pseudo, circuit, refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
event_add_timer(master, lsp_l2_refresh_pseudo, circuit,
refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);

if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug(
@ -1880,13 +1877,13 @@ static int lsp_regenerate_pseudo(struct isis_circuit *circuit, int level)

refresh_time = lsp_refresh_time(lsp, rem_lifetime);
if (level == IS_LEVEL_1)
thread_add_timer(
master, lsp_l1_refresh_pseudo, circuit, refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
event_add_timer(master, lsp_l1_refresh_pseudo, circuit,
refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
else if (level == IS_LEVEL_2)
thread_add_timer(
master, lsp_l2_refresh_pseudo, circuit, refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
event_add_timer(master, lsp_l2_refresh_pseudo, circuit,
refresh_time,
&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);

if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug(
@ -2032,11 +2029,11 @@ int lsp_regenerate_schedule_pseudo(struct isis_circuit *circuit, int level)
circuit->lsp_regenerate_pending[lvl - 1] = 1;

if (lvl == IS_LEVEL_1) {
thread_add_timer_msec(
event_add_timer_msec(
master, lsp_l1_refresh_pseudo, circuit, timeout,
&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
} else if (lvl == IS_LEVEL_2) {
thread_add_timer_msec(
event_add_timer_msec(
master, lsp_l2_refresh_pseudo, circuit, timeout,
&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
}
@ -2060,7 +2057,7 @@ void lsp_tick(struct event *thread)
area = THREAD_ARG(thread);
assert(area);
area->t_tick = NULL;
thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
event_add_timer(master, lsp_tick, area, 1, &area->t_tick);

struct isis_circuit *fabricd_init_c = fabricd_initial_sync_circuit(area);
@ -186,8 +186,8 @@ static void isis_config_start(void)
/* Max wait time for config to load before generating lsp */
#define ISIS_PRE_CONFIG_MAX_WAIT_SECONDS 600
THREAD_OFF(t_isis_cfg);
thread_add_timer(im->master, isis_config_finish, NULL,
ISIS_PRE_CONFIG_MAX_WAIT_SECONDS, &t_isis_cfg);
event_add_timer(im->master, isis_config_finish, NULL,
ISIS_PRE_CONFIG_MAX_WAIT_SECONDS, &t_isis_cfg);
}

static void isis_config_end(void)

@ -193,8 +193,8 @@ static int process_p2p_hello(struct iih_info *iih)

/* lets take care of the expiry */
THREAD_OFF(adj->t_expire);
thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
&adj->t_expire);
event_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
&adj->t_expire);

/* While fabricds initial sync is in progress, ignore hellos from other
* interfaces than the one we are performing the initial sync on. */
@ -466,8 +466,8 @@ static int process_lan_hello(struct iih_info *iih)
: iih->circuit->u.bc.l2_desig_is;

if (memcmp(dis, iih->dis, ISIS_SYS_ID_LEN + 1)) {
thread_add_event(master, isis_event_dis_status_change,
iih->circuit, 0, NULL);
event_add_event(master, isis_event_dis_status_change,
iih->circuit, 0, NULL);
memcpy(dis, iih->dis, ISIS_SYS_ID_LEN + 1);
}
}
@ -485,8 +485,8 @@ static int process_lan_hello(struct iih_info *iih)

/* lets take care of the expiry */
THREAD_OFF(adj->t_expire);
thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
&adj->t_expire);
event_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
&adj->t_expire);

/*
* If the snpa for this circuit is found from LAN Neighbours TLV
@ -2059,10 +2059,9 @@ static void _send_hello_sched(struct isis_circuit *circuit,
THREAD_OFF(*threadp);
}

thread_add_timer_msec(master, send_hello_cb,
&circuit->level_arg[level - 1],
isis_jitter(delay, IIH_JITTER),
threadp);
event_add_timer_msec(master, send_hello_cb,
&circuit->level_arg[level - 1],
isis_jitter(delay, IIH_JITTER), threadp);
}

void send_hello_sched(struct isis_circuit *circuit, int level, long delay)
@ -2254,9 +2253,9 @@ void send_l1_csnp(struct event *thread)
send_csnp(circuit, 1);
}
/* set next timer thread */
thread_add_timer(master, send_l1_csnp, circuit,
isis_jitter(circuit->csnp_interval[0], CSNP_JITTER),
&circuit->t_send_csnp[0]);
event_add_timer(master, send_l1_csnp, circuit,
isis_jitter(circuit->csnp_interval[0], CSNP_JITTER),
&circuit->t_send_csnp[0]);
}

void send_l2_csnp(struct event *thread)
@ -2274,9 +2273,9 @@ void send_l2_csnp(struct event *thread)
send_csnp(circuit, 2);
}
/* set next timer thread */
thread_add_timer(master, send_l2_csnp, circuit,
isis_jitter(circuit->csnp_interval[1], CSNP_JITTER),
&circuit->t_send_csnp[1]);
event_add_timer(master, send_l2_csnp, circuit,
isis_jitter(circuit->csnp_interval[1], CSNP_JITTER),
&circuit->t_send_csnp[1]);
}

/*
@ -2405,9 +2404,9 @@ void send_l1_psnp(struct event *thread)

send_psnp(1, circuit);
/* set next timer thread */
thread_add_timer(master, send_l1_psnp, circuit,
isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
&circuit->t_send_psnp[0]);
event_add_timer(master, send_l1_psnp, circuit,
isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
&circuit->t_send_psnp[0]);
}

/*
@ -2426,9 +2425,9 @@ void send_l2_psnp(struct event *thread)
send_psnp(2, circuit);

/* set next timer thread */
thread_add_timer(master, send_l2_psnp, circuit,
isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
&circuit->t_send_psnp[1]);
event_add_timer(master, send_l2_psnp, circuit,
isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
&circuit->t_send_psnp[1]);
}

/*

@ -1961,9 +1961,9 @@ int _isis_spf_schedule(struct isis_area *area, int level,
if (area->spf_timer[level - 1])
return ISIS_OK;

thread_add_timer_msec(master, isis_run_spf_cb,
isis_run_spf_arg(area, level), delay,
&area->spf_timer[level - 1]);
event_add_timer_msec(master, isis_run_spf_cb,
isis_run_spf_arg(area, level), delay,
&area->spf_timer[level - 1]);
return ISIS_OK;
}

@ -1990,8 +1990,8 @@ int _isis_spf_schedule(struct isis_area *area, int level,
timer = area->min_spf_interval[level - 1] - diff;
}

thread_add_timer(master, isis_run_spf_cb, isis_run_spf_arg(area, level),
timer, &area->spf_timer[level - 1]);
event_add_timer(master, isis_run_spf_cb, isis_run_spf_arg(area, level),
timer, &area->spf_timer[level - 1]);

if (IS_DEBUG_SPF_EVENTS)
zlog_debug("ISIS-SPF (%s) L%d SPF scheduled %ld sec from now",

@ -1108,8 +1108,8 @@ int isis_sr_start(struct isis_area *area)
if (!isis_zebra_label_manager_ready())
if (isis_zebra_label_manager_connect() < 0) {
/* Re-attempt to connect to Label Manager in 1 sec. */
thread_add_timer(master, sr_start_label_manager, area,
1, &srdb->t_start_lm);
event_add_timer(master, sr_start_label_manager, area, 1,
&srdb->t_start_lm);
return -1;
}
@ -105,7 +105,7 @@ static void tx_queue_send_event(struct event *thread)
struct isis_tx_queue_entry *e = THREAD_ARG(thread);
struct isis_tx_queue *queue = e->queue;

thread_add_timer(master, tx_queue_send_event, e, 5, &e->retry);
event_add_timer(master, tx_queue_send_event, e, 5, &e->retry);

if (e->is_retry)
queue->circuit->area->lsp_rxmt_count++;
@ -148,7 +148,7 @@ void _isis_tx_queue_add(struct isis_tx_queue *queue,
e->type = type;

THREAD_OFF(e->retry);
thread_add_event(master, tx_queue_send_event, e, 0, &e->retry);
event_add_event(master, tx_queue_send_event, e, 0, &e->retry);

e->is_retry = false;
}

@ -325,7 +325,7 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name)
area->area_addrs->del = delete_area_addr;

if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
event_add_timer(master, lsp_tick, area, 1, &area->t_tick);
flags_initialize(&area->flags);

isis_sr_area_init(area);

@ -46,7 +46,7 @@ int accept_add(int fd, void (*cb)(struct event *), void *arg)
av->arg = arg;
LIST_INSERT_HEAD(&accept_queue.queue, av, entry);

thread_add_read(master, accept_cb, av, av->fd, &av->ev);
event_add_read(master, accept_cb, av, av->fd, &av->ev);

log_debug("%s: accepting on fd %d", __func__, fd);

@ -73,7 +73,7 @@ accept_pause(void)
{
log_debug(__func__);
accept_unarm();
thread_add_timer(master, accept_timeout, NULL, 1, &accept_queue.evt);
event_add_timer(master, accept_timeout, NULL, 1, &accept_queue.evt);
}

void
@ -91,7 +91,7 @@ accept_arm(void)
{
struct accept_ev *av;
LIST_FOREACH(av, &accept_queue.queue, entry) {
thread_add_read(master, accept_cb, av, av->fd, &av->ev);
event_add_read(master, accept_cb, av, av->fd, &av->ev);
}
}

@ -106,7 +106,7 @@ accept_unarm(void)
static void accept_cb(struct event *thread)
{
struct accept_ev *av = THREAD_ARG(thread);
thread_add_read(master, accept_cb, av, av->fd, &av->ev);
event_add_read(master, accept_cb, av, av->fd, &av->ev);
av->accept_cb(thread);
}

@ -187,8 +187,8 @@ adj_start_itimer(struct adj *adj)
{
THREAD_OFF(adj->inactivity_timer);
adj->inactivity_timer = NULL;
thread_add_timer(master, adj_itimer, adj, adj->holdtime,
&adj->inactivity_timer);
event_add_timer(master, adj_itimer, adj, adj->holdtime,
&adj->inactivity_timer);
}

void
@ -345,8 +345,8 @@ tnbr_start_hello_timer(struct tnbr *tnbr)
{
THREAD_OFF(tnbr->hello_timer);
tnbr->hello_timer = NULL;
thread_add_timer(master, tnbr_hello_timer, tnbr, tnbr_get_hello_interval(tnbr),
&tnbr->hello_timer);
event_add_timer(master, tnbr_hello_timer, tnbr,
tnbr_get_hello_interval(tnbr), &tnbr->hello_timer);
}

static void

@ -122,8 +122,8 @@ static void control_accept(struct event *thread)
imsg_init(&c->iev.ibuf, connfd);
c->iev.handler_read = control_dispatch_imsg;
c->iev.ev_read = NULL;
thread_add_read(master, c->iev.handler_read, &c->iev, c->iev.ibuf.fd,
&c->iev.ev_read);
event_add_read(master, c->iev.handler_read, &c->iev, c->iev.ibuf.fd,
&c->iev.ev_read);
c->iev.handler_write = ldp_write_handler;
c->iev.ev_write = NULL;

@ -457,8 +457,8 @@ static void
if_start_hello_timer(struct iface_af *ia)
{
THREAD_OFF(ia->hello_timer);
thread_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
&ia->hello_timer);
event_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
&ia->hello_timer);
}

static void
@ -733,7 +733,7 @@ static void start_wait_for_ldp_sync_timer(struct iface *iface)
return;

THREAD_OFF(iface->ldp_sync.wait_for_sync_timer);
thread_add_timer(master, iface_wait_for_ldp_sync_timer, iface,
event_add_timer(master, iface_wait_for_ldp_sync_timer, iface,
if_get_wait_for_sync_interval(),
&iface->ldp_sync.wait_for_sync_timer);
}
10
ldpd/lde.c
10
ldpd/lde.c
@ -134,8 +134,8 @@ lde(void)
fatal(NULL);
imsg_init(&iev_main->ibuf, LDPD_FD_ASYNC);
iev_main->handler_read = lde_dispatch_parent;
thread_add_read(master, iev_main->handler_read, iev_main, iev_main->ibuf.fd,
&iev_main->ev_read);
event_add_read(master, iev_main->handler_read, iev_main,
iev_main->ibuf.fd, &iev_main->ev_read);
iev_main->handler_write = ldp_write_handler;

memset(&iev_main_sync_data, 0, sizeof(iev_main_sync_data));
@ -523,8 +523,8 @@ static void lde_dispatch_parent(struct event *thread)
fatal(NULL);
imsg_init(&iev_ldpe->ibuf, fd);
iev_ldpe->handler_read = lde_dispatch_imsg;
thread_add_read(master, iev_ldpe->handler_read, iev_ldpe, iev_ldpe->ibuf.fd,
&iev_ldpe->ev_read);
event_add_read(master, iev_ldpe->handler_read, iev_ldpe,
iev_ldpe->ibuf.fd, &iev_ldpe->ev_read);
iev_ldpe->handler_write = ldp_write_handler;
iev_ldpe->ev_write = NULL;
break;
@ -2178,7 +2178,7 @@ retry:
zclient_sync = NULL;

/* Retry using a timer */
thread_add_timer(master, zclient_sync_retry, NULL, 1, NULL);
event_add_timer(master, zclient_sync_retry, NULL, 1, NULL);
}

static void

@ -1058,8 +1058,7 @@ void
lde_gc_start_timer(void)
{
THREAD_OFF(gc_timer);
thread_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL,
&gc_timer);
event_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL, &gc_timer);
}

void

30
ldpd/ldpd.c
30
ldpd/ldpd.c
@ -383,7 +383,7 @@ main(int argc, char *argv[])
frr_config_fork();

/* apply configuration */
thread_add_event(master, ldp_config_fork_apply, NULL, 0, NULL);
event_add_event(master, ldp_config_fork_apply, NULL, 0, NULL);

/* setup pipes to children */
if ((iev_ldpe = calloc(1, sizeof(struct imsgev))) == NULL ||
@ -394,26 +394,26 @@ main(int argc, char *argv[])

imsg_init(&iev_ldpe->ibuf, pipe_parent2ldpe[0]);
iev_ldpe->handler_read = main_dispatch_ldpe;
thread_add_read(master, iev_ldpe->handler_read, iev_ldpe, iev_ldpe->ibuf.fd,
&iev_ldpe->ev_read);
event_add_read(master, iev_ldpe->handler_read, iev_ldpe,
iev_ldpe->ibuf.fd, &iev_ldpe->ev_read);
iev_ldpe->handler_write = ldp_write_handler;

imsg_init(&iev_ldpe_sync->ibuf, pipe_parent2ldpe_sync[0]);
iev_ldpe_sync->handler_read = main_dispatch_ldpe;
thread_add_read(master, iev_ldpe_sync->handler_read, iev_ldpe_sync, iev_ldpe_sync->ibuf.fd,
&iev_ldpe_sync->ev_read);
event_add_read(master, iev_ldpe_sync->handler_read, iev_ldpe_sync,
iev_ldpe_sync->ibuf.fd, &iev_ldpe_sync->ev_read);
iev_ldpe_sync->handler_write = ldp_write_handler;

imsg_init(&iev_lde->ibuf, pipe_parent2lde[0]);
iev_lde->handler_read = main_dispatch_lde;
thread_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
&iev_lde->ev_read);
event_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
&iev_lde->ev_read);
iev_lde->handler_write = ldp_write_handler;

imsg_init(&iev_lde_sync->ibuf, pipe_parent2lde_sync[0]);
iev_lde_sync->handler_read = main_dispatch_lde;
thread_add_read(master, iev_lde_sync->handler_read, iev_lde_sync, iev_lde_sync->ibuf.fd,
&iev_lde_sync->ev_read);
event_add_read(master, iev_lde_sync->handler_read, iev_lde_sync,
iev_lde_sync->ibuf.fd, &iev_lde_sync->ev_read);
iev_lde_sync->handler_write = ldp_write_handler;

if (main_imsg_send_ipc_sockets(&iev_ldpe->ibuf, &iev_lde->ibuf))
@ -787,12 +787,12 @@ void
imsg_event_add(struct imsgev *iev)
{
if (iev->handler_read)
thread_add_read(master, iev->handler_read, iev, iev->ibuf.fd,
&iev->ev_read);
event_add_read(master, iev->handler_read, iev, iev->ibuf.fd,
&iev->ev_read);

if (iev->handler_write && iev->ibuf.w.queued)
thread_add_write(master, iev->handler_write, iev,
iev->ibuf.fd, &iev->ev_write);
event_add_write(master, iev->handler_write, iev, iev->ibuf.fd,
&iev->ev_write);
}

int
@ -819,8 +819,8 @@ void
evbuf_event_add(struct evbuf *eb)
{
if (eb->wbuf.queued)
thread_add_write(master, eb->handler, eb->arg, eb->wbuf.fd,
&eb->ev);
event_add_write(master, eb->handler, eb->arg, eb->wbuf.fd,
&eb->ev);
}

void evbuf_init(struct evbuf *eb, int fd, void (*handler)(struct event *),
24
ldpd/ldpe.c
24
ldpd/ldpe.c
@ -111,8 +111,8 @@ ldpe(void)
fatal(NULL);
imsg_init(&iev_main->ibuf, LDPD_FD_ASYNC);
iev_main->handler_read = ldpe_dispatch_main;
thread_add_read(master, iev_main->handler_read, iev_main, iev_main->ibuf.fd,
&iev_main->ev_read);
event_add_read(master, iev_main->handler_read, iev_main,
iev_main->ibuf.fd, &iev_main->ev_read);
iev_main->handler_write = ldp_write_handler;

memset(&iev_main_data, 0, sizeof(iev_main_data));
@ -137,8 +137,8 @@ ldpe_init(struct ldpd_init *init)
/* This socket must be open before dropping privileges. */
global.pfkeysock = pfkey_init();
if (sysdep.no_pfkey == 0) {
thread_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
&pfkey_ev);
event_add_read(master, ldpe_dispatch_pfkey, NULL,
global.pfkeysock, &pfkey_ev);
}
#endif

@ -363,8 +363,8 @@ static void ldpe_dispatch_main(struct event *thread)
fatal(NULL);
imsg_init(&iev_lde->ibuf, fd);
iev_lde->handler_read = ldpe_dispatch_lde;
thread_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
&iev_lde->ev_read);
event_add_read(master, iev_lde->handler_read, iev_lde,
iev_lde->ibuf.fd, &iev_lde->ev_read);
iev_lde->handler_write = ldp_write_handler;
iev_lde->ev_write = NULL;
break;
@ -763,8 +763,8 @@ static void ldpe_dispatch_pfkey(struct event *thread)
{
int fd = THREAD_FD(thread);

thread_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
&pfkey_ev);
event_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
&pfkey_ev);

if (pfkey_read(fd, NULL) == -1)
fatal("pfkey_read failed, exiting...");
@ -781,13 +781,13 @@ ldpe_setup_sockets(int af, int disc_socket, int edisc_socket,

/* discovery socket */
af_global->ldp_disc_socket = disc_socket;
thread_add_read(master, disc_recv_packet, &af_global->disc_ev, af_global->ldp_disc_socket,
&af_global->disc_ev);
event_add_read(master, disc_recv_packet, &af_global->disc_ev,
af_global->ldp_disc_socket, &af_global->disc_ev);

/* extended discovery socket */
af_global->ldp_edisc_socket = edisc_socket;
thread_add_read(master, disc_recv_packet, &af_global->edisc_ev, af_global->ldp_edisc_socket,
&af_global->edisc_ev);
event_add_read(master, disc_recv_packet, &af_global->edisc_ev,
af_global->ldp_edisc_socket, &af_global->edisc_ev);

/* session socket */
af_global->ldp_session_socket = session_socket;

@ -425,7 +425,7 @@ nbr_start_ktimer(struct nbr *nbr)
secs = nbr->keepalive / KEEPALIVE_PER_PERIOD;
THREAD_OFF(nbr->keepalive_timer);
nbr->keepalive_timer = NULL;
thread_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
event_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
}

void
@ -452,8 +452,8 @@ nbr_start_ktimeout(struct nbr *nbr)
{
THREAD_OFF(nbr->keepalive_timeout);
nbr->keepalive_timeout = NULL;
thread_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
&nbr->keepalive_timeout);
event_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
&nbr->keepalive_timeout);
}

void
@ -481,7 +481,7 @@ nbr_start_itimeout(struct nbr *nbr)
secs = INIT_FSM_TIMEOUT;
THREAD_OFF(nbr->init_timeout);
nbr->init_timeout = NULL;
thread_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
event_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
}

void
@ -527,8 +527,7 @@ nbr_start_idtimer(struct nbr *nbr)

THREAD_OFF(nbr->initdelay_timer);
nbr->initdelay_timer = NULL;
thread_add_timer(master, nbr_idtimer, nbr, secs,
&nbr->initdelay_timer);
event_add_timer(master, nbr_idtimer, nbr, secs, &nbr->initdelay_timer);
}

void
@ -650,8 +649,8 @@ nbr_establish_connection(struct nbr *nbr)
if (connect(nbr->fd, &remote_su.sa, sockaddr_len(&remote_su.sa))
== -1) {
if (errno == EINPROGRESS) {
thread_add_write(master, nbr_connect_cb, nbr, nbr->fd,
&nbr->ev_connect);
event_add_write(master, nbr_connect_cb, nbr, nbr->fd,
&nbr->ev_connect);
return (0);
}
log_warn("%s: error while connecting to %s", __func__,

@ -129,7 +129,7 @@ void disc_recv_packet(struct event *thread)
struct in_addr lsr_id;

/* reschedule read */
thread_add_read(master, disc_recv_packet, threadp, fd, threadp);
event_add_read(master, disc_recv_packet, threadp, fd, threadp);

/* setup buffer */
memset(&m, 0, sizeof(m));
@ -406,7 +406,7 @@ static void session_read(struct event *thread)
uint16_t pdu_len, msg_len, msg_size, max_pdu_len;
int ret;

thread_add_read(master, session_read, nbr, fd, &tcp->rev);
event_add_read(master, session_read, nbr, fd, &tcp->rev);

if ((n = read(fd, tcp->rbuf->buf + tcp->rbuf->wpos,
sizeof(tcp->rbuf->buf) - tcp->rbuf->wpos)) == -1) {
@ -721,7 +721,7 @@ tcp_new(int fd, struct nbr *nbr)
if ((tcp->rbuf = calloc(1, sizeof(struct ibuf_read))) == NULL)
fatal(__func__);

thread_add_read(master, session_read, nbr, tcp->fd, &tcp->rev);
event_add_read(master, session_read, nbr, tcp->fd, &tcp->rev);
tcp->nbr = nbr;
}

@ -768,8 +768,8 @@ pending_conn_new(int fd, int af, union ldpd_addr *addr)
pconn->addr = *addr;
TAILQ_INSERT_TAIL(&global.pending_conns, pconn, entry);
pconn->ev_timeout = NULL;
thread_add_timer(master, pending_conn_timeout, pconn, PENDING_CONN_TIMEOUT,
&pconn->ev_timeout);
event_add_timer(master, pending_conn_timeout, pconn,
PENDING_CONN_TIMEOUT, &pconn->ev_timeout);

return (pconn);
}

@ -103,8 +103,8 @@ static void agentx_events_update(void)
snmp_select_info(&maxfd, &fds, &timeout, &block);

if (!block) {
thread_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
&timeout_thr);
event_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
&timeout_thr);
}

ln = listhead(events);
@ -133,7 +133,7 @@ static void agentx_events_update(void)

thr = XCALLOC(MTYPE_TMP, sizeof(struct event *));
newln = listnode_add_before(events, ln, thr);
thread_add_read(agentx_tm, agentx_read, newln, fd, thr);
event_add_read(agentx_tm, agentx_read, newln, fd, thr);
}
}

@ -75,7 +75,7 @@ struct bfd_session_params {
* Next event.
*
* This variable controls what action to execute when the command batch
* finishes. Normally we'd use `thread_add_event` value, however since
* finishes. Normally we'd use `event_add_event` value, however since
* that function is going to be called multiple times and the value
* might be different we'll use this variable to keep track of it.
*/
@ -733,13 +733,13 @@ void bfd_sess_set_auto_source(struct bfd_session_params *bsp, bool enable)
void bfd_sess_install(struct bfd_session_params *bsp)
{
bsp->lastev = BSE_INSTALL;
thread_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
event_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
}

void bfd_sess_uninstall(struct bfd_session_params *bsp)
{
bsp->lastev = BSE_UNINSTALL;
thread_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
event_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
}

enum bfd_session_state bfd_sess_status(const struct bfd_session_params *bsp)
50
lib/event.c
50
lib/event.c
@ -948,10 +948,10 @@ done:
}

/* Add new read thread. */
void _thread_add_read_write(const struct xref_threadsched *xref,
struct thread_master *m,
void (*func)(struct event *), void *arg, int fd,
struct event **t_ptr)
void _event_add_read_write(const struct xref_threadsched *xref,
struct thread_master *m,
void (*func)(struct event *), void *arg, int fd,
struct event **t_ptr)
{
int dir = xref->thread_type;
struct event *thread = NULL;
@ -1028,11 +1028,11 @@ void _thread_add_read_write(const struct xref_threadsched *xref,
}
}

static void _thread_add_timer_timeval(const struct xref_threadsched *xref,
struct thread_master *m,
void (*func)(struct event *), void *arg,
struct timeval *time_relative,
struct event **t_ptr)
static void _event_add_timer_timeval(const struct xref_threadsched *xref,
struct thread_master *m,
void (*func)(struct event *), void *arg,
struct timeval *time_relative,
struct event **t_ptr)
{
struct event *thread;
struct timeval t;
@ -1082,9 +1082,9 @@ static void _thread_add_timer_timeval(const struct xref_threadsched *xref,


/* Add timer event thread. */
void _thread_add_timer(const struct xref_threadsched *xref,
struct thread_master *m, void (*func)(struct event *),
void *arg, long timer, struct event **t_ptr)
void _event_add_timer(const struct xref_threadsched *xref,
struct thread_master *m, void (*func)(struct event *),
void *arg, long timer, struct event **t_ptr)
{
struct timeval trel;

@ -1093,14 +1093,14 @@ void _thread_add_timer(const struct xref_threadsched *xref,
trel.tv_sec = timer;
trel.tv_usec = 0;

_thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "millisecond" resolution */
void _thread_add_timer_msec(const struct xref_threadsched *xref,
struct thread_master *m,
void (*func)(struct event *), void *arg, long timer,
struct event **t_ptr)
void _event_add_timer_msec(const struct xref_threadsched *xref,
struct thread_master *m,
void (*func)(struct event *), void *arg, long timer,
struct event **t_ptr)
{
struct timeval trel;

@ -1109,21 +1109,21 @@ void _thread_add_timer_msec(const struct xref_threadsched *xref,
trel.tv_sec = timer / 1000;
trel.tv_usec = 1000 * (timer % 1000);

_thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "timeval" resolution */
void _thread_add_timer_tv(const struct xref_threadsched *xref,
struct thread_master *m, void (*func)(struct event *),
void *arg, struct timeval *tv, struct event **t_ptr)
void _event_add_timer_tv(const struct xref_threadsched *xref,
struct thread_master *m, void (*func)(struct event *),
void *arg, struct timeval *tv, struct event **t_ptr)
{
_thread_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
_event_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
}

/* Add simple event thread. */
void _thread_add_event(const struct xref_threadsched *xref,
struct thread_master *m, void (*func)(struct event *),
void *arg, int val, struct event **t_ptr)
void _event_add_event(const struct xref_threadsched *xref,
struct thread_master *m, void (*func)(struct event *),
void *arg, int val, struct event **t_ptr)
{
struct event *thread = NULL;
66
lib/event.h
66
lib/event.h
@ -172,24 +172,26 @@ struct cpu_thread_history {

#define _xref_t_a(addfn, type, m, f, a, v, t) \
({ \
static const struct xref_threadsched _xref \
__attribute__((used)) = { \
static const struct xref_threadsched _xref __attribute__( \
(used)) = { \
.xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
.funcname = #f, \
.dest = #t, \
.thread_type = THREAD_ ## type, \
.thread_type = THREAD_##type, \
}; \
XREF_LINK(_xref.xref); \
_thread_add_ ## addfn(&_xref, m, f, a, v, t); \
}) \
/* end */
_event_add_##addfn(&_xref, m, f, a, v, t); \
}) /* end */

#define thread_add_read(m,f,a,v,t) _xref_t_a(read_write, READ, m,f,a,v,t)
#define thread_add_write(m,f,a,v,t) _xref_t_a(read_write, WRITE, m,f,a,v,t)
#define thread_add_timer(m,f,a,v,t) _xref_t_a(timer, TIMER, m,f,a,v,t)
#define thread_add_timer_msec(m,f,a,v,t) _xref_t_a(timer_msec, TIMER, m,f,a,v,t)
#define thread_add_timer_tv(m,f,a,v,t) _xref_t_a(timer_tv, TIMER, m,f,a,v,t)
#define thread_add_event(m,f,a,v,t) _xref_t_a(event, EVENT, m,f,a,v,t)
#define event_add_read(m, f, a, v, t) _xref_t_a(read_write, READ, m, f, a, v, t)
#define event_add_write(m, f, a, v, t) \
_xref_t_a(read_write, WRITE, m, f, a, v, t)
#define event_add_timer(m, f, a, v, t) _xref_t_a(timer, TIMER, m, f, a, v, t)
#define event_add_timer_msec(m, f, a, v, t) \
_xref_t_a(timer_msec, TIMER, m, f, a, v, t)
#define event_add_timer_tv(m, f, a, v, t) \
_xref_t_a(timer_tv, TIMER, m, f, a, v, t)
#define event_add_event(m, f, a, v, t) _xref_t_a(event, EVENT, m, f, a, v, t)

#define thread_execute(m,f,a,v) \
({ \
@ -210,30 +212,30 @@ void thread_master_set_name(struct thread_master *master, const char *name);
extern void thread_master_free(struct thread_master *);
extern void thread_master_free_unused(struct thread_master *);

extern void _thread_add_read_write(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg,
int fd, struct event **tref);
extern void _event_add_read_write(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg, int fd,
struct event **tref);

extern void _thread_add_timer(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg, long t,
struct event **tref);
extern void _event_add_timer(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg, long t,
struct event **tref);

extern void _thread_add_timer_msec(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg,
long t, struct event **tref);
extern void _event_add_timer_msec(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg, long t,
struct event **tref);

extern void _thread_add_timer_tv(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg,
struct timeval *tv, struct event **tref);
extern void _event_add_timer_tv(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg,
struct timeval *tv, struct event **tref);

extern void _thread_add_event(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg, int val,
struct event **tref);
extern void _event_add_event(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg, int val,
struct event **tref);

extern void _thread_execute(const struct xref_threadsched *xref,
struct thread_master *master,

@ -239,7 +239,7 @@ static void fpt_finish(struct event *thread)
/* stop function, called from other threads to halt this one */
static int fpt_halt(struct frr_pthread *fpt, void **res)
{
thread_add_event(fpt->master, &fpt_finish, fpt, 0, NULL);
event_add_event(fpt->master, &fpt_finish, fpt, 0, NULL);
pthread_join(fpt->thread, res);

return 0;
@ -281,7 +281,7 @@ static void *fpt_run(void *arg)

int sleeper[2];
pipe(sleeper);
thread_add_read(fpt->master, &fpt_dummy, NULL, sleeper[0], NULL);
event_add_read(fpt->master, &fpt_dummy, NULL, sleeper[0], NULL);

fpt->master->handle_signals = false;
@ -138,8 +138,8 @@ static void frrzmq_read_msg(struct event *t)
if (read)
frrzmq_check_events(cbp, &cb->write, ZMQ_POLLOUT);

thread_add_read(t->master, frrzmq_read_msg, cbp,
cb->fd, &cb->read.thread);
event_add_read(t->master, frrzmq_read_msg, cbp, cb->fd,
&cb->read.thread);
return;

out_err:
@ -149,14 +149,13 @@ out_err:
cb->read.cb_error(cb->read.arg, cb->zmqsock);
}

int _frrzmq_thread_add_read(const struct xref_threadsched *xref,
struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*partfunc)(void *arg, void *zmqsock,
zmq_msg_t *msg, unsigned partnum),
void (*errfunc)(void *arg, void *zmqsock),
void *arg, void *zmqsock,
struct frrzmq_cb **cbp)
int _frrzmq_event_add_read(const struct xref_threadsched *xref,
struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*partfunc)(void *arg, void *zmqsock,
zmq_msg_t *msg, unsigned partnum),
void (*errfunc)(void *arg, void *zmqsock), void *arg,
void *zmqsock, struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
@ -193,11 +192,11 @@ int _frrzmq_thread_add_read(const struct xref_threadsched *xref,
if (events & ZMQ_POLLIN) {
thread_cancel(&cb->read.thread);

thread_add_event(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
} else
thread_add_read(master, frrzmq_read_msg, cbp, fd,
event_add_event(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
} else
event_add_read(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
return 0;
}

@ -247,8 +246,8 @@ static void frrzmq_write_msg(struct event *t)
if (written)
frrzmq_check_events(cbp, &cb->read, ZMQ_POLLIN);

thread_add_write(t->master, frrzmq_write_msg, cbp,
cb->fd, &cb->write.thread);
event_add_write(t->master, frrzmq_write_msg, cbp, cb->fd,
&cb->write.thread);
return;

out_err:
@ -258,11 +257,11 @@ out_err:
cb->write.cb_error(cb->write.arg, cb->zmqsock);
}

int _frrzmq_thread_add_write(const struct xref_threadsched *xref,
struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*errfunc)(void *arg, void *zmqsock),
void *arg, void *zmqsock, struct frrzmq_cb **cbp)
int _frrzmq_event_add_write(const struct xref_threadsched *xref,
struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*errfunc)(void *arg, void *zmqsock),
void *arg, void *zmqsock, struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
@ -299,11 +298,11 @@ int _frrzmq_thread_add_write(const struct xref_threadsched *xref,
if (events & ZMQ_POLLOUT) {
thread_cancel(&cb->write.thread);

_thread_add_event(xref, master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
} else
thread_add_write(master, frrzmq_write_msg, cbp, fd,
_event_add_event(xref, master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
} else
event_add_write(master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
return 0;
}

@ -348,10 +347,10 @@ void frrzmq_check_events(struct frrzmq_cb **cbp, struct cb_core *core,
thread_cancel(&core->thread);

if (event == ZMQ_POLLIN)
thread_add_event(tm, frrzmq_read_msg,
cbp, cb->fd, &core->thread);
event_add_event(tm, frrzmq_read_msg, cbp, cb->fd,
&core->thread);
else
thread_add_event(tm, frrzmq_write_msg,
cbp, cb->fd, &core->thread);
event_add_event(tm, frrzmq_write_msg, cbp, cb->fd,
&core->thread);
}
}

@ -72,17 +72,17 @@ extern void frrzmq_finish(void);
/* end */

/* core event registration, one of these 2 macros should be used */
#define frrzmq_thread_add_read_msg(m, f, e, a, z, d) \
#define frrzmq_event_add_read_msg(m, f, e, a, z, d) \
_xref_zmq_a(READ, f, d, \
_frrzmq_thread_add_read(&_xref, m, f, NULL, e, a, z, d))
_frrzmq_event_add_read(&_xref, m, f, NULL, e, a, z, d))

#define frrzmq_thread_add_read_part(m, f, e, a, z, d) \
#define frrzmq_event_add_read_part(m, f, e, a, z, d) \
_xref_zmq_a(READ, f, d, \
_frrzmq_thread_add_read(&_xref, m, NULL, f, e, a, z, d))
_frrzmq_event_add_read(&_xref, m, NULL, f, e, a, z, d))

#define frrzmq_thread_add_write_msg(m, f, e, a, z, d) \
#define frrzmq_event_add_write_msg(m, f, e, a, z, d) \
_xref_zmq_a(WRITE, f, d, \
_frrzmq_thread_add_write(&_xref, m, f, e, a, z, d))
_frrzmq_event_add_write(&_xref, m, f, e, a, z, d))

struct cb_core;
struct frrzmq_cb;
@ -108,18 +108,20 @@ struct frrzmq_cb;
* may schedule the event to run as soon as libfrr is back in its main
* loop.
*/
extern int _frrzmq_thread_add_read(
const struct xref_threadsched *xref, struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*partfunc)(void *arg, void *zmqsock, zmq_msg_t *msg,
unsigned partnum),
void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
struct frrzmq_cb **cb);
extern int _frrzmq_thread_add_write(
const struct xref_threadsched *xref, struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
struct frrzmq_cb **cb);
extern int
_frrzmq_event_add_read(const struct xref_threadsched *xref,
struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*partfunc)(void *arg, void *zmqsock,
zmq_msg_t *msg, unsigned partnum),
void (*errfunc)(void *arg, void *zmqsock), void *arg,
void *zmqsock, struct frrzmq_cb **cb);
extern int _frrzmq_event_add_write(const struct xref_threadsched *xref,
struct thread_master *master,
void (*msgfunc)(void *arg, void *zmqsock),
void (*errfunc)(void *arg, void *zmqsock),
void *arg, void *zmqsock,
struct frrzmq_cb **cb);

extern void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core);
12
lib/libfrr.c
12
lib/libfrr.c
@ -1015,8 +1015,8 @@ void frr_config_fork(void)
exit(0);
}

thread_add_event(master, frr_config_read_in, NULL, 0,
&di->read_in);
event_add_event(master, frr_config_read_in, NULL, 0,
&di->read_in);
}

if (di->daemon_mode || di->terminal)
@ -1129,8 +1129,8 @@ static void frr_daemon_ctl(struct event *t)
}

out:
thread_add_read(master, frr_daemon_ctl, NULL, daemon_ctl_sock,
&daemon_ctl_thread);
event_add_read(master, frr_daemon_ctl, NULL, daemon_ctl_sock,
&daemon_ctl_thread);
}

void frr_detach(void)
@ -1158,8 +1158,8 @@ void frr_run(struct thread_master *master)
vty_stdio(frr_terminal_close);
if (daemon_ctl_sock != -1) {
set_nonblocking(daemon_ctl_sock);
thread_add_read(master, frr_daemon_ctl, NULL,
daemon_ctl_sock, &daemon_ctl_thread);
event_add_read(master, frr_daemon_ctl, NULL,
daemon_ctl_sock, &daemon_ctl_thread);
}
} else if (di->daemon_mode) {
int nullfd = open("/dev/null", O_RDONLY | O_NOCTTY);

@ -1053,25 +1053,25 @@ mgmt_be_client_register_event(struct mgmt_be_client_ctx *client_ctx,

switch (event) {
case MGMTD_BE_CONN_READ:
thread_add_read(client_ctx->tm, mgmt_be_client_read,
event_add_read(client_ctx->tm, mgmt_be_client_read,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_read_ev);
assert(client_ctx->conn_read_ev);
break;
case MGMTD_BE_CONN_WRITE:
thread_add_write(client_ctx->tm, mgmt_be_client_write,
event_add_write(client_ctx->tm, mgmt_be_client_write,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_write_ev);
assert(client_ctx->conn_write_ev);
break;
case MGMTD_BE_PROC_MSG:
tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
thread_add_timer_tv(client_ctx->tm, mgmt_be_client_proc_msgbufs,
event_add_timer_tv(client_ctx->tm, mgmt_be_client_proc_msgbufs,
client_ctx, &tv, &client_ctx->msg_proc_ev);
assert(client_ctx->msg_proc_ev);
break;
case MGMTD_BE_CONN_WRITES_ON:
thread_add_timer_msec(client_ctx->tm,
event_add_timer_msec(client_ctx->tm,
mgmt_be_client_resume_writes, client_ctx,
MGMTD_BE_MSG_WRITE_DELAY_MSEC,
&client_ctx->conn_writes_on);
@ -1095,7 +1095,7 @@ mgmt_be_client_schedule_conn_retry(struct mgmt_be_client_ctx *client_ctx,
MGMTD_BE_CLIENT_DBG(
"Scheduling MGMTD Backend server connection retry after %lu seconds",
intvl_secs);
thread_add_timer(client_ctx->tm, mgmt_be_client_conn_timeout,
event_add_timer(client_ctx->tm, mgmt_be_client_conn_timeout,
(void *)client_ctx, intvl_secs,
&client_ctx->conn_retry_tmr);
}

@ -738,26 +738,26 @@ mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,

switch (event) {
case MGMTD_FE_CONN_READ:
thread_add_read(client_ctx->tm, mgmt_fe_client_read,
event_add_read(client_ctx->tm, mgmt_fe_client_read,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_read_ev);
assert(client_ctx->conn_read_ev);
break;
case MGMTD_FE_CONN_WRITE:
thread_add_write(client_ctx->tm, mgmt_fe_client_write,
event_add_write(client_ctx->tm, mgmt_fe_client_write,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_write_ev);
assert(client_ctx->conn_write_ev);
break;
case MGMTD_FE_PROC_MSG:
tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
thread_add_timer_tv(client_ctx->tm,
event_add_timer_tv(client_ctx->tm,
mgmt_fe_client_proc_msgbufs, client_ctx,
&tv, &client_ctx->msg_proc_ev);
assert(client_ctx->msg_proc_ev);
break;
case MGMTD_FE_CONN_WRITES_ON:
thread_add_timer_msec(
event_add_timer_msec(
client_ctx->tm, mgmt_fe_client_resume_writes,
client_ctx, MGMTD_FE_MSG_WRITE_DELAY_MSEC,
&client_ctx->conn_writes_on);
@ -775,7 +775,7 @@ static void mgmt_fe_client_schedule_conn_retry(
MGMTD_FE_CLIENT_DBG(
"Scheduling MGMTD Frontend server connection retry after %lu seconds",
intvl_secs);
thread_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
event_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
(void *)client_ctx, intvl_secs,
&client_ctx->conn_retry_tmr);
}

@ -329,10 +329,9 @@ static int nb_cli_commit(struct vty *vty, bool force,
confirmed_timeout);

thread_cancel(&vty->t_confirmed_commit_timeout);
thread_add_timer(master,
nb_cli_confirmed_commit_timeout, vty,
confirmed_timeout * 60,
&vty->t_confirmed_commit_timeout);
event_add_timer(master, nb_cli_confirmed_commit_timeout,
vty, confirmed_timeout * 60,
&vty->t_confirmed_commit_timeout);
} else {
/* Accept commit confirmation. */
vty_out(vty, "%% Commit complete.\n\n");
@ -355,9 +354,9 @@ static int nb_cli_commit(struct vty *vty, bool force,
vty->confirmed_commit_rollback = nb_config_dup(running_config);

vty->t_confirmed_commit_timeout = NULL;
thread_add_timer(master, nb_cli_confirmed_commit_timeout, vty,
confirmed_timeout * 60,
&vty->t_confirmed_commit_timeout);
event_add_timer(master, nb_cli_confirmed_commit_timeout, vty,
confirmed_timeout * 60,
&vty->t_confirmed_commit_timeout);
}

context.client = NB_CLIENT_CLI;
@ -409,7 +409,7 @@ static void frr_confd_cdb_read_cb(struct event *thread)
int *subp = NULL;
int reslen = 0;

thread_add_read(master, frr_confd_cdb_read_cb, NULL, fd, &t_cdb_sub);
event_add_read(master, frr_confd_cdb_read_cb, NULL, fd, &t_cdb_sub);

if (cdb_read_subscription_socket2(fd, &cdb_ev, &flags, &subp, &reslen)
!= CONFD_OK) {
@ -574,8 +574,8 @@ static int frr_confd_init_cdb(void)
}
pthread_detach(cdb_trigger_thread);

thread_add_read(master, frr_confd_cdb_read_cb, NULL, cdb_sub_sock,
&t_cdb_sub);
event_add_read(master, frr_confd_cdb_read_cb, NULL, cdb_sub_sock,
&t_cdb_sub);

return 0;

@ -1178,7 +1178,7 @@ static void frr_confd_dp_ctl_read(struct event *thread)
struct confd_daemon_ctx *dctx = THREAD_ARG(thread);
int fd = THREAD_FD(thread);

thread_add_read(master, frr_confd_dp_ctl_read, dctx, fd, &t_dp_ctl);
event_add_read(master, frr_confd_dp_ctl_read, dctx, fd, &t_dp_ctl);

frr_confd_dp_read(dctx, fd);
}
@ -1188,7 +1188,8 @@ static void frr_confd_dp_worker_read(struct event *thread)
struct confd_daemon_ctx *dctx = THREAD_ARG(thread);
int fd = THREAD_FD(thread);

thread_add_read(master, frr_confd_dp_worker_read, dctx, fd, &t_dp_worker);
event_add_read(master, frr_confd_dp_worker_read, dctx, fd,
&t_dp_worker);

frr_confd_dp_read(dctx, fd);
}
@ -1320,10 +1321,10 @@ static int frr_confd_init_dp(const char *program_name)
goto error;
}

thread_add_read(master, frr_confd_dp_ctl_read, dctx, dp_ctl_sock,
&t_dp_ctl);
thread_add_read(master, frr_confd_dp_worker_read, dctx, dp_worker_sock,
&t_dp_worker);
event_add_read(master, frr_confd_dp_ctl_read, dctx, dp_ctl_sock,
&t_dp_ctl);
event_add_read(master, frr_confd_dp_worker_read, dctx, dp_worker_sock,
&t_dp_worker);

return 0;
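The confd hunks above also show the callback side of the API: a handler takes a struct event *, recovers its argument and file descriptor with THREAD_ARG()/THREAD_FD(), and immediately re-registers itself with event_add_read(), since each registration appears to fire once. A minimal sketch of that pattern, assuming the usual FRR headers are included and master is the daemon's global event loop as in the surrounding hunks; my_read_cb, my_handle and t_read are hypothetical:

static struct event *t_read;

static void my_read_cb(struct event *thread)
{
        void *ctx = THREAD_ARG(thread); /* context given at registration */
        int fd = THREAD_FD(thread);     /* descriptor that became readable */

        /* re-arm first, as the call sites in this diff do */
        event_add_read(master, my_read_cb, ctx, fd, &t_read);

        my_handle(ctx, fd);             /* then consume the pending data */
}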
@ -157,8 +157,7 @@ class RpcStateBase
* state will either be MORE or FINISH. It will always be FINISH
* for Unary RPCs.
*/
thread_add_event(main_master, c_callback, (void *)this, 0,
NULL);
event_add_event(main_master, c_callback, (void *)this, 0, NULL);

pthread_mutex_lock(&this->cmux);
while (this->state == PROCESS)
@ -1303,7 +1302,7 @@ static int frr_grpc_module_late_init(struct thread_master *tm)
{
main_master = tm;
hook_register(frr_fini, frr_grpc_finish);
thread_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL);
event_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL);
return 0;
}
@ -528,7 +528,7 @@ static void frr_sr_read_cb(struct event *thread)
return;
}

thread_add_read(master, frr_sr_read_cb, module, fd, &module->sr_thread);
event_add_read(master, frr_sr_read_cb, module, fd, &module->sr_thread);
}

static void frr_sr_subscribe_config(struct yang_module *module)
@ -688,8 +688,8 @@ static int frr_sr_init(void)
sr_strerror(ret));
goto cleanup;
}
thread_add_read(master, frr_sr_read_cb, module,
event_pipe, &module->sr_thread);
event_add_read(master, frr_sr_read_cb, module, event_pipe,
&module->sr_thread);
}

hook_register(nb_notification_send, frr_sr_notification_send);
@ -80,7 +80,7 @@ void pullwr_bump(struct pullwr *pullwr)
if (pullwr->writer)
return;

thread_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
event_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
}

static size_t pullwr_iov(struct pullwr *pullwr, struct iovec *iov)
@ -206,7 +206,7 @@ static void pullwr_run(struct event *t)
if (pullwr->valid == 0) {
/* we made a fill() call above that didn't feed any
* data in, and we have nothing more queued, so we go
* into idle, i.e. no calling thread_add_write()
* into idle, i.e. no calling event_add_write()
*/
pullwr_resize(pullwr, 0);
return;
@ -237,7 +237,7 @@ static void pullwr_run(struct event *t)
* is full and we go wait until it's available for writing again.
*/

thread_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
event_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
&pullwr->writer);

/* if we hit the time limit, just keep the buffer, we'll probably need
@ -113,8 +113,8 @@ static void resolver_cb_socket_readable(struct event *t)
struct resolver_fd *resfd = THREAD_ARG(t);
struct resolver_state *r = resfd->state;

thread_add_read(r->master, resolver_cb_socket_readable, resfd,
resfd->fd, &resfd->t_read);
event_add_read(r->master, resolver_cb_socket_readable, resfd, resfd->fd,
&resfd->t_read);
/* ^ ordering important:
* ares_process_fd may transitively call THREAD_OFF(resfd->t_read)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
@ -128,8 +128,8 @@ static void resolver_cb_socket_writable(struct event *t)
struct resolver_fd *resfd = THREAD_ARG(t);
struct resolver_state *r = resfd->state;

thread_add_write(r->master, resolver_cb_socket_writable, resfd,
resfd->fd, &resfd->t_write);
event_add_write(r->master, resolver_cb_socket_writable, resfd,
resfd->fd, &resfd->t_write);
/* ^ ordering important:
* ares_process_fd may transitively call THREAD_OFF(resfd->t_write)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
@ -147,8 +147,8 @@ static void resolver_update_timeouts(struct resolver_state *r)
if (tv) {
unsigned int timeoutms = tv->tv_sec * 1000 + tv->tv_usec / 1000;

thread_add_timer_msec(r->master, resolver_cb_timeout, r,
timeoutms, &r->timeout);
event_add_timer_msec(r->master, resolver_cb_timeout, r,
timeoutms, &r->timeout);
}
}

@ -167,14 +167,14 @@ static void ares_socket_cb(void *data, ares_socket_t fd, int readable,
if (!readable)
THREAD_OFF(resfd->t_read);
else if (!resfd->t_read)
thread_add_read(r->master, resolver_cb_socket_readable, resfd,
fd, &resfd->t_read);
event_add_read(r->master, resolver_cb_socket_readable, resfd,
fd, &resfd->t_read);

if (!writable)
THREAD_OFF(resfd->t_write);
else if (!resfd->t_write)
thread_add_write(r->master, resolver_cb_socket_writable, resfd,
fd, &resfd->t_write);
event_add_write(r->master, resolver_cb_socket_writable, resfd,
fd, &resfd->t_write);

resolver_fd_drop_maybe(resfd);
}
@ -264,8 +264,8 @@ void resolver_resolve(struct resolver_query *query, int af, vrf_id_t vrf_id,
/* for consistency with proper name lookup, don't call the
* callback immediately; defer to thread loop
*/
thread_add_timer_msec(state.master, resolver_cb_literal,
query, 0, &query->literal_cb);
event_add_timer_msec(state.master, resolver_cb_literal, query,
0, &query->literal_cb);
return;
}
@ -133,8 +133,8 @@ void frr_signal_timer(struct event *t)

sigm = THREAD_ARG(t);
sigm->t = NULL;
thread_add_timer(sigm->t->master, frr_signal_timer, &sigmaster,
FRR_SIGNAL_TIMER_INTERVAL, &sigm->t);
event_add_timer(sigm->t->master, frr_signal_timer, &sigmaster,
FRR_SIGNAL_TIMER_INTERVAL, &sigm->t);
frr_sigevent_process();
}
#endif /* SIGEVENT_SCHEDULE_THREAD */
@ -354,7 +354,7 @@ void signal_init(struct thread_master *m, int sigc,

#ifdef SIGEVENT_SCHEDULE_THREAD
sigmaster.t = NULL;
thread_add_timer(m, frr_signal_timer, &sigmaster,
FRR_SIGNAL_TIMER_INTERVAL, &sigmaster.t);
event_add_timer(m, frr_signal_timer, &sigmaster,
FRR_SIGNAL_TIMER_INTERVAL, &sigmaster.t);
#endif /* SIGEVENT_SCHEDULE_THREAD */
}
@ -139,21 +139,21 @@ long spf_backoff_schedule(struct spf_backoff *backoff)
switch (backoff->state) {
case SPF_BACKOFF_QUIET:
backoff->state = SPF_BACKOFF_SHORT_WAIT;
thread_add_timer_msec(
event_add_timer_msec(
backoff->m, spf_backoff_timetolearn_elapsed, backoff,
backoff->timetolearn, &backoff->t_timetolearn);
thread_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
backoff, backoff->holddown,
&backoff->t_holddown);
event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
backoff, backoff->holddown,
&backoff->t_holddown);
backoff->first_event_time = now;
rv = backoff->init_delay;
break;
case SPF_BACKOFF_SHORT_WAIT:
case SPF_BACKOFF_LONG_WAIT:
thread_cancel(&backoff->t_holddown);
thread_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
backoff, backoff->holddown,
&backoff->t_holddown);
event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
backoff, backoff->holddown,
&backoff->t_holddown);
if (backoff->state == SPF_BACKOFF_SHORT_WAIT)
rv = backoff->short_delay;
else
@ -70,8 +70,8 @@ static void systemd_send_watchdog(struct event *t)
systemd_send_information("WATCHDOG=1");

assert(watchdog_msec > 0);
thread_add_timer_msec(systemd_master, systemd_send_watchdog, NULL,
watchdog_msec, NULL);
event_add_timer_msec(systemd_master, systemd_send_watchdog, NULL,
watchdog_msec, NULL);
}

void systemd_send_started(struct thread_master *m)
32
lib/vty.c
32
lib/vty.c
@ -2807,13 +2807,13 @@ static void vty_event_serv(enum vty_event event, struct vty_serv *vty_serv)
{
switch (event) {
case VTY_SERV:
thread_add_read(vty_master, vty_accept, vty_serv,
vty_serv->sock, &vty_serv->t_accept);
event_add_read(vty_master, vty_accept, vty_serv, vty_serv->sock,
&vty_serv->t_accept);
break;
#ifdef VTYSH
case VTYSH_SERV:
thread_add_read(vty_master, vtysh_accept, vty_serv,
vty_serv->sock, &vty_serv->t_accept);
event_add_read(vty_master, vtysh_accept, vty_serv,
vty_serv->sock, &vty_serv->t_accept);
break;
#endif /* VTYSH */
case VTY_READ:
@ -2830,34 +2830,34 @@ static void vty_event(enum vty_event event, struct vty *vty)
switch (event) {
#ifdef VTYSH
case VTYSH_READ:
thread_add_read(vty_master, vtysh_read, vty, vty->fd,
&vty->t_read);
event_add_read(vty_master, vtysh_read, vty, vty->fd,
&vty->t_read);
break;
case VTYSH_WRITE:
thread_add_write(vty_master, vtysh_write, vty, vty->wfd,
&vty->t_write);
event_add_write(vty_master, vtysh_write, vty, vty->wfd,
&vty->t_write);
break;
#endif /* VTYSH */
case VTY_READ:
thread_add_read(vty_master, vty_read, vty, vty->fd,
&vty->t_read);
event_add_read(vty_master, vty_read, vty, vty->fd,
&vty->t_read);

/* Time out treatment. */
if (vty->v_timeout) {
THREAD_OFF(vty->t_timeout);
thread_add_timer(vty_master, vty_timeout, vty,
vty->v_timeout, &vty->t_timeout);
event_add_timer(vty_master, vty_timeout, vty,
vty->v_timeout, &vty->t_timeout);
}
break;
case VTY_WRITE:
thread_add_write(vty_master, vty_flush, vty, vty->wfd,
&vty->t_write);
event_add_write(vty_master, vty_flush, vty, vty->wfd,
&vty->t_write);
break;
case VTY_TIMEOUT_RESET:
THREAD_OFF(vty->t_timeout);
if (vty->v_timeout)
thread_add_timer(vty_master, vty_timeout, vty,
vty->v_timeout, &vty->t_timeout);
event_add_timer(vty_master, vty_timeout, vty,
vty->v_timeout, &vty->t_timeout);
break;
case VTY_SERV:
case VTYSH_SERV:
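The vty hunks also carry over the usual timer-reset idiom unchanged: cancel whatever is pending with THREAD_OFF() and schedule a fresh timer with event_add_timer() into the same event pointer. A minimal sketch under the same assumptions; my_timeout_cb, ctx, secs and t_timeout are hypothetical:

/* (re)start an inactivity timer of `secs` seconds */
THREAD_OFF(t_timeout);
if (secs)
        event_add_timer(master, my_timeout_cb, ctx, secs, &t_timeout);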
@ -47,8 +47,8 @@ static void wheel_timer_thread_helper(struct event *t)
slots_to_skip++;

wheel->slots_to_skip = slots_to_skip;
thread_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
wheel->nexttime * slots_to_skip, &wheel->timer);
event_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
wheel->nexttime * slots_to_skip, &wheel->timer);
}

static void wheel_timer_thread(struct event *t)
@ -85,8 +85,8 @@ struct timer_wheel *wheel_init(struct thread_master *master, int period,
for (i = 0; i < slots; i++)
wheel->wheel_slot_lists[i] = list_new();

thread_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
wheel->nexttime, &wheel->timer);
event_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
wheel->nexttime, &wheel->timer);

return wheel;
}
@ -118,12 +118,12 @@ static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
* as an 'event'
*/
if (delay > 0) {
thread_add_timer_msec(wq->master, work_queue_run, wq,
delay, &wq->thread);
event_add_timer_msec(wq->master, work_queue_run, wq,
delay, &wq->thread);
thread_ignore_late_timer(wq->thread);
} else
thread_add_event(wq->master, work_queue_run, wq, 0,
&wq->thread);
event_add_event(wq->master, work_queue_run, wq, 0,
&wq->thread);

/* set thread yield time, if needed */
if (thread_is_scheduled(wq->thread) &&
@ -266,8 +266,8 @@ static void zclient_flush_data(struct event *thread)
return;
case BUFFER_PENDING:
zclient->t_write = NULL;
thread_add_write(zclient->master, zclient_flush_data, zclient,
zclient->sock, &zclient->t_write);
event_add_write(zclient->master, zclient_flush_data, zclient,
zclient->sock, &zclient->t_write);
break;
case BUFFER_EMPTY:
if (zclient->zebra_buffer_write_ready)
@ -298,8 +298,8 @@ enum zclient_send_status zclient_send_message(struct zclient *zclient)
THREAD_OFF(zclient->t_write);
return ZCLIENT_SEND_SUCCESS;
case BUFFER_PENDING:
thread_add_write(zclient->master, zclient_flush_data, zclient,
zclient->sock, &zclient->t_write);
event_add_write(zclient->master, zclient_flush_data, zclient,
zclient->sock, &zclient->t_write);
return ZCLIENT_SEND_BUFFERED;
}

@ -4204,22 +4204,22 @@ static void zclient_event(enum zclient_event event, struct zclient *zclient)
{
switch (event) {
case ZCLIENT_SCHEDULE:
thread_add_event(zclient->master, zclient_connect, zclient, 0,
&zclient->t_connect);
event_add_event(zclient->master, zclient_connect, zclient, 0,
&zclient->t_connect);
break;
case ZCLIENT_CONNECT:
if (zclient_debug)
zlog_debug(
"zclient connect failures: %d schedule interval is now %d",
zclient->fail, zclient->fail < 3 ? 10 : 60);
thread_add_timer(zclient->master, zclient_connect, zclient,
zclient->fail < 3 ? 10 : 60,
&zclient->t_connect);
event_add_timer(zclient->master, zclient_connect, zclient,
zclient->fail < 3 ? 10 : 60,
&zclient->t_connect);
break;
case ZCLIENT_READ:
zclient->t_read = NULL;
thread_add_read(zclient->master, zclient_read, zclient,
zclient->sock, &zclient->t_read);
event_add_read(zclient->master, zclient_read, zclient,
zclient->sock, &zclient->t_read);
break;
}
}
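In the zclient hunks, an immediate execution is requested with event_add_event(): the numeric argument is passed as 0 here and the callback runs on the next pass of the event loop rather than after a delay. A minimal sketch; my_connect_cb, ctx and t_connect are hypothetical:

/* run my_connect_cb() on the next event-loop iteration */
event_add_event(master, my_connect_cb, ctx, 0, &t_connect);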
@ -800,8 +800,8 @@ static void zlog_5424_reconnect(struct event *t)
ret = read(fd, dummy, sizeof(dummy));
if (ret > 0) {
/* logger is sending us something?!?! */
thread_add_read(t->master, zlog_5424_reconnect, zcf, fd,
&zcf->t_reconnect);
event_add_read(t->master, zlog_5424_reconnect, zcf, fd,
&zcf->t_reconnect);
return;
}

@ -1030,14 +1030,14 @@ static int zlog_5424_open(struct zlog_cfg_5424 *zcf, int sock_type)
assert(zcf->master);

if (fd != -1) {
thread_add_read(zcf->master, zlog_5424_reconnect, zcf,
fd, &zcf->t_reconnect);
event_add_read(zcf->master, zlog_5424_reconnect, zcf,
fd, &zcf->t_reconnect);
zcf->reconn_backoff_cur = zcf->reconn_backoff;

} else {
thread_add_timer_msec(zcf->master, zlog_5424_reconnect,
zcf, zcf->reconn_backoff_cur,
&zcf->t_reconnect);
event_add_timer_msec(zcf->master, zlog_5424_reconnect,
zcf, zcf->reconn_backoff_cur,
&zcf->t_reconnect);

zcf->reconn_backoff_cur += zcf->reconn_backoff_cur / 2;
if (zcf->reconn_backoff_cur > zcf->reconn_backoff_max)
@ -733,14 +733,14 @@ mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,

switch (event) {
case MGMTD_BE_CONN_INIT:
thread_add_timer_msec(mgmt_be_adapter_tm,
event_add_timer_msec(mgmt_be_adapter_tm,
mgmt_be_adapter_conn_init, adapter,
MGMTD_BE_CONN_INIT_DELAY_MSEC,
&adapter->conn_init_ev);
assert(adapter->conn_init_ev);
break;
case MGMTD_BE_CONN_READ:
thread_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read,
event_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read,
adapter, adapter->conn_fd, &adapter->conn_read_ev);
assert(adapter->conn_read_ev);
break;
@ -753,19 +753,19 @@ mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,
MGMTD_BE_ADAPTER_DBG(
"scheduling write ready notify for client %s",
adapter->name);
thread_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write,
event_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write,
adapter, adapter->conn_fd, &adapter->conn_write_ev);
assert(adapter->conn_write_ev);
break;
case MGMTD_BE_PROC_MSG:
tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
thread_add_timer_tv(mgmt_be_adapter_tm,
event_add_timer_tv(mgmt_be_adapter_tm,
mgmt_be_adapter_proc_msgbufs, adapter, &tv,
&adapter->proc_msg_ev);
assert(adapter->proc_msg_ev);
break;
case MGMTD_BE_CONN_WRITES_ON:
thread_add_timer_msec(mgmt_be_adapter_tm,
event_add_timer_msec(mgmt_be_adapter_tm,
mgmt_be_adapter_resume_writes, adapter,
MGMTD_BE_MSG_WRITE_DELAY_MSEC,
&adapter->conn_writes_on);
@ -65,7 +65,7 @@ static void mgmt_be_conn_accept(struct event *thread)
static void mgmt_be_server_register_event(enum mgmt_be_event event)
{
if (event == MGMTD_BE_SERVER) {
thread_add_read(mgmt_be_listen_tm, mgmt_be_conn_accept,
event_add_read(mgmt_be_listen_tm, mgmt_be_conn_accept,
NULL, mgmt_be_listen_fd,
&mgmt_be_listen_ev);
assert(mgmt_be_listen_ev);
@ -656,13 +656,13 @@ mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,

switch (event) {
case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
thread_add_timer_tv(mgmt_fe_adapter_tm,
event_add_timer_tv(mgmt_fe_adapter_tm,
mgmt_fe_session_cfg_txn_clnup, session,
&tv, &session->proc_cfg_txn_clnp);
assert(session->proc_cfg_txn_clnp);
break;
case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
thread_add_timer_tv(mgmt_fe_adapter_tm,
event_add_timer_tv(mgmt_fe_adapter_tm,
mgmt_fe_session_show_txn_clnup, session,
&tv, &session->proc_show_txn_clnp);
assert(session->proc_show_txn_clnp);
@ -1495,25 +1495,25 @@ mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,

switch (event) {
case MGMTD_FE_CONN_READ:
thread_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
event_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
adapter, adapter->conn_fd, &adapter->conn_read_ev);
assert(adapter->conn_read_ev);
break;
case MGMTD_FE_CONN_WRITE:
thread_add_write(mgmt_fe_adapter_tm,
event_add_write(mgmt_fe_adapter_tm,
mgmt_fe_adapter_write, adapter,
adapter->conn_fd, &adapter->conn_write_ev);
assert(adapter->conn_write_ev);
break;
case MGMTD_FE_PROC_MSG:
tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
thread_add_timer_tv(mgmt_fe_adapter_tm,
event_add_timer_tv(mgmt_fe_adapter_tm,
mgmt_fe_adapter_proc_msgbufs, adapter,
&tv, &adapter->proc_msg_ev);
assert(adapter->proc_msg_ev);
break;
case MGMTD_FE_CONN_WRITES_ON:
thread_add_timer_msec(mgmt_fe_adapter_tm,
event_add_timer_msec(mgmt_fe_adapter_tm,
mgmt_fe_adapter_resume_writes, adapter,
MGMTD_FE_MSG_WRITE_DELAY_MSEC,
&adapter->conn_writes_on);
@ -65,7 +65,7 @@ static void mgmt_fe_conn_accept(struct event *thread)
static void mgmt_fe_server_register_event(enum mgmt_fe_event event)
{
if (event == MGMTD_FE_SERVER) {
thread_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept,
event_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept,
NULL, mgmt_fe_listen_fd,
&mgmt_fe_listen_ev);
assert(mgmt_fe_listen_ev);
@ -2226,27 +2226,27 @@ static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,

switch (event) {
case MGMTD_TXN_PROC_SETCFG:
thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
txn, &tv, &txn->proc_set_cfg);
assert(txn->proc_set_cfg);
break;
case MGMTD_TXN_PROC_COMMITCFG:
thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
txn, &tv, &txn->proc_comm_cfg);
assert(txn->proc_comm_cfg);
break;
case MGMTD_TXN_PROC_GETCFG:
thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
txn, &tv, &txn->proc_get_cfg);
assert(txn->proc_get_cfg);
break;
case MGMTD_TXN_PROC_GETDATA:
thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
txn, &tv, &txn->proc_get_data);
assert(txn->proc_get_data);
break;
case MGMTD_TXN_COMMITCFG_TIMEOUT:
thread_add_timer_msec(mgmt_txn_tm,
event_add_timer_msec(mgmt_txn_tm,
mgmt_txn_cfg_commit_timedout, txn,
MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
&txn->comm_cfg_timeout);
@ -2254,7 +2254,7 @@ static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
break;
case MGMTD_TXN_CLEANUP:
tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
&txn->clnup);
assert(txn->clnup);
}
@ -118,8 +118,8 @@ static void netlink_log_recv(struct event *t)
}
}

thread_add_read(master, netlink_log_recv, 0, netlink_log_fd,
&netlink_log_thread);
event_add_read(master, netlink_log_recv, 0, netlink_log_fd,
&netlink_log_thread);
}

void netlink_set_nflog_group(int nlgroup)
@ -136,8 +136,8 @@ void netlink_set_nflog_group(int nlgroup)
return;

netlink_log_register(netlink_log_fd, nlgroup);
thread_add_read(master, netlink_log_recv, 0, netlink_log_fd,
&netlink_log_thread);
event_add_read(master, netlink_log_recv, 0, netlink_log_fd,
&netlink_log_thread);
}
}
@ -325,8 +325,8 @@ static void nhrp_cache_update_timers(struct nhrp_cache *c)
switch (c->cur.type) {
case NHRP_CACHE_INVALID:
if (!c->t_auth)
thread_add_timer_msec(master, nhrp_cache_do_free, c, 10,
&c->t_timeout);
event_add_timer_msec(master, nhrp_cache_do_free, c, 10,
&c->t_timeout);
break;
case NHRP_CACHE_INCOMPLETE:
case NHRP_CACHE_NEGATIVE:
@ -337,9 +337,9 @@ static void nhrp_cache_update_timers(struct nhrp_cache *c)
case NHRP_CACHE_LOCAL:
case NHRP_CACHE_NUM_TYPES:
if (c->cur.expires)
thread_add_timer(master, nhrp_cache_do_timeout, c,
c->cur.expires - monotime(NULL),
&c->t_timeout);
event_add_timer(master, nhrp_cache_do_timeout, c,
c->cur.expires - monotime(NULL),
&c->t_timeout);
break;
}
}
@ -413,8 +413,8 @@ static void nhrp_cache_newpeer_notifier(struct notifier_block *n,
if (nhrp_peer_check(c->new.peer, 1)) {
evmgr_notify("authorize-binding", c,
nhrp_cache_authorize_binding);
thread_add_timer(master, nhrp_cache_do_auth_timeout, c,
10, &c->t_auth);
event_add_timer(master, nhrp_cache_do_auth_timeout, c,
10, &c->t_auth);
}
break;
case NOTIFY_PEER_DOWN:
@ -506,8 +506,8 @@ int nhrp_cache_update_binding(struct nhrp_cache *c, enum nhrp_cache_type type,
nhrp_cache_newpeer_notifier);
nhrp_cache_newpeer_notifier(&c->newpeer_notifier,
NOTIFY_PEER_UP);
thread_add_timer(master, nhrp_cache_do_auth_timeout, c,
60, &c->t_auth);
event_add_timer(master, nhrp_cache_do_auth_timeout, c,
60, &c->t_auth);
}
}
nhrp_cache_update_timers(c);
@ -40,8 +40,8 @@ static void evmgr_connection_error(struct event_manager *evmgr)
close(evmgr->fd);
evmgr->fd = -1;
if (nhrp_event_socket_path)
thread_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
&evmgr->t_reconnect);
event_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
&evmgr->t_reconnect);
}

static void evmgr_recv_message(struct event_manager *evmgr, struct zbuf *zb)
@ -89,7 +89,7 @@ static void evmgr_read(struct event *t)
while (zbuf_may_pull_until(ibuf, "\n\n", &msg))
evmgr_recv_message(evmgr, &msg);

thread_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
event_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
}

static void evmgr_write(struct event *t)
@ -99,8 +99,8 @@ static void evmgr_write(struct event *t)

r = zbufq_write(&evmgr->obuf, evmgr->fd);
if (r > 0) {
thread_add_write(master, evmgr_write, evmgr, evmgr->fd,
&evmgr->t_write);
event_add_write(master, evmgr_write, evmgr, evmgr->fd,
&evmgr->t_write);
} else if (r < 0) {
evmgr_connection_error(evmgr);
}
@ -175,8 +175,8 @@ static void evmgr_submit(struct event_manager *evmgr, struct zbuf *obuf)
zbuf_put(obuf, "\n", 1);
zbufq_queue(&evmgr->obuf, obuf);
if (evmgr->fd >= 0)
thread_add_write(master, evmgr_write, evmgr, evmgr->fd,
&evmgr->t_write);
event_add_write(master, evmgr_write, evmgr, evmgr->fd,
&evmgr->t_write);
}

static void evmgr_reconnect(struct event *t)
@ -192,14 +192,14 @@ static void evmgr_reconnect(struct event *t)
zlog_warn("%s: failure connecting nhrp-event socket: %s",
__func__, strerror(errno));
zbufq_reset(&evmgr->obuf);
thread_add_timer(master, evmgr_reconnect, evmgr, 10,
&evmgr->t_reconnect);
event_add_timer(master, evmgr_reconnect, evmgr, 10,
&evmgr->t_reconnect);
return;
}

zlog_info("Connected to Event Manager");
evmgr->fd = fd;
thread_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
event_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
}

static struct event_manager evmgr_connection;
@ -211,8 +211,8 @@ void evmgr_init(void)
evmgr->fd = -1;
zbuf_init(&evmgr->ibuf, evmgr->ibuf_data, sizeof(evmgr->ibuf_data), 0);
zbufq_init(&evmgr->obuf);
thread_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
&evmgr->t_reconnect);
event_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
&evmgr->t_reconnect);
}

void evmgr_set_socket(const char *socket)
@ -160,8 +160,8 @@ static void netlink_mcast_log_recv(struct event *t)
}
}

thread_add_read(master, netlink_mcast_log_recv, 0, netlink_mcast_log_fd,
&netlink_mcast_log_thread);
event_add_read(master, netlink_mcast_log_recv, 0, netlink_mcast_log_fd,
&netlink_mcast_log_thread);
}

static void netlink_mcast_log_register(int fd, int group)
@ -202,9 +202,8 @@ void netlink_mcast_set_nflog_group(int nlgroup)
return;

netlink_mcast_log_register(netlink_mcast_log_fd, nlgroup);
thread_add_read(master, netlink_mcast_log_recv, 0,
netlink_mcast_log_fd,
&netlink_mcast_log_thread);
event_add_read(master, netlink_mcast_log_recv, 0,
netlink_mcast_log_fd, &netlink_mcast_log_thread);
debugf(NHRP_DEBUG_COMMON, "Register nflog group: %d",
netlink_mcast_nflog_group);
}
@ -92,8 +92,8 @@ static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg)

/* RFC 2332 5.2.3 - Registration is recommend to be renewed
* every one third of holdtime */
thread_add_timer(master, nhrp_reg_send_req, r, holdtime / 3,
&r->t_register);
event_add_timer(master, nhrp_reg_send_req, r, holdtime / 3,
&r->t_register);

r->proto_addr = p->dst_proto;
c = nhrp_cache_get(ifp, &p->dst_proto, 1);
@ -133,7 +133,7 @@ static void nhrp_reg_timeout(struct event *t)
}
r->timeout = 2;
}
thread_add_timer_msec(master, nhrp_reg_send_req, r, 10, &r->t_register);
event_add_timer_msec(master, nhrp_reg_send_req, r, 10, &r->t_register);
}

static void nhrp_reg_peer_notify(struct notifier_block *n, unsigned long cmd)
@ -149,8 +149,8 @@ static void nhrp_reg_peer_notify(struct notifier_block *n, unsigned long cmd)
debugf(NHRP_DEBUG_COMMON, "NHS: Flush timer for %pSU",
&r->peer->vc->remote.nbma);
THREAD_OFF(r->t_register);
thread_add_timer_msec(master, nhrp_reg_send_req, r, 10,
&r->t_register);
event_add_timer_msec(master, nhrp_reg_send_req, r, 10,
&r->t_register);
break;
}
}
@ -171,13 +171,13 @@ static void nhrp_reg_send_req(struct event *t)
if (!nhrp_peer_check(r->peer, 2)) {
debugf(NHRP_DEBUG_COMMON, "NHS: Waiting link for %pSU",
&r->peer->vc->remote.nbma);
thread_add_timer(master, nhrp_reg_send_req, r, 120,
&r->t_register);
event_add_timer(master, nhrp_reg_send_req, r, 120,
&r->t_register);
return;
}

thread_add_timer(master, nhrp_reg_timeout, r, r->timeout,
&r->t_register);
event_add_timer(master, nhrp_reg_timeout, r, r->timeout,
&r->t_register);

/* RFC2332 5.2.3 NHC uses it's own address as dst if NHS is unknown */
dst_proto = &nhs->proto_addr;
@ -269,13 +269,13 @@ static void nhrp_nhs_resolve_cb(struct resolver_query *q, const char *errstr,

if (n < 0) {
/* Failed, retry in a moment */
thread_add_timer(master, nhrp_nhs_resolve, nhs, 5,
&nhs->t_resolve);
event_add_timer(master, nhrp_nhs_resolve, nhs, 5,
&nhs->t_resolve);
return;
}

thread_add_timer(master, nhrp_nhs_resolve, nhs, 2 * 60 * 60,
&nhs->t_resolve);
event_add_timer(master, nhrp_nhs_resolve, nhs, 2 * 60 * 60,
&nhs->t_resolve);

frr_each (nhrp_reglist, &nhs->reglist_head, reg)
reg->mark = 1;
@ -300,8 +300,8 @@ static void nhrp_nhs_resolve_cb(struct resolver_query *q, const char *errstr,
nhrp_reglist_add_tail(&nhs->reglist_head, reg);
nhrp_peer_notify_add(reg->peer, &reg->peer_notifier,
nhrp_reg_peer_notify);
thread_add_timer_msec(master, nhrp_reg_send_req, reg, 50,
&reg->t_register);
event_add_timer_msec(master, nhrp_reg_send_req, reg, 50,
&reg->t_register);
}

frr_each_safe (nhrp_reglist, &nhs->reglist_head, reg)
@ -347,8 +347,8 @@ int nhrp_nhs_add(struct interface *ifp, afi_t afi, union sockunion *proto_addr,
.reglist_head = INIT_DLIST(nhs->reglist_head),
};
nhrp_nhslist_add_tail(&nifp->afi[afi].nhslist_head, nhs);
thread_add_timer_msec(master, nhrp_nhs_resolve, nhs, 1000,
&nhs->t_resolve);
event_add_timer_msec(master, nhrp_nhs_resolve, nhs, 1000,
&nhs->t_resolve);

return NHRP_OK;
}
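The nhrpd registration code above keeps periodic work going by re-arming the timer from inside its own callback, for example renewing at one third of the holdtime. A minimal sketch of that re-arming pattern under the same assumptions; my_periodic_cb, do_periodic_work, PERIOD_SECS and t_periodic are hypothetical:

static struct event *t_periodic;

static void my_periodic_cb(struct event *thread)
{
        void *ctx = THREAD_ARG(thread);

        do_periodic_work(ctx);

        /* schedule the next run; the same pointer receives the new event */
        event_add_timer(master, my_periodic_cb, ctx, PERIOD_SECS, &t_periodic);
}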
@ -296,7 +296,7 @@ static void nhrp_packet_recvraw(struct event *t)
uint8_t addr[64];
size_t len, addrlen;

thread_add_read(master, nhrp_packet_recvraw, 0, fd, NULL);
event_add_read(master, nhrp_packet_recvraw, 0, fd, NULL);

zb = zbuf_alloc(1500);
if (!zb)
@ -336,6 +336,6 @@ err:

int nhrp_packet_init(void)
{
thread_add_read(master, nhrp_packet_recvraw, 0, os_socket(), NULL);
event_add_read(master, nhrp_packet_recvraw, 0, os_socket(), NULL);
return 0;
}
@ -82,8 +82,8 @@ static void __nhrp_peer_check(struct nhrp_peer *p)
* the up notification a bit to allow things
* settle down. This allows IKE to install
* SPDs and SAs. */
thread_add_timer_msec(master, nhrp_peer_notify_up, p,
50, &p->t_fallback);
event_add_timer_msec(master, nhrp_peer_notify_up, p, 50,
&p->t_fallback);
} else {
nhrp_peer_ref(p);
p->online = online;
@ -264,8 +264,8 @@ static void nhrp_peer_request_timeout(struct event *t)
p->fallback_requested = 1;
vici_request_vc(nifp->ipsec_fallback_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
thread_add_timer(master, nhrp_peer_request_timeout, p, 30,
&p->t_fallback);
event_add_timer(master, nhrp_peer_request_timeout, p, 30,
&p->t_fallback);
} else {
p->requested = p->fallback_requested = 0;
}
@ -287,10 +287,10 @@ static void nhrp_peer_defer_vici_request(struct event *t)
} else {
vici_request_vc(nifp->ipsec_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
thread_add_timer(
master, nhrp_peer_request_timeout, p,
(nifp->ipsec_fallback_profile && !p->prio) ? 15 : 30,
&p->t_fallback);
event_add_timer(master, nhrp_peer_request_timeout, p,
(nifp->ipsec_fallback_profile && !p->prio) ? 15
: 30,
&p->t_fallback);
}
}

@ -320,10 +320,10 @@ int nhrp_peer_check(struct nhrp_peer *p, int establish)
if (p->prio) {
vici_request_vc(nifp->ipsec_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
thread_add_timer(
master, nhrp_peer_request_timeout, p,
(nifp->ipsec_fallback_profile && !p->prio) ? 15 : 30,
&p->t_fallback);
event_add_timer(master, nhrp_peer_request_timeout, p,
(nifp->ipsec_fallback_profile && !p->prio) ? 15
: 30,
&p->t_fallback);
} else {
/* Maximum timeout is 1 second */
int r_time_ms = frr_weak_random() % 1000;
@ -331,8 +331,8 @@ int nhrp_peer_check(struct nhrp_peer *p, int establish)
debugf(NHRP_DEBUG_COMMON,
"Initiating IPsec connection request to %pSU after %d ms:",
&vc->remote.nbma, r_time_ms);
thread_add_timer_msec(master, nhrp_peer_defer_vici_request,
p, r_time_ms, &p->t_timer);
event_add_timer_msec(master, nhrp_peer_defer_vici_request, p,
r_time_ms, &p->t_timer);
}

return 0;
Some files were not shown because too many files have changed in this diff.