mirror of
https://git.proxmox.com/git/mirror_frr
synced 2025-08-13 14:42:06 +00:00
bgpd: Start abstraction of struct peer_connection
BGP tracks connections based upon the peer. But the problem with this is that the doppelganger structure for it is being created. This has introduced a bunch of fragileness in that the peer exists independently of the connections to it. The whole point of the doppelganger structure was to allow BGP to both accept and initiate tcp connections, and then, when we get one to a `good` state, we collapse into the appropriate one. The problem is that having 2 peer structures creates a situation where we have to make sure we are configuring the `right` one, and also make sure that we collapse the two independent peer structures into 1 acting peer. This makes no sense; let's abstract the peer into having 2 connections — one for incoming connections and one for outgoing connections — then we can easily collapse down without having to do crazy stuff. In addition, people adding new features won't need to touch a million places in the code. This is the start of this abstraction. In this commit we'll just pull out the fd and input/output buffers into a connection data structure. Future commits will abstract further. Signed-off-by: Donald Sharp <sharpd@nvidia.com>
This commit is contained in:
parent
bd6a00e8f7
commit
1f32eb30d9
179
bgpd/bgp_fsm.c
179
bgpd/bgp_fsm.c
@ -146,8 +146,8 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
|
||||
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug("%s: peer transfer %p fd %d -> %p fd %d)",
|
||||
from_peer->host, from_peer, from_peer->fd, peer,
|
||||
peer->fd);
|
||||
from_peer->host, from_peer, from_peer->connection.fd,
|
||||
peer, peer->connection.fd);
|
||||
|
||||
bgp_writes_off(peer);
|
||||
bgp_reads_off(peer);
|
||||
@ -179,13 +179,14 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
|
||||
* on various buffers. Those need to be transferred or dropped,
|
||||
* otherwise we'll get spurious failures during session establishment.
|
||||
*/
|
||||
frr_with_mutex (&peer->io_mtx, &from_peer->io_mtx) {
|
||||
fd = peer->fd;
|
||||
peer->fd = from_peer->fd;
|
||||
from_peer->fd = fd;
|
||||
frr_with_mutex (&peer->connection.io_mtx,
|
||||
&from_peer->connection.io_mtx) {
|
||||
fd = peer->connection.fd;
|
||||
peer->connection.fd = from_peer->connection.fd;
|
||||
from_peer->connection.fd = fd;
|
||||
|
||||
stream_fifo_clean(peer->ibuf);
|
||||
stream_fifo_clean(peer->obuf);
|
||||
stream_fifo_clean(peer->connection.ibuf);
|
||||
stream_fifo_clean(peer->connection.obuf);
|
||||
|
||||
/*
|
||||
* this should never happen, since bgp_process_packet() is the
|
||||
@ -207,18 +208,21 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
|
||||
}
|
||||
|
||||
// copy each packet from old peer's output queue to new peer
|
||||
while (from_peer->obuf->head)
|
||||
stream_fifo_push(peer->obuf,
|
||||
stream_fifo_pop(from_peer->obuf));
|
||||
while (from_peer->connection.obuf->head)
|
||||
stream_fifo_push(peer->connection.obuf,
|
||||
stream_fifo_pop(
|
||||
from_peer->connection.obuf));
|
||||
|
||||
// copy each packet from old peer's input queue to new peer
|
||||
while (from_peer->ibuf->head)
|
||||
stream_fifo_push(peer->ibuf,
|
||||
stream_fifo_pop(from_peer->ibuf));
|
||||
while (from_peer->connection.ibuf->head)
|
||||
stream_fifo_push(peer->connection.ibuf,
|
||||
stream_fifo_pop(
|
||||
from_peer->connection.ibuf));
|
||||
|
||||
ringbuf_wipe(peer->ibuf_work);
|
||||
ringbuf_copy(peer->ibuf_work, from_peer->ibuf_work,
|
||||
ringbuf_remain(from_peer->ibuf_work));
|
||||
ringbuf_wipe(peer->connection.ibuf_work);
|
||||
ringbuf_copy(peer->connection.ibuf_work,
|
||||
from_peer->connection.ibuf_work,
|
||||
ringbuf_remain(from_peer->connection.ibuf_work));
|
||||
}
|
||||
|
||||
peer->as = from_peer->as;
|
||||
@ -295,28 +299,28 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
|
||||
}
|
||||
|
||||
if (bgp_getsockname(peer) < 0) {
|
||||
flog_err(
|
||||
EC_LIB_SOCKET,
|
||||
"%%bgp_getsockname() failed for %s peer %s fd %d (from_peer fd %d)",
|
||||
(CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)
|
||||
? "accept"
|
||||
: ""),
|
||||
peer->host, peer->fd, from_peer->fd);
|
||||
flog_err(EC_LIB_SOCKET,
|
||||
"%%bgp_getsockname() failed for %s peer %s fd %d (from_peer fd %d)",
|
||||
(CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)
|
||||
? "accept"
|
||||
: ""),
|
||||
peer->host, peer->connection.fd,
|
||||
from_peer->connection.fd);
|
||||
BGP_EVENT_ADD(peer, BGP_Stop);
|
||||
BGP_EVENT_ADD(from_peer, BGP_Stop);
|
||||
return NULL;
|
||||
}
|
||||
if (from_peer->status > Active) {
|
||||
if (bgp_getsockname(from_peer) < 0) {
|
||||
flog_err(
|
||||
EC_LIB_SOCKET,
|
||||
"%%bgp_getsockname() failed for %s from_peer %s fd %d (peer fd %d)",
|
||||
flog_err(EC_LIB_SOCKET,
|
||||
"%%bgp_getsockname() failed for %s from_peer %s fd %d (peer fd %d)",
|
||||
|
||||
(CHECK_FLAG(from_peer->sflags,
|
||||
PEER_STATUS_ACCEPT_PEER)
|
||||
? "accept"
|
||||
: ""),
|
||||
from_peer->host, from_peer->fd, peer->fd);
|
||||
(CHECK_FLAG(from_peer->sflags,
|
||||
PEER_STATUS_ACCEPT_PEER)
|
||||
? "accept"
|
||||
: ""),
|
||||
from_peer->host, from_peer->connection.fd,
|
||||
peer->connection.fd);
|
||||
bgp_stop(from_peer);
|
||||
from_peer = NULL;
|
||||
}
|
||||
@ -554,7 +558,7 @@ static void bgp_holdtime_timer(struct event *thread)
|
||||
* for systems where we are heavily loaded for one
|
||||
* reason or another.
|
||||
*/
|
||||
inq_count = atomic_load_explicit(&peer->ibuf->count,
|
||||
inq_count = atomic_load_explicit(&peer->connection.ibuf->count,
|
||||
memory_order_relaxed);
|
||||
if (inq_count)
|
||||
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
|
||||
@ -1332,7 +1336,8 @@ void bgp_fsm_change_status(struct peer *peer, enum bgp_fsm_status status)
|
||||
bgp_update_delay_process_status_change(peer);
|
||||
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug("%s fd %d went from %s to %s", peer->host, peer->fd,
|
||||
zlog_debug("%s fd %d went from %s to %s", peer->host,
|
||||
peer->connection.fd,
|
||||
lookup_msg(bgp_status_msg, peer->ostatus, NULL),
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL));
|
||||
}
|
||||
@ -1509,14 +1514,14 @@ enum bgp_fsm_state_progress bgp_stop(struct peer *peer)
|
||||
EVENT_OFF(peer->t_delayopen);
|
||||
|
||||
/* Clear input and output buffer. */
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
if (peer->ibuf)
|
||||
stream_fifo_clean(peer->ibuf);
|
||||
if (peer->obuf)
|
||||
stream_fifo_clean(peer->obuf);
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
if (peer->connection.ibuf)
|
||||
stream_fifo_clean(peer->connection.ibuf);
|
||||
if (peer->connection.obuf)
|
||||
stream_fifo_clean(peer->connection.obuf);
|
||||
|
||||
if (peer->ibuf_work)
|
||||
ringbuf_wipe(peer->ibuf_work);
|
||||
if (peer->connection.ibuf_work)
|
||||
ringbuf_wipe(peer->connection.ibuf_work);
|
||||
|
||||
if (peer->curr) {
|
||||
stream_free(peer->curr);
|
||||
@ -1525,9 +1530,9 @@ enum bgp_fsm_state_progress bgp_stop(struct peer *peer)
|
||||
}
|
||||
|
||||
/* Close of file descriptor. */
|
||||
if (peer->fd >= 0) {
|
||||
close(peer->fd);
|
||||
peer->fd = -1;
|
||||
if (peer->connection.fd >= 0) {
|
||||
close(peer->connection.fd);
|
||||
peer->connection.fd = -1;
|
||||
}
|
||||
|
||||
/* Reset capabilities. */
|
||||
@ -1662,8 +1667,8 @@ static void bgp_connect_check(struct event *thread)
|
||||
|
||||
/* Check file descriptor. */
|
||||
slen = sizeof(status);
|
||||
ret = getsockopt(peer->fd, SOL_SOCKET, SO_ERROR, (void *)&status,
|
||||
&slen);
|
||||
ret = getsockopt(peer->connection.fd, SOL_SOCKET, SO_ERROR,
|
||||
(void *)&status, &slen);
|
||||
|
||||
/* If getsockopt is fail, this is fatal error. */
|
||||
if (ret < 0) {
|
||||
@ -1693,16 +1698,16 @@ static void bgp_connect_check(struct event *thread)
|
||||
add read thread for reading open message. */
|
||||
static enum bgp_fsm_state_progress bgp_connect_success(struct peer *peer)
|
||||
{
|
||||
if (peer->fd < 0) {
|
||||
if (peer->connection.fd < 0) {
|
||||
flog_err(EC_BGP_CONNECT, "%s peer's fd is negative value %d",
|
||||
__func__, peer->fd);
|
||||
__func__, peer->connection.fd);
|
||||
return bgp_stop(peer);
|
||||
}
|
||||
|
||||
if (bgp_getsockname(peer) < 0) {
|
||||
flog_err_sys(EC_LIB_SOCKET,
|
||||
"%s: bgp_getsockname(): failed for peer %s, fd %d",
|
||||
__func__, peer->host, peer->fd);
|
||||
__func__, peer->host, peer->connection.fd);
|
||||
bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR,
|
||||
bgp_fsm_error_subcode(peer->status));
|
||||
bgp_writes_on(peer);
|
||||
@ -1737,16 +1742,16 @@ static enum bgp_fsm_state_progress bgp_connect_success(struct peer *peer)
|
||||
static enum bgp_fsm_state_progress
|
||||
bgp_connect_success_w_delayopen(struct peer *peer)
|
||||
{
|
||||
if (peer->fd < 0) {
|
||||
if (peer->connection.fd < 0) {
|
||||
flog_err(EC_BGP_CONNECT, "%s: peer's fd is negative value %d",
|
||||
__func__, peer->fd);
|
||||
__func__, peer->connection.fd);
|
||||
return bgp_stop(peer);
|
||||
}
|
||||
|
||||
if (bgp_getsockname(peer) < 0) {
|
||||
flog_err_sys(EC_LIB_SOCKET,
|
||||
"%s: bgp_getsockname(): failed for peer %s, fd %d",
|
||||
__func__, peer->host, peer->fd);
|
||||
__func__, peer->host, peer->connection.fd);
|
||||
bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR,
|
||||
bgp_fsm_error_subcode(peer->status));
|
||||
bgp_writes_on(peer);
|
||||
@ -1903,9 +1908,8 @@ enum bgp_fsm_state_progress bgp_start(struct peer *peer)
|
||||
break;
|
||||
case connect_success:
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug(
|
||||
"%s [FSM] Connect immediately success, fd %d",
|
||||
peer->host, peer->fd);
|
||||
zlog_debug("%s [FSM] Connect immediately success, fd %d",
|
||||
peer->host, peer->connection.fd);
|
||||
|
||||
BGP_EVENT_ADD(peer, TCP_connection_open);
|
||||
break;
|
||||
@ -1913,13 +1917,11 @@ enum bgp_fsm_state_progress bgp_start(struct peer *peer)
|
||||
/* To check nonblocking connect, we wait until socket is
|
||||
readable or writable. */
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug(
|
||||
"%s [FSM] Non blocking connect waiting result, fd %d",
|
||||
peer->host, peer->fd);
|
||||
if (peer->fd < 0) {
|
||||
flog_err(EC_BGP_FSM,
|
||||
"%s peer's fd is negative value %d", __func__,
|
||||
peer->fd);
|
||||
zlog_debug("%s [FSM] Non blocking connect waiting result, fd %d",
|
||||
peer->host, peer->connection.fd);
|
||||
if (peer->connection.fd < 0) {
|
||||
flog_err(EC_BGP_FSM, "%s peer's fd is negative value %d",
|
||||
__func__, peer->connection.fd);
|
||||
return BGP_FSM_FAILURE;
|
||||
}
|
||||
/*
|
||||
@ -1931,10 +1933,10 @@ enum bgp_fsm_state_progress bgp_start(struct peer *peer)
|
||||
* bgp_connect_check() as the handler for each and cancel the
|
||||
* unused event in that function.
|
||||
*/
|
||||
event_add_read(bm->master, bgp_connect_check, peer, peer->fd,
|
||||
&peer->t_connect_check_r);
|
||||
event_add_write(bm->master, bgp_connect_check, peer, peer->fd,
|
||||
&peer->t_connect_check_w);
|
||||
event_add_read(bm->master, bgp_connect_check, peer,
|
||||
peer->connection.fd, &peer->t_connect_check_r);
|
||||
event_add_write(bm->master, bgp_connect_check, peer,
|
||||
peer->connection.fd, &peer->t_connect_check_w);
|
||||
break;
|
||||
}
|
||||
return BGP_FSM_SUCCESS;
|
||||
@ -2327,26 +2329,24 @@ static enum bgp_fsm_state_progress bgp_fsm_update(struct peer *peer)
|
||||
/* This is empty event. */
|
||||
static enum bgp_fsm_state_progress bgp_ignore(struct peer *peer)
|
||||
{
|
||||
flog_err(
|
||||
EC_BGP_FSM,
|
||||
"%s [FSM] Ignoring event %s in state %s, prior events %s, %s, fd %d",
|
||||
peer->host, bgp_event_str[peer->cur_event],
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL),
|
||||
bgp_event_str[peer->last_event],
|
||||
bgp_event_str[peer->last_major_event], peer->fd);
|
||||
flog_err(EC_BGP_FSM,
|
||||
"%s [FSM] Ignoring event %s in state %s, prior events %s, %s, fd %d",
|
||||
peer->host, bgp_event_str[peer->cur_event],
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL),
|
||||
bgp_event_str[peer->last_event],
|
||||
bgp_event_str[peer->last_major_event], peer->connection.fd);
|
||||
return BGP_FSM_SUCCESS;
|
||||
}
|
||||
|
||||
/* This is to handle unexpected events.. */
|
||||
static enum bgp_fsm_state_progress bgp_fsm_exception(struct peer *peer)
|
||||
{
|
||||
flog_err(
|
||||
EC_BGP_FSM,
|
||||
"%s [FSM] Unexpected event %s in state %s, prior events %s, %s, fd %d",
|
||||
peer->host, bgp_event_str[peer->cur_event],
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL),
|
||||
bgp_event_str[peer->last_event],
|
||||
bgp_event_str[peer->last_major_event], peer->fd);
|
||||
flog_err(EC_BGP_FSM,
|
||||
"%s [FSM] Unexpected event %s in state %s, prior events %s, %s, fd %d",
|
||||
peer->host, bgp_event_str[peer->cur_event],
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL),
|
||||
bgp_event_str[peer->last_event],
|
||||
bgp_event_str[peer->last_major_event], peer->connection.fd);
|
||||
return bgp_stop(peer);
|
||||
}
|
||||
|
||||
@ -2590,7 +2590,8 @@ int bgp_event_update(struct peer *peer, enum bgp_fsm_events event)
|
||||
zlog_debug("%s [FSM] %s (%s->%s), fd %d", peer->host,
|
||||
bgp_event_str[event],
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL),
|
||||
lookup_msg(bgp_status_msg, next, NULL), peer->fd);
|
||||
lookup_msg(bgp_status_msg, next, NULL),
|
||||
peer->connection.fd);
|
||||
|
||||
peer->last_event = peer->cur_event;
|
||||
peer->cur_event = event;
|
||||
@ -2637,14 +2638,14 @@ int bgp_event_update(struct peer *peer, enum bgp_fsm_events event)
|
||||
*/
|
||||
if (!dyn_nbr && !passive_conn && peer->bgp &&
|
||||
ret != BGP_FSM_FAILURE_AND_DELETE) {
|
||||
flog_err(
|
||||
EC_BGP_FSM,
|
||||
"%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d, last reset: %s",
|
||||
peer->host, bgp_event_str[peer->cur_event],
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL),
|
||||
bgp_event_str[peer->last_event],
|
||||
bgp_event_str[peer->last_major_event], peer->fd,
|
||||
peer_down_str[peer->last_reset]);
|
||||
flog_err(EC_BGP_FSM,
|
||||
"%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d, last reset: %s",
|
||||
peer->host, bgp_event_str[peer->cur_event],
|
||||
lookup_msg(bgp_status_msg, peer->status, NULL),
|
||||
bgp_event_str[peer->last_event],
|
||||
bgp_event_str[peer->last_major_event],
|
||||
peer->connection.fd,
|
||||
peer_down_str[peer->last_reset]);
|
||||
bgp_stop(peer);
|
||||
bgp_fsm_change_status(peer, Idle);
|
||||
bgp_timer_set(peer);
|
||||
|
@ -48,15 +48,15 @@ void bgp_writes_on(struct peer *peer)
|
||||
assert(fpt->running);
|
||||
|
||||
assert(peer->status != Deleted);
|
||||
assert(peer->obuf);
|
||||
assert(peer->ibuf);
|
||||
assert(peer->ibuf_work);
|
||||
assert(peer->connection.obuf);
|
||||
assert(peer->connection.ibuf);
|
||||
assert(peer->connection.ibuf_work);
|
||||
assert(!peer->t_connect_check_r);
|
||||
assert(!peer->t_connect_check_w);
|
||||
assert(peer->fd);
|
||||
assert(peer->connection.fd);
|
||||
|
||||
event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
|
||||
&peer->t_write);
|
||||
event_add_write(fpt->master, bgp_process_writes, peer,
|
||||
peer->connection.fd, &peer->t_write);
|
||||
SET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
|
||||
}
|
||||
|
||||
@ -77,16 +77,16 @@ void bgp_reads_on(struct peer *peer)
|
||||
assert(fpt->running);
|
||||
|
||||
assert(peer->status != Deleted);
|
||||
assert(peer->ibuf);
|
||||
assert(peer->fd);
|
||||
assert(peer->ibuf_work);
|
||||
assert(peer->obuf);
|
||||
assert(peer->connection.ibuf);
|
||||
assert(peer->connection.fd);
|
||||
assert(peer->connection.ibuf_work);
|
||||
assert(peer->connection.obuf);
|
||||
assert(!peer->t_connect_check_r);
|
||||
assert(!peer->t_connect_check_w);
|
||||
assert(peer->fd);
|
||||
assert(peer->connection.fd);
|
||||
|
||||
event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
|
||||
&peer->t_read);
|
||||
event_add_read(fpt->master, bgp_process_reads, peer,
|
||||
peer->connection.fd, &peer->t_read);
|
||||
|
||||
SET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
|
||||
}
|
||||
@ -116,14 +116,14 @@ static void bgp_process_writes(struct event *thread)
|
||||
bool reschedule;
|
||||
bool fatal = false;
|
||||
|
||||
if (peer->fd < 0)
|
||||
if (peer->connection.fd < 0)
|
||||
return;
|
||||
|
||||
struct frr_pthread *fpt = bgp_pth_io;
|
||||
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
status = bgp_write(peer);
|
||||
reschedule = (stream_fifo_head(peer->obuf) != NULL);
|
||||
reschedule = (stream_fifo_head(peer->connection.obuf) != NULL);
|
||||
}
|
||||
|
||||
/* no problem */
|
||||
@ -142,8 +142,8 @@ static void bgp_process_writes(struct event *thread)
|
||||
* sent in the update message
|
||||
*/
|
||||
if (reschedule) {
|
||||
event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
|
||||
&peer->t_write);
|
||||
event_add_write(fpt->master, bgp_process_writes, peer,
|
||||
peer->connection.fd, &peer->t_write);
|
||||
} else if (!fatal) {
|
||||
BGP_UPDATE_GROUP_TIMER_ON(&peer->t_generate_updgrp_packets,
|
||||
bgp_generate_updgrp_packets);
|
||||
@ -154,14 +154,14 @@ static int read_ibuf_work(struct peer *peer)
|
||||
{
|
||||
/* static buffer for transferring packets */
|
||||
/* shorter alias to peer's input buffer */
|
||||
struct ringbuf *ibw = peer->ibuf_work;
|
||||
struct ringbuf *ibw = peer->connection.ibuf_work;
|
||||
/* packet size as given by header */
|
||||
uint16_t pktsize = 0;
|
||||
struct stream *pkt;
|
||||
|
||||
/* ============================================== */
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
if (peer->ibuf->count >= bm->inq_limit)
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
if (peer->connection.ibuf->count >= bm->inq_limit)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -196,8 +196,8 @@ static int read_ibuf_work(struct peer *peer)
|
||||
stream_set_endp(pkt, pktsize);
|
||||
|
||||
frrtrace(2, frr_bgp, packet_read, peer, pkt);
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
stream_fifo_push(peer->ibuf, pkt);
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
stream_fifo_push(peer->connection.ibuf, pkt);
|
||||
}
|
||||
|
||||
return pktsize;
|
||||
@ -208,7 +208,8 @@ static int read_ibuf_work(struct peer *peer)
|
||||
* or has hung up.
|
||||
*
|
||||
* We read as much data as possible, process as many packets as we can and
|
||||
* place them on peer->ibuf for secondary processing by the main thread.
|
||||
* place them on peer->connection.ibuf for secondary processing by the main
|
||||
* thread.
|
||||
*/
|
||||
static void bgp_process_reads(struct event *thread)
|
||||
{
|
||||
@ -216,7 +217,7 @@ static void bgp_process_reads(struct event *thread)
|
||||
static struct peer *peer; /* peer to read from */
|
||||
uint16_t status; /* bgp_read status code */
|
||||
bool fatal = false; /* whether fatal error occurred */
|
||||
bool added_pkt = false; /* whether we pushed onto ->ibuf */
|
||||
bool added_pkt = false; /* whether we pushed onto ->connection.ibuf */
|
||||
int code = 0; /* FSM code if error occurred */
|
||||
static bool ibuf_full_logged; /* Have we logged full already */
|
||||
int ret = 1;
|
||||
@ -224,12 +225,12 @@ static void bgp_process_reads(struct event *thread)
|
||||
|
||||
peer = EVENT_ARG(thread);
|
||||
|
||||
if (bm->terminating || peer->fd < 0)
|
||||
if (bm->terminating || peer->connection.fd < 0)
|
||||
return;
|
||||
|
||||
struct frr_pthread *fpt = bgp_pth_io;
|
||||
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
status = bgp_read(peer, &code);
|
||||
}
|
||||
|
||||
@ -282,12 +283,12 @@ done:
|
||||
/* handle invalid header */
|
||||
if (fatal) {
|
||||
/* wipe buffer just in case someone screwed up */
|
||||
ringbuf_wipe(peer->ibuf_work);
|
||||
ringbuf_wipe(peer->connection.ibuf_work);
|
||||
return;
|
||||
}
|
||||
|
||||
event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
|
||||
&peer->t_read);
|
||||
event_add_read(fpt->master, bgp_process_reads, peer,
|
||||
peer->connection.fd, &peer->t_read);
|
||||
if (added_pkt)
|
||||
event_add_event(bm->master, bgp_process_packet, peer, 0,
|
||||
&peer->t_process_packet);
|
||||
@ -296,9 +297,10 @@ done:
|
||||
/*
|
||||
* Flush peer output buffer.
|
||||
*
|
||||
* This function pops packets off of peer->obuf and writes them to peer->fd.
|
||||
* The amount of packets written is equal to the minimum of peer->wpkt_quanta
|
||||
* and the number of packets on the output buffer, unless an error occurs.
|
||||
* This function pops packets off of peer->connection.obuf and writes them to
|
||||
* peer->connection.fd. The amount of packets written is equal to the minimum of
|
||||
* peer->wpkt_quanta and the number of packets on the output buffer, unless an
|
||||
* error occurs.
|
||||
*
|
||||
* If write() returns an error, the appropriate FSM event is generated.
|
||||
*
|
||||
@ -328,7 +330,7 @@ static uint16_t bgp_write(struct peer *peer)
|
||||
struct stream **streams = ostreams;
|
||||
struct iovec iov[wpkt_quanta_old];
|
||||
|
||||
s = stream_fifo_head(peer->obuf);
|
||||
s = stream_fifo_head(peer->connection.obuf);
|
||||
|
||||
if (!s)
|
||||
goto done;
|
||||
@ -348,7 +350,7 @@ static uint16_t bgp_write(struct peer *peer)
|
||||
total_written = 0;
|
||||
|
||||
do {
|
||||
num = writev(peer->fd, iov, iovsz);
|
||||
num = writev(peer->connection.fd, iov, iovsz);
|
||||
|
||||
if (num < 0) {
|
||||
if (!ERRNO_IO_RETRY(errno)) {
|
||||
@ -397,7 +399,7 @@ static uint16_t bgp_write(struct peer *peer)
|
||||
|
||||
/* Handle statistics */
|
||||
for (unsigned int i = 0; i < total_written; i++) {
|
||||
s = stream_fifo_pop(peer->obuf);
|
||||
s = stream_fifo_pop(peer->connection.obuf);
|
||||
|
||||
assert(s == ostreams[i]);
|
||||
|
||||
@ -476,7 +478,8 @@ done : {
|
||||
|
||||
uint8_t ibuf_scratch[BGP_EXTENDED_MESSAGE_MAX_PACKET_SIZE * BGP_READ_PACKET_MAX];
|
||||
/*
|
||||
* Reads a chunk of data from peer->fd into peer->ibuf_work.
|
||||
* Reads a chunk of data from peer->connection.fd into
|
||||
* peer->connection.ibuf_work.
|
||||
*
|
||||
* code_p
|
||||
* Pointer to location to store FSM event code in case of fatal error.
|
||||
@ -494,7 +497,7 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
|
||||
size_t ibuf_work_space; /* space we can read into the work buf */
|
||||
uint16_t status = 0;
|
||||
|
||||
ibuf_work_space = ringbuf_space(peer->ibuf_work);
|
||||
ibuf_work_space = ringbuf_space(peer->connection.ibuf_work);
|
||||
|
||||
if (ibuf_work_space == 0) {
|
||||
SET_FLAG(status, BGP_IO_WORK_FULL_ERR);
|
||||
@ -503,7 +506,7 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
|
||||
|
||||
readsize = MIN(ibuf_work_space, sizeof(ibuf_scratch));
|
||||
|
||||
nbytes = read(peer->fd, ibuf_scratch, readsize);
|
||||
nbytes = read(peer->connection.fd, ibuf_scratch, readsize);
|
||||
|
||||
/* EAGAIN or EWOULDBLOCK; come back later */
|
||||
if (nbytes < 0 && ERRNO_IO_RETRY(errno)) {
|
||||
@ -524,7 +527,7 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
|
||||
/* Received EOF / TCP session closed */
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug("%s [Event] BGP connection closed fd %d",
|
||||
peer->host, peer->fd);
|
||||
peer->host, peer->connection.fd);
|
||||
|
||||
/* Handle the error in the main pthread. */
|
||||
if (code_p)
|
||||
@ -532,8 +535,8 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
|
||||
|
||||
SET_FLAG(status, BGP_IO_FATAL_ERR);
|
||||
} else {
|
||||
assert(ringbuf_put(peer->ibuf_work, ibuf_scratch, nbytes) ==
|
||||
(size_t)nbytes);
|
||||
assert(ringbuf_put(peer->connection.ibuf_work, ibuf_scratch,
|
||||
nbytes) == (size_t)nbytes);
|
||||
}
|
||||
|
||||
return status;
|
||||
@ -550,7 +553,7 @@ static bool validate_header(struct peer *peer)
|
||||
{
|
||||
uint16_t size;
|
||||
uint8_t type;
|
||||
struct ringbuf *pkt = peer->ibuf_work;
|
||||
struct ringbuf *pkt = peer->connection.ibuf_work;
|
||||
|
||||
static const uint8_t m_correct[BGP_MARKER_SIZE] = {
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
|
@ -429,7 +429,7 @@ static void bgp_accept(struct event *thread)
|
||||
peer1 = peer_lookup_dynamic_neighbor(bgp, &su);
|
||||
if (peer1) {
|
||||
/* Dynamic neighbor has been created, let it proceed */
|
||||
peer1->fd = bgp_sock;
|
||||
peer1->connection.fd = bgp_sock;
|
||||
|
||||
/* Set the user configured MSS to TCP socket */
|
||||
if (CHECK_FLAG(peer1->flags, PEER_FLAG_TCP_MSS))
|
||||
@ -521,10 +521,9 @@ static void bgp_accept(struct event *thread)
|
||||
}
|
||||
|
||||
if (bgp_debug_neighbor_events(peer1))
|
||||
zlog_debug(
|
||||
"[Event] connection from %s fd %d, active peer status %d fd %d",
|
||||
inet_sutop(&su, buf), bgp_sock, peer1->status,
|
||||
peer1->fd);
|
||||
zlog_debug("[Event] connection from %s fd %d, active peer status %d fd %d",
|
||||
inet_sutop(&su, buf), bgp_sock, peer1->status,
|
||||
peer1->connection.fd);
|
||||
|
||||
if (peer1->doppelganger) {
|
||||
/* We have an existing connection. Kill the existing one and run
|
||||
@ -563,7 +562,7 @@ static void bgp_accept(struct event *thread)
|
||||
|
||||
peer->doppelganger = peer1;
|
||||
peer1->doppelganger = peer;
|
||||
peer->fd = bgp_sock;
|
||||
peer->connection.fd = bgp_sock;
|
||||
frr_with_privs(&bgpd_privs) {
|
||||
vrf_bind(peer->bgp->vrf_id, bgp_sock, bgp_get_bound_name(peer));
|
||||
}
|
||||
@ -684,13 +683,13 @@ static int bgp_update_source(struct peer *peer)
|
||||
if (bgp_update_address(ifp, &peer->su, &addr))
|
||||
return -1;
|
||||
|
||||
ret = sockunion_bind(peer->fd, &addr, 0, &addr);
|
||||
ret = sockunion_bind(peer->connection.fd, &addr, 0, &addr);
|
||||
}
|
||||
|
||||
/* Source is specified with IP address. */
|
||||
if (peer->update_source)
|
||||
ret = sockunion_bind(peer->fd, peer->update_source, 0,
|
||||
peer->update_source);
|
||||
ret = sockunion_bind(peer->connection.fd, peer->update_source,
|
||||
0, peer->update_source);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -708,11 +707,12 @@ int bgp_connect(struct peer *peer)
|
||||
return 0;
|
||||
}
|
||||
frr_with_privs(&bgpd_privs) {
|
||||
/* Make socket for the peer. */
|
||||
peer->fd = vrf_sockunion_socket(&peer->su, peer->bgp->vrf_id,
|
||||
bgp_get_bound_name(peer));
|
||||
/* Make socket for the peer. */
|
||||
peer->connection.fd =
|
||||
vrf_sockunion_socket(&peer->su, peer->bgp->vrf_id,
|
||||
bgp_get_bound_name(peer));
|
||||
}
|
||||
if (peer->fd < 0) {
|
||||
if (peer->connection.fd < 0) {
|
||||
peer->last_reset = PEER_DOWN_SOCKET_ERROR;
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug("%s: Failure to create socket for connection to %s, error received: %s(%d)",
|
||||
@ -721,18 +721,18 @@ int bgp_connect(struct peer *peer)
|
||||
return -1;
|
||||
}
|
||||
|
||||
set_nonblocking(peer->fd);
|
||||
set_nonblocking(peer->connection.fd);
|
||||
|
||||
/* Set the user configured MSS to TCP socket */
|
||||
if (CHECK_FLAG(peer->flags, PEER_FLAG_TCP_MSS))
|
||||
sockopt_tcp_mss_set(peer->fd, peer->tcp_mss);
|
||||
sockopt_tcp_mss_set(peer->connection.fd, peer->tcp_mss);
|
||||
|
||||
bgp_socket_set_buffer_size(peer->fd);
|
||||
bgp_socket_set_buffer_size(peer->connection.fd);
|
||||
|
||||
/* Set TCP keepalive when TCP keepalive is enabled */
|
||||
bgp_update_setsockopt_tcp_keepalive(peer->bgp, peer->fd);
|
||||
bgp_update_setsockopt_tcp_keepalive(peer->bgp, peer->connection.fd);
|
||||
|
||||
if (bgp_set_socket_ttl(peer, peer->fd) < 0) {
|
||||
if (bgp_set_socket_ttl(peer, peer->connection.fd) < 0) {
|
||||
peer->last_reset = PEER_DOWN_SOCKET_ERROR;
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug("%s: Failure to set socket ttl for connection to %s, error received: %s(%d)",
|
||||
@ -742,15 +742,16 @@ int bgp_connect(struct peer *peer)
|
||||
return -1;
|
||||
}
|
||||
|
||||
sockopt_reuseaddr(peer->fd);
|
||||
sockopt_reuseport(peer->fd);
|
||||
sockopt_reuseaddr(peer->connection.fd);
|
||||
sockopt_reuseport(peer->connection.fd);
|
||||
|
||||
#ifdef IPTOS_PREC_INTERNETCONTROL
|
||||
frr_with_privs(&bgpd_privs) {
|
||||
if (sockunion_family(&peer->su) == AF_INET)
|
||||
setsockopt_ipv4_tos(peer->fd, bm->tcp_dscp);
|
||||
setsockopt_ipv4_tos(peer->connection.fd, bm->tcp_dscp);
|
||||
else if (sockunion_family(&peer->su) == AF_INET6)
|
||||
setsockopt_ipv6_tclass(peer->fd, bm->tcp_dscp);
|
||||
setsockopt_ipv6_tclass(peer->connection.fd,
|
||||
bm->tcp_dscp);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -762,7 +763,7 @@ int bgp_connect(struct peer *peer)
|
||||
if (!BGP_PEER_SU_UNSPEC(peer))
|
||||
bgp_md5_set(peer);
|
||||
|
||||
bgp_md5_set_connect(peer->fd, &peer->su, prefixlen,
|
||||
bgp_md5_set_connect(peer->connection.fd, &peer->su, prefixlen,
|
||||
peer->password);
|
||||
}
|
||||
|
||||
@ -779,11 +780,11 @@ int bgp_connect(struct peer *peer)
|
||||
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug("%s [Event] Connect start to %s fd %d", peer->host,
|
||||
peer->host, peer->fd);
|
||||
peer->host, peer->connection.fd);
|
||||
|
||||
/* Connect to the remote peer. */
|
||||
return sockunion_connect(peer->fd, &peer->su, htons(peer->port),
|
||||
ifindex);
|
||||
return sockunion_connect(peer->connection.fd, &peer->su,
|
||||
htons(peer->port), ifindex);
|
||||
}
|
||||
|
||||
/* After TCP connection is established. Get local address and port. */
|
||||
@ -799,10 +800,10 @@ int bgp_getsockname(struct peer *peer)
|
||||
peer->su_remote = NULL;
|
||||
}
|
||||
|
||||
peer->su_local = sockunion_getsockname(peer->fd);
|
||||
peer->su_local = sockunion_getsockname(peer->connection.fd);
|
||||
if (!peer->su_local)
|
||||
return -1;
|
||||
peer->su_remote = sockunion_getpeername(peer->fd);
|
||||
peer->su_remote = sockunion_getpeername(peer->connection.fd);
|
||||
if (!peer->su_remote)
|
||||
return -1;
|
||||
|
||||
|
@ -111,15 +111,15 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
|
||||
uint32_t holdtime;
|
||||
intmax_t sendholdtime;
|
||||
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
/* if the queue is empty, reset the "last OK" timestamp to
|
||||
* now, otherwise if we write another packet immediately
|
||||
* after it'll get confused
|
||||
*/
|
||||
if (!stream_fifo_count_safe(peer->obuf))
|
||||
if (!stream_fifo_count_safe(peer->connection.obuf))
|
||||
peer->last_sendq_ok = monotime(NULL);
|
||||
|
||||
stream_fifo_push(peer->obuf, s);
|
||||
stream_fifo_push(peer->connection.obuf, s);
|
||||
|
||||
delta = monotime(NULL) - peer->last_sendq_ok;
|
||||
|
||||
@ -477,7 +477,7 @@ void bgp_generate_updgrp_packets(struct event *thread)
|
||||
* let's stop adding to the outq if we are
|
||||
* already at the limit.
|
||||
*/
|
||||
if (peer->obuf->count >= bm->outq_limit) {
|
||||
if (peer->connection.obuf->count >= bm->outq_limit) {
|
||||
bgp_write_proceed_actions(peer);
|
||||
return;
|
||||
}
|
||||
@ -605,7 +605,7 @@ void bgp_generate_updgrp_packets(struct event *thread)
|
||||
bpacket_queue_advance_peer(paf);
|
||||
}
|
||||
} while (s && (++generated < wpq) &&
|
||||
(peer->obuf->count <= bm->outq_limit));
|
||||
(peer->connection.obuf->count <= bm->outq_limit));
|
||||
|
||||
if (generated)
|
||||
bgp_writes_on(peer);
|
||||
@ -713,11 +713,11 @@ void bgp_open_send(struct peer *peer)
|
||||
* Writes NOTIFICATION message directly to a peer socket without waiting for
|
||||
* the I/O thread.
|
||||
*
|
||||
* There must be exactly one stream on the peer->obuf FIFO, and the data within
|
||||
* this stream must match the format of a BGP NOTIFICATION message.
|
||||
* There must be exactly one stream on the peer->connection.obuf FIFO, and the
|
||||
* data within this stream must match the format of a BGP NOTIFICATION message.
|
||||
* Transmission is best-effort.
|
||||
*
|
||||
* @requires peer->io_mtx
|
||||
* @requires peer->connection.io_mtx
|
||||
* @param peer
|
||||
* @return 0
|
||||
*/
|
||||
@ -728,7 +728,7 @@ static void bgp_write_notify(struct peer *peer)
|
||||
struct stream *s;
|
||||
|
||||
/* There should be at least one packet. */
|
||||
s = stream_fifo_pop(peer->obuf);
|
||||
s = stream_fifo_pop(peer->connection.obuf);
|
||||
|
||||
if (!s)
|
||||
return;
|
||||
@ -739,7 +739,7 @@ static void bgp_write_notify(struct peer *peer)
|
||||
* socket is in nonblocking mode, if we can't deliver the NOTIFY, well,
|
||||
* we only care about getting a clean shutdown at this point.
|
||||
*/
|
||||
ret = write(peer->fd, STREAM_DATA(s), stream_get_endp(s));
|
||||
ret = write(peer->connection.fd, STREAM_DATA(s), stream_get_endp(s));
|
||||
|
||||
/*
|
||||
* only connection reset/close gets counted as TCP_fatal_error, failure
|
||||
@ -753,8 +753,8 @@ static void bgp_write_notify(struct peer *peer)
|
||||
|
||||
/* Disable Nagle, make NOTIFY packet go out right away */
|
||||
val = 1;
|
||||
(void)setsockopt(peer->fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val,
|
||||
sizeof(val));
|
||||
(void)setsockopt(peer->connection.fd, IPPROTO_TCP, TCP_NODELAY,
|
||||
(char *)&val, sizeof(val));
|
||||
|
||||
/* Retrieve BGP packet type. */
|
||||
stream_set_getp(s, BGP_MARKER_SIZE + 2);
|
||||
@ -910,7 +910,7 @@ static void bgp_notify_send_internal(struct peer *peer, uint8_t code,
|
||||
bool hard_reset = bgp_notify_send_hard_reset(peer, code, sub_code);
|
||||
|
||||
/* Lock I/O mutex to prevent other threads from pushing packets */
|
||||
frr_mutex_lock_autounlock(&peer->io_mtx);
|
||||
frr_mutex_lock_autounlock(&peer->connection.io_mtx);
|
||||
/* ============================================== */
|
||||
|
||||
/* Allocate new stream. */
|
||||
@ -943,7 +943,7 @@ static void bgp_notify_send_internal(struct peer *peer, uint8_t code,
|
||||
bgp_packet_set_size(s);
|
||||
|
||||
/* wipe output buffer */
|
||||
stream_fifo_clean(peer->obuf);
|
||||
stream_fifo_clean(peer->connection.obuf);
|
||||
|
||||
/*
|
||||
* If possible, store last packet for debugging purposes. This check is
|
||||
@ -1028,7 +1028,7 @@ static void bgp_notify_send_internal(struct peer *peer, uint8_t code,
|
||||
peer->last_reset = PEER_DOWN_NOTIFY_SEND;
|
||||
|
||||
/* Add packet to peer's output queue */
|
||||
stream_fifo_push(peer->obuf, s);
|
||||
stream_fifo_push(peer->connection.obuf, s);
|
||||
|
||||
bgp_peer_gr_flags_update(peer);
|
||||
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(peer->bgp,
|
||||
@ -1812,7 +1812,7 @@ static int bgp_open_receive(struct peer *peer, bgp_size_t size)
|
||||
return BGP_Stop;
|
||||
}
|
||||
}
|
||||
peer->rtt = sockopt_tcp_rtt(peer->fd);
|
||||
peer->rtt = sockopt_tcp_rtt(peer->connection.fd);
|
||||
|
||||
return Receive_OPEN_message;
|
||||
}
|
||||
@ -1831,7 +1831,7 @@ static int bgp_keepalive_receive(struct peer *peer, bgp_size_t size)
|
||||
|
||||
bgp_update_implicit_eors(peer);
|
||||
|
||||
peer->rtt = sockopt_tcp_rtt(peer->fd);
|
||||
peer->rtt = sockopt_tcp_rtt(peer->connection.fd);
|
||||
|
||||
/* If the peer's RTT is higher than expected, shutdown
|
||||
* the peer automatically.
|
||||
@ -3009,8 +3009,8 @@ void bgp_process_packet(struct event *thread)
|
||||
bgp_size_t size;
|
||||
char notify_data_length[2];
|
||||
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
peer->curr = stream_fifo_pop(peer->ibuf);
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
peer->curr = stream_fifo_pop(peer->connection.ibuf);
|
||||
}
|
||||
|
||||
if (peer->curr == NULL) // no packets to process, hmm...
|
||||
@ -3136,9 +3136,9 @@ void bgp_process_packet(struct event *thread)
|
||||
|
||||
if (fsm_update_result != FSM_PEER_TRANSFERRED
|
||||
&& fsm_update_result != FSM_PEER_STOPPED) {
|
||||
frr_with_mutex (&peer->io_mtx) {
|
||||
frr_with_mutex (&peer->connection.io_mtx) {
|
||||
// more work to do, come back later
|
||||
if (peer->ibuf->count > 0)
|
||||
if (peer->connection.ibuf->count > 0)
|
||||
event_add_event(bm->master, bgp_process_packet,
|
||||
peer, 0,
|
||||
&peer->t_process_packet);
|
||||
@ -3171,8 +3171,8 @@ void bgp_packet_process_error(struct event *thread)
|
||||
code = EVENT_VAL(thread);
|
||||
|
||||
if (bgp_debug_neighbor_events(peer))
|
||||
zlog_debug("%s [Event] BGP error %d on fd %d",
|
||||
peer->host, code, peer->fd);
|
||||
zlog_debug("%s [Event] BGP error %d on fd %d", peer->host, code,
|
||||
peer->connection.fd);
|
||||
|
||||
/* Closed connection or error on the socket */
|
||||
if (peer_established(peer)) {
|
||||
|
@ -11748,12 +11748,14 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
|
||||
PEER_TOTAL_TX(peer));
|
||||
|
||||
atomic_size_t outq_count, inq_count;
|
||||
outq_count = atomic_load_explicit(
|
||||
&peer->obuf->count,
|
||||
memory_order_relaxed);
|
||||
inq_count = atomic_load_explicit(
|
||||
&peer->ibuf->count,
|
||||
memory_order_relaxed);
|
||||
outq_count =
|
||||
atomic_load_explicit(&peer->connection
|
||||
.obuf->count,
|
||||
memory_order_relaxed);
|
||||
inq_count =
|
||||
atomic_load_explicit(&peer->connection
|
||||
.ibuf->count,
|
||||
memory_order_relaxed);
|
||||
|
||||
json_object_int_add(
|
||||
json_peer, "tableVersion",
|
||||
@ -11916,12 +11918,14 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
|
||||
" ");
|
||||
|
||||
atomic_size_t outq_count, inq_count;
|
||||
outq_count = atomic_load_explicit(
|
||||
&peer->obuf->count,
|
||||
memory_order_relaxed);
|
||||
inq_count = atomic_load_explicit(
|
||||
&peer->ibuf->count,
|
||||
memory_order_relaxed);
|
||||
outq_count =
|
||||
atomic_load_explicit(&peer->connection
|
||||
.obuf->count,
|
||||
memory_order_relaxed);
|
||||
inq_count =
|
||||
atomic_load_explicit(&peer->connection
|
||||
.ibuf->count,
|
||||
memory_order_relaxed);
|
||||
|
||||
vty_out(vty, "4");
|
||||
vty_out(vty, ASN_FORMAT_SPACE(bgp->asnotation),
|
||||
@ -13656,7 +13660,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
|
||||
|
||||
/* Configured and Synced tcp-mss value for peer */
|
||||
if (CHECK_FLAG(p->flags, PEER_FLAG_TCP_MSS)) {
|
||||
sync_tcp_mss = sockopt_tcp_mss_get(p->fd);
|
||||
sync_tcp_mss = sockopt_tcp_mss_get(p->connection.fd);
|
||||
json_object_int_add(json_neigh, "bgpTcpMssConfigured",
|
||||
p->tcp_mss);
|
||||
json_object_int_add(json_neigh, "bgpTcpMssSynced",
|
||||
@ -13743,7 +13747,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
|
||||
|
||||
/* Configured and synced tcp-mss value for peer */
|
||||
if (CHECK_FLAG(p->flags, PEER_FLAG_TCP_MSS)) {
|
||||
sync_tcp_mss = sockopt_tcp_mss_get(p->fd);
|
||||
sync_tcp_mss = sockopt_tcp_mss_get(p->connection.fd);
|
||||
vty_out(vty, " Configured tcp-mss is %d", p->tcp_mss);
|
||||
vty_out(vty, ", synced tcp-mss is %d\n", sync_tcp_mss);
|
||||
}
|
||||
@ -14717,9 +14721,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
|
||||
/* Packet counts. */
|
||||
|
||||
atomic_size_t outq_count, inq_count;
|
||||
outq_count = atomic_load_explicit(&p->obuf->count,
|
||||
outq_count = atomic_load_explicit(&p->connection.obuf->count,
|
||||
memory_order_relaxed);
|
||||
inq_count = atomic_load_explicit(&p->ibuf->count,
|
||||
inq_count = atomic_load_explicit(&p->connection.ibuf->count,
|
||||
memory_order_relaxed);
|
||||
|
||||
json_object_int_add(json_stat, "depthInq",
|
||||
@ -14770,9 +14774,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
|
||||
notify_out, notify_in, update_out, update_in,
|
||||
keepalive_out, keepalive_in, refresh_out, refresh_in,
|
||||
dynamic_cap_out, dynamic_cap_in;
|
||||
outq_count = atomic_load_explicit(&p->obuf->count,
|
||||
outq_count = atomic_load_explicit(&p->connection.obuf->count,
|
||||
memory_order_relaxed);
|
||||
inq_count = atomic_load_explicit(&p->ibuf->count,
|
||||
inq_count = atomic_load_explicit(&p->connection.ibuf->count,
|
||||
memory_order_relaxed);
|
||||
open_out = atomic_load_explicit(&p->open_out,
|
||||
memory_order_relaxed);
|
||||
@ -15155,7 +15159,8 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
|
||||
p->t_read ? "on" : "off",
|
||||
CHECK_FLAG(p->thread_flags, PEER_THREAD_WRITES_ON)
|
||||
? "on"
|
||||
: "off", p->fd);
|
||||
: "off",
|
||||
p->connection.fd);
|
||||
}
|
||||
|
||||
if (p->notify.code == BGP_NOTIFY_OPEN_ERR
|
||||
|
99
bgpd/bgpd.c
99
bgpd/bgpd.c
@ -1132,7 +1132,7 @@ static void peer_free(struct peer *peer)
|
||||
assert(!peer->t_read);
|
||||
BGP_EVENT_FLUSH(peer);
|
||||
|
||||
pthread_mutex_destroy(&peer->io_mtx);
|
||||
pthread_mutex_destroy(&peer->connection.io_mtx);
|
||||
|
||||
/* Free connected nexthop, if present */
|
||||
if (CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)
|
||||
@ -1369,7 +1369,7 @@ struct peer *peer_new(struct bgp *bgp)
|
||||
peer = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
|
||||
|
||||
/* Set default value. */
|
||||
peer->fd = -1;
|
||||
peer->connection.fd = -1;
|
||||
peer->v_start = BGP_INIT_START_TIMER;
|
||||
peer->v_connect = bgp->default_connect_retry;
|
||||
peer->status = Idle;
|
||||
@ -1411,12 +1411,12 @@ struct peer *peer_new(struct bgp *bgp)
|
||||
bgp_peer_gr_init(peer);
|
||||
|
||||
/* Create buffers. */
|
||||
peer->ibuf = stream_fifo_new();
|
||||
peer->obuf = stream_fifo_new();
|
||||
pthread_mutex_init(&peer->io_mtx, NULL);
|
||||
peer->connection.ibuf = stream_fifo_new();
|
||||
peer->connection.obuf = stream_fifo_new();
|
||||
pthread_mutex_init(&peer->connection.io_mtx, NULL);
|
||||
|
||||
peer->ibuf_work =
|
||||
ringbuf_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE/2);
|
||||
peer->connection.ibuf_work =
|
||||
ringbuf_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE / 2);
|
||||
|
||||
/* Get service port number. */
|
||||
sp = getservbyname("bgp", "tcp");
|
||||
@ -2591,19 +2591,19 @@ int peer_delete(struct peer *peer)
|
||||
}
|
||||
|
||||
/* Buffers. */
|
||||
if (peer->ibuf) {
|
||||
stream_fifo_free(peer->ibuf);
|
||||
peer->ibuf = NULL;
|
||||
if (peer->connection.ibuf) {
|
||||
stream_fifo_free(peer->connection.ibuf);
|
||||
peer->connection.ibuf = NULL;
|
||||
}
|
||||
|
||||
if (peer->obuf) {
|
||||
stream_fifo_free(peer->obuf);
|
||||
peer->obuf = NULL;
|
||||
if (peer->connection.obuf) {
|
||||
stream_fifo_free(peer->connection.obuf);
|
||||
peer->connection.obuf = NULL;
|
||||
}
|
||||
|
||||
if (peer->ibuf_work) {
|
||||
ringbuf_del(peer->ibuf_work);
|
||||
peer->ibuf_work = NULL;
|
||||
if (peer->connection.ibuf_work) {
|
||||
ringbuf_del(peer->connection.ibuf_work);
|
||||
peer->connection.ibuf_work = NULL;
|
||||
}
|
||||
|
||||
/* Local and remote addresses. */
|
||||
@ -5110,7 +5110,7 @@ int peer_ebgp_multihop_unset(struct peer *peer)
|
||||
|
||||
peer->ttl = BGP_DEFAULT_TTL;
|
||||
|
||||
if (peer->fd >= 0) {
|
||||
if (peer->connection.fd >= 0) {
|
||||
if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->status))
|
||||
bgp_notify_send(
|
||||
peer, BGP_NOTIFY_CEASE,
|
||||
@ -7795,13 +7795,14 @@ int peer_ttl_security_hops_set(struct peer *peer, int gtsm_hops)
|
||||
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
|
||||
peer->gtsm_hops = gtsm_hops;
|
||||
|
||||
if (peer->fd >= 0)
|
||||
sockopt_minttl(peer->su.sa.sa_family, peer->fd,
|
||||
MAXTTL + 1 - gtsm_hops);
|
||||
if ((peer->status < Established) && peer->doppelganger
|
||||
&& (peer->doppelganger->fd >= 0))
|
||||
if (peer->connection.fd >= 0)
|
||||
sockopt_minttl(peer->su.sa.sa_family,
|
||||
peer->doppelganger->fd,
|
||||
peer->connection.fd,
|
||||
MAXTTL + 1 - gtsm_hops);
|
||||
if ((peer->status < Established) && peer->doppelganger &&
|
||||
(peer->doppelganger->connection.fd >= 0))
|
||||
sockopt_minttl(peer->su.sa.sa_family,
|
||||
peer->doppelganger->connection.fd,
|
||||
MAXTTL + 1 - gtsm_hops);
|
||||
} else {
|
||||
group = peer->group;
|
||||
@ -7818,18 +7819,18 @@ int peer_ttl_security_hops_set(struct peer *peer, int gtsm_hops)
|
||||
* no session then do nothing (will get
|
||||
* handled by next connection)
|
||||
*/
|
||||
if (gpeer->fd >= 0
|
||||
&& gpeer->gtsm_hops
|
||||
!= BGP_GTSM_HOPS_DISABLED)
|
||||
sockopt_minttl(
|
||||
gpeer->su.sa.sa_family,
|
||||
gpeer->fd,
|
||||
MAXTTL + 1 - gpeer->gtsm_hops);
|
||||
if ((gpeer->status < Established)
|
||||
&& gpeer->doppelganger
|
||||
&& (gpeer->doppelganger->fd >= 0))
|
||||
if (gpeer->connection.fd >= 0 &&
|
||||
gpeer->gtsm_hops != BGP_GTSM_HOPS_DISABLED)
|
||||
sockopt_minttl(gpeer->su.sa.sa_family,
|
||||
gpeer->doppelganger->fd,
|
||||
gpeer->connection.fd,
|
||||
MAXTTL + 1 -
|
||||
gpeer->gtsm_hops);
|
||||
if ((gpeer->status < Established) &&
|
||||
gpeer->doppelganger &&
|
||||
(gpeer->doppelganger->connection.fd >= 0))
|
||||
sockopt_minttl(gpeer->su.sa.sa_family,
|
||||
gpeer->doppelganger
|
||||
->connection.fd,
|
||||
MAXTTL + 1 - gtsm_hops);
|
||||
}
|
||||
}
|
||||
@ -7862,14 +7863,15 @@ int peer_ttl_security_hops_unset(struct peer *peer)
|
||||
if (peer->sort == BGP_PEER_EBGP)
|
||||
ret = peer_ebgp_multihop_unset(peer);
|
||||
else {
|
||||
if (peer->fd >= 0)
|
||||
sockopt_minttl(peer->su.sa.sa_family, peer->fd,
|
||||
0);
|
||||
|
||||
if ((peer->status < Established) && peer->doppelganger
|
||||
&& (peer->doppelganger->fd >= 0))
|
||||
if (peer->connection.fd >= 0)
|
||||
sockopt_minttl(peer->su.sa.sa_family,
|
||||
peer->doppelganger->fd, 0);
|
||||
peer->connection.fd, 0);
|
||||
|
||||
if ((peer->status < Established) && peer->doppelganger &&
|
||||
(peer->doppelganger->connection.fd >= 0))
|
||||
sockopt_minttl(peer->su.sa.sa_family,
|
||||
peer->doppelganger->connection.fd,
|
||||
0);
|
||||
}
|
||||
} else {
|
||||
group = peer->group;
|
||||
@ -7878,15 +7880,16 @@ int peer_ttl_security_hops_unset(struct peer *peer)
|
||||
if (peer->sort == BGP_PEER_EBGP)
|
||||
ret = peer_ebgp_multihop_unset(peer);
|
||||
else {
|
||||
if (peer->fd >= 0)
|
||||
if (peer->connection.fd >= 0)
|
||||
sockopt_minttl(peer->su.sa.sa_family,
|
||||
peer->fd, 0);
|
||||
peer->connection.fd, 0);
|
||||
|
||||
if ((peer->status < Established)
|
||||
&& peer->doppelganger
|
||||
&& (peer->doppelganger->fd >= 0))
|
||||
if ((peer->status < Established) &&
|
||||
peer->doppelganger &&
|
||||
(peer->doppelganger->connection.fd >= 0))
|
||||
sockopt_minttl(peer->su.sa.sa_family,
|
||||
peer->doppelganger->fd,
|
||||
peer->doppelganger
|
||||
->connection.fd,
|
||||
0);
|
||||
}
|
||||
}
|
||||
@ -7958,7 +7961,7 @@ int peer_clear_soft(struct peer *peer, afi_t afi, safi_t safi,
|
||||
if (!peer->afc[afi][safi])
|
||||
return BGP_ERR_AF_UNCONFIGURED;
|
||||
|
||||
peer->rtt = sockopt_tcp_rtt(peer->fd);
|
||||
peer->rtt = sockopt_tcp_rtt(peer->connection.fd);
|
||||
|
||||
if (stype == BGP_CLEAR_SOFT_OUT || stype == BGP_CLEAR_SOFT_BOTH) {
|
||||
/* Clear the "neighbor x.x.x.x default-originate" flag */
|
||||
|
29
bgpd/bgpd.h
29
bgpd/bgpd.h
@ -1120,6 +1120,17 @@ struct llgr_info {
|
||||
uint8_t flags;
|
||||
};
|
||||
|
||||
struct peer_connection {
|
||||
int fd;
|
||||
|
||||
/* Packet receive and send buffer. */
|
||||
pthread_mutex_t io_mtx; // guards ibuf, obuf
|
||||
struct stream_fifo *ibuf; // packets waiting to be processed
|
||||
struct stream_fifo *obuf; // packets waiting to be written
|
||||
|
||||
struct ringbuf *ibuf_work; // WiP buffer used by bgp_read() only
|
||||
};
|
||||
|
||||
/* BGP neighbor structure. */
|
||||
struct peer {
|
||||
/* BGP structure. */
|
||||
@ -1160,13 +1171,6 @@ struct peer {
|
||||
/* Local router ID. */
|
||||
struct in_addr local_id;
|
||||
|
||||
/* Packet receive and send buffer. */
|
||||
pthread_mutex_t io_mtx; // guards ibuf, obuf
|
||||
struct stream_fifo *ibuf; // packets waiting to be processed
|
||||
struct stream_fifo *obuf; // packets waiting to be written
|
||||
|
||||
struct ringbuf *ibuf_work; // WiP buffer used by bgp_read() only
|
||||
|
||||
struct stream *curr; // the current packet being parsed
|
||||
|
||||
/* the doppelganger peer structure, due to dual TCP conn setup */
|
||||
@ -1187,7 +1191,16 @@ struct peer {
|
||||
uint16_t table_dump_index;
|
||||
|
||||
/* Peer information */
|
||||
int fd; /* File descriptor */
|
||||
|
||||
/*
|
||||
* We will have 2 `struct peer_connection` data structures
|
||||
* connection is our attempt to talk to our peer. incoming
|
||||
* is the peer attempting to talk to us. When it is
|
||||
* time to consolidate between the two, we'll solidify
|
||||
* into the connection variable being used.
|
||||
*/
|
||||
struct peer_connection connection;
|
||||
|
||||
int ttl; /* TTL of TCP connection to the peer. */
|
||||
int rtt; /* Estimated round-trip-time from TCP_INFO */
|
||||
int rtt_expected; /* Expected round-trip-time for a peer */
|
||||
|
@ -1242,19 +1242,19 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
|
||||
* since this peer is not on the I/O thread, this lock is not strictly
|
||||
* necessary, but serves as a reminder to those who may meddle...
|
||||
*/
|
||||
frr_with_mutex (&rfd->peer->io_mtx) {
|
||||
frr_with_mutex (&rfd->peer->connection.io_mtx) {
|
||||
// we don't need any I/O related facilities
|
||||
if (rfd->peer->ibuf)
|
||||
stream_fifo_free(rfd->peer->ibuf);
|
||||
if (rfd->peer->obuf)
|
||||
stream_fifo_free(rfd->peer->obuf);
|
||||
if (rfd->peer->connection.ibuf)
|
||||
stream_fifo_free(rfd->peer->connection.ibuf);
|
||||
if (rfd->peer->connection.obuf)
|
||||
stream_fifo_free(rfd->peer->connection.obuf);
|
||||
|
||||
if (rfd->peer->ibuf_work)
|
||||
ringbuf_del(rfd->peer->ibuf_work);
|
||||
if (rfd->peer->connection.ibuf_work)
|
||||
ringbuf_del(rfd->peer->connection.ibuf_work);
|
||||
|
||||
rfd->peer->ibuf = NULL;
|
||||
rfd->peer->obuf = NULL;
|
||||
rfd->peer->ibuf_work = NULL;
|
||||
rfd->peer->connection.ibuf = NULL;
|
||||
rfd->peer->connection.obuf = NULL;
|
||||
rfd->peer->connection.ibuf_work = NULL;
|
||||
}
|
||||
|
||||
{ /* base code assumes have valid host pointer */
|
||||
|
@ -179,19 +179,22 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
|
||||
* is not strictly necessary, but serves as a reminder
|
||||
* to those who may meddle...
|
||||
*/
|
||||
frr_with_mutex (&vncHD1VR.peer->io_mtx) {
|
||||
frr_with_mutex (&vncHD1VR.peer->connection.io_mtx) {
|
||||
// we don't need any I/O related facilities
|
||||
if (vncHD1VR.peer->ibuf)
|
||||
stream_fifo_free(vncHD1VR.peer->ibuf);
|
||||
if (vncHD1VR.peer->obuf)
|
||||
stream_fifo_free(vncHD1VR.peer->obuf);
|
||||
if (vncHD1VR.peer->connection.ibuf)
|
||||
stream_fifo_free(
|
||||
vncHD1VR.peer->connection.ibuf);
|
||||
if (vncHD1VR.peer->connection.obuf)
|
||||
stream_fifo_free(
|
||||
vncHD1VR.peer->connection.obuf);
|
||||
|
||||
if (vncHD1VR.peer->ibuf_work)
|
||||
ringbuf_del(vncHD1VR.peer->ibuf_work);
|
||||
if (vncHD1VR.peer->connection.ibuf_work)
|
||||
ringbuf_del(vncHD1VR.peer->connection
|
||||
.ibuf_work);
|
||||
|
||||
vncHD1VR.peer->ibuf = NULL;
|
||||
vncHD1VR.peer->obuf = NULL;
|
||||
vncHD1VR.peer->ibuf_work = NULL;
|
||||
vncHD1VR.peer->connection.ibuf = NULL;
|
||||
vncHD1VR.peer->connection.obuf = NULL;
|
||||
vncHD1VR.peer->connection.ibuf_work = NULL;
|
||||
}
|
||||
|
||||
/* base code assumes have valid host pointer */
|
||||
|
@ -1343,10 +1343,10 @@ static int handle_attr_test(struct aspath_tests *t)
|
||||
bgp.asnotation = t->segment->asnotation;
|
||||
|
||||
peer.curr = stream_new(BGP_MAX_PACKET_SIZE);
|
||||
peer.obuf = stream_fifo_new();
|
||||
peer.connection.obuf = stream_fifo_new();
|
||||
peer.bgp = &bgp;
|
||||
peer.host = (char *)"none";
|
||||
peer.fd = -1;
|
||||
peer.connection.fd = -1;
|
||||
peer.cap = t->cap;
|
||||
peer.max_packet_size = BGP_STANDARD_MESSAGE_MAX_PACKET_SIZE;
|
||||
|
||||
|
@ -66,7 +66,7 @@ int main(int argc, char *argv[])
|
||||
SET_FLAG(peer->cap, PEER_CAP_DYNAMIC_ADV);
|
||||
peer->status = Established;
|
||||
|
||||
peer->fd = open(argv[1], O_RDONLY|O_NONBLOCK);
|
||||
peer->connection.fd = open(argv[1], O_RDONLY | O_NONBLOCK);
|
||||
t.arg = peer;
|
||||
peer->t_read = &t;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user