From 102bad0a9b65279358db787e2972a1711606d224 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Mon, 24 Jul 2023 10:13:32 -0400 Subject: [PATCH 1/2] bgpd: With io limit allow parsing to continue even if memory is low Commit: a0b937de428e14e869b8541f0b7810113d619c2e Introduced the idea of an input Q packet limit. Say you read in 635000 bytes of data and the input Q is already at its limit (currently 1000); then when bgp_process_reads runs it will assert because there is less than a BGP_MAX_PACKET_SIZE in ibuf_work. Don't assert, as it's irrelevant. Even if we can't read a full packet in, let's let the whole system keep working; as the input Q length comes down we will start pulling down the ibuf_work and it will be ok. Signed-off-by: Donald Sharp --- bgpd/bgp_io.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c index 215554af3e..a375bd6005 100644 --- a/bgpd/bgp_io.c +++ b/bgpd/bgp_io.c @@ -218,7 +218,6 @@ static void bgp_process_reads(struct event *thread) bool fatal = false; /* whether fatal error occurred */ bool added_pkt = false; /* whether we pushed onto ->ibuf */ int code = 0; /* FSM code if error occurred */ - bool ibuf_full = false; /* Is peer fifo IN Buffer full */ static bool ibuf_full_logged; /* Have we logged full already */ int ret = 1; /* clang-format on */ @@ -265,7 +264,6 @@ static void bgp_process_reads(struct event *thread) fatal = true; break; case -ENOMEM: - ibuf_full = true; if (!ibuf_full_logged) { if (bgp_debug_neighbor_events(peer)) zlog_debug( @@ -288,10 +286,6 @@ done: return; } - /* ringbuf should be fully drained unless ibuf is full */ - if (!ibuf_full) - assert(ringbuf_space(peer->ibuf_work) >= peer->max_packet_size); - event_add_read(fpt->master, bgp_process_reads, peer, peer->fd, &peer->t_read); if (added_pkt) From fe1c72a57315a7c9fc3c63dd2fbf3a10dafbc10b Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Mon, 24 Jul 2023 10:33:21 -0400 Subject: [PATCH 2/2] bgpd: Reduce size of ibuf_work 
ringbuf The ringbuf is 650k in size. This is obscenely large and in practical experimentation FRR never even approaches that size. Let's reduce this to 1.5 max packet sizes. If a BGP_MAX_PACKET_SIZE packet is ever received, having a bit of extra space ensures that we can read at least 1 packet. This also will significantly reduce memory usage when the operator has a lot of peers. Signed-off-by: Donald Sharp --- bgpd/bgpd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index fa28a3098b..c2b894bcbc 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -1414,7 +1414,7 @@ struct peer *peer_new(struct bgp *bgp) pthread_mutex_init(&peer->io_mtx, NULL); peer->ibuf_work = - ringbuf_new(BGP_MAX_PACKET_SIZE * BGP_READ_PACKET_MAX); + ringbuf_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE/2); /* Get service port number. */ sp = getservbyname("bgp", "tcp");