bgpd: Adopt addpath functionality for labeled-unicast

As with the RIB, labeled-unicast addpath allocations should live in the
unicast space.

Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
Donatas Abraitis 2022-12-06 18:28:17 +02:00
parent 4d65410c54
commit 8ccee4b8cc
5 changed files with 51 additions and 9 deletions
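
Every touched call site applies the same normalization before any per-SAFI
addpath state is read or written: a SAFI_LABELED_UNICAST argument is folded
into SAFI_UNICAST, mirroring how labeled-unicast already shares the unicast
RIB. A minimal, self-contained sketch of the idea; the enums and the toy
counter below are simplified stand-ins, not FRR's real definitions:

#include <stdio.h>

/* Simplified stand-ins for FRR's afi_t/safi_t; values are arbitrary. */
typedef enum { AFI_IP, AFI_MAX } afi_t;
typedef enum { SAFI_UNICAST, SAFI_LABELED_UNICAST, SAFI_MAX } safi_t;

/* Toy per-(afi,safi) ID counter standing in for the real allocator. */
static unsigned int next_addpath_id[AFI_MAX][SAFI_MAX];

/* The two-line remap this commit adds to each addpath entry point:
 * labeled-unicast shares the unicast RIB, so its addpath IDs must
 * come out of the unicast ID space as well. */
static unsigned int addpath_id_alloc(afi_t afi, safi_t safi)
{
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	return ++next_addpath_id[afi][safi];
}

int main(void)
{
	/* Both SAFIs draw from one counter: prints 1, then 2. */
	printf("%u\n", addpath_id_alloc(AFI_IP, SAFI_UNICAST));
	printf("%u\n", addpath_id_alloc(AFI_IP, SAFI_LABELED_UNICAST));
	return 0;
}

With the remap in place, a labeled-unicast allocation continues the unicast
sequence instead of starting a parallel one.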

bgpd/bgp_addpath.c

@@ -111,6 +111,9 @@ void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
 uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
 				 struct bgp_addpath_info_data *d)
 {
+	if (safi == SAFI_LABELED_UNICAST)
+		safi = SAFI_UNICAST;
+
 	if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
 		return d->addpath_tx_id[peer->addpath_type[afi][safi]];
 	else
@@ -182,6 +185,9 @@ static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
 {
 	struct bgp_path_info *pi;
 
+	if (safi == SAFI_LABELED_UNICAST)
+		safi = SAFI_UNICAST;
+
 	idalloc_drain_pool(
 		bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
 		&(dest->tx_addpath.free_ids[addpath_type]));
@@ -210,6 +216,9 @@ static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
 {
 	struct bgp_dest *dest, *ndest;
 
+	if (safi == SAFI_LABELED_UNICAST)
+		safi = SAFI_UNICAST;
+
 	for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
 	     dest = bgp_route_next(dest)) {
 		if (safi == SAFI_MPLS_VPN) {
@@ -251,6 +260,7 @@ static void bgp_addpath_populate_path(struct id_alloc *allocator,
  * and afi/safi combination. Since we won't waste the time computing addpath IDs
  * for unused strategies, the first time a peer is configured to use a strategy,
  * we have to backfill the data.
+ * In labeled-unicast, addpath allocations SHOULD be done in unicast SAFI.
  */
 static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
 				      enum bgp_addpath_strat addpath_type)
@@ -259,6 +269,9 @@ static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
 	char buf[200];
 	struct id_alloc *allocator;
 
+	if (safi == SAFI_LABELED_UNICAST)
+		safi = SAFI_UNICAST;
+
 	snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
 		 bgp_addpath_names(addpath_type)->config_name, (int)afi,
 		 (int)safi);
@@ -357,11 +370,15 @@ void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
 			       enum bgp_addpath_strat addpath_type)
 {
 	struct bgp *bgp = peer->bgp;
-	enum bgp_addpath_strat old_type = peer->addpath_type[afi][safi];
+	enum bgp_addpath_strat old_type;
 	struct listnode *node, *nnode;
 	struct peer *tmp_peer;
 	struct peer_group *group;
 
+	if (safi == SAFI_LABELED_UNICAST)
+		safi = SAFI_UNICAST;
+
+	old_type = peer->addpath_type[afi][safi];
 	if (addpath_type == old_type)
 		return;
 
@@ -431,6 +448,9 @@ void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *bn, afi_t afi,
 	struct bgp_path_info *pi;
 	struct id_alloc_pool **pool_ptr;
 
+	if (safi == SAFI_LABELED_UNICAST)
+		safi = SAFI_UNICAST;
+
 	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
 		struct id_alloc *alloc =
 			bgp->tx_addpath.id_allocators[afi][safi][i];
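
The hunks above all guard one invariant: allocation (bgp_addpath_populate_type),
per-peer lookup (bgp_addpath_id_for_peer), ID refresh (bgp_addpath_update_ids),
and teardown (the bgp_addpath_flush_type functions) must index the same
[afi][safi] slot, otherwise IDs drawn from the unicast pool would be freed into
the labeled-unicast one. A toy rendering of the lookup side, assuming
simplified single-AFI types; BGP_ADDPATH_MAX, addpath_type, and addpath_tx_id
echo the real field names, everything else is invented for the sketch:

#include <stdio.h>

typedef enum { SAFI_UNICAST, SAFI_LABELED_UNICAST, SAFI_MAX } safi_t;
enum bgp_addpath_strat {
	BGP_ADDPATH_ALL,
	BGP_ADDPATH_BEST_PER_AS,
	BGP_ADDPATH_MAX,
	BGP_ADDPATH_NONE,
};

struct toy_peer {
	enum bgp_addpath_strat addpath_type[SAFI_MAX]; /* per-SAFI strategy */
};

struct toy_path_data {
	unsigned int addpath_tx_id[BGP_ADDPATH_MAX]; /* one ID per strategy */
};

/* Mirrors the shape of bgp_addpath_id_for_peer() after the patch:
 * labeled-unicast is folded into unicast before any table lookup. */
static unsigned int toy_id_for_peer(struct toy_peer *peer, safi_t safi,
				    struct toy_path_data *d)
{
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	if (peer->addpath_type[safi] < BGP_ADDPATH_MAX)
		return d->addpath_tx_id[peer->addpath_type[safi]];
	return 0; /* 0 plays the "no addpath ID" role in this sketch */
}

int main(void)
{
	struct toy_peer peer = {
		.addpath_type = { BGP_ADDPATH_ALL, BGP_ADDPATH_NONE },
	};
	struct toy_path_data d = { .addpath_tx_id = { 42, 7 } };

	/* The strategy is configured under unicast only, yet the
	 * labeled-unicast lookup resolves too: prints 42 twice. */
	printf("%u\n", toy_id_for_peer(&peer, SAFI_UNICAST, &d));
	printf("%u\n", toy_id_for_peer(&peer, SAFI_LABELED_UNICAST, &d));
	return 0;
}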

bgpd/bgp_open.c

@@ -1658,7 +1658,7 @@ uint16_t bgp_open_capability(struct stream *s, struct peer *peer,
 	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;
 	as_t local_as;
 	uint8_t afi_safi_count = 0;
-	int adv_addpath_tx = 0;
+	bool adv_addpath_tx = false;
 
 	/* Non-Ext OP Len. */
 	cp = stream_get_endp(s);
@@ -1797,7 +1797,17 @@ uint16_t bgp_open_capability(struct stream *s, struct peer *peer,
 			 * will use it is
 			 * configured */
 			if (peer->addpath_type[afi][safi] != BGP_ADDPATH_NONE)
-				adv_addpath_tx = 1;
+				adv_addpath_tx = true;
+
+			/* If we have enabled labeled unicast, we MUST check
+			 * against unicast SAFI because addpath IDs are
+			 * allocated under unicast SAFI, the same as the RIB
+			 * is managed in unicast SAFI.
+			 */
+			if (safi == SAFI_LABELED_UNICAST)
+				if (peer->addpath_type[afi][SAFI_UNICAST] !=
+				    BGP_ADDPATH_NONE)
+					adv_addpath_tx = true;
 		}
 	}
 
@@ -1838,6 +1848,10 @@ uint16_t bgp_open_capability(struct stream *s, struct peer *peer,
 					SET_FLAG(flags, BGP_ADDPATH_TX);
 					SET_FLAG(peer->af_cap[afi][safi],
 						 PEER_CAP_ADDPATH_AF_TX_ADV);
+					if (safi == SAFI_LABELED_UNICAST)
+						SET_FLAG(
+							peer->af_cap[afi][SAFI_UNICAST],
+							PEER_CAP_ADDPATH_AF_TX_ADV);
 				} else {
 					UNSET_FLAG(peer->af_cap[afi][safi],
 						   PEER_CAP_ADDPATH_AF_TX_ADV);
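
bgp_open_capability() changes in two places: the adv_addpath_tx decision
(whether to send the ADDPATH capability at all) and the per-AFI/SAFI TX flags.
Both must treat labeled-unicast as an alias of unicast; otherwise a peer with
addpath-tx-all-paths configured only under the unicast address-family would
never advertise TX for labeled-unicast routes. A condensed sketch of that
decision, with toy types and stand-in SET_FLAG/PEER_CAP macros:

#include <stdbool.h>
#include <stdio.h>

typedef enum { SAFI_UNICAST, SAFI_LABELED_UNICAST, SAFI_MAX } safi_t;
enum { BGP_ADDPATH_NONE = 0, BGP_ADDPATH_ALL };

#define PEER_CAP_ADDPATH_AF_TX_ADV 0x1
#define SET_FLAG(v, f) ((v) |= (f))

struct toy_peer {
	int addpath_type[SAFI_MAX];    /* configured TX strategy */
	unsigned int af_cap[SAFI_MAX]; /* advertised capabilities */
};

/* Mirrors the post-patch test in bgp_open_capability(): a SAFI gets
 * addpath TX if it has a strategy itself, or, for labeled-unicast,
 * if plain unicast has one (the IDs live under unicast anyway). */
static bool adv_addpath_tx_for(const struct toy_peer *peer, safi_t safi)
{
	if (peer->addpath_type[safi] != BGP_ADDPATH_NONE)
		return true;
	if (safi == SAFI_LABELED_UNICAST &&
	    peer->addpath_type[SAFI_UNICAST] != BGP_ADDPATH_NONE)
		return true;
	return false;
}

int main(void)
{
	struct toy_peer peer = {
		.addpath_type = { BGP_ADDPATH_ALL, BGP_ADDPATH_NONE },
	};

	for (safi_t safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
		if (adv_addpath_tx_for(&peer, safi)) {
			SET_FLAG(peer.af_cap[safi], PEER_CAP_ADDPATH_AF_TX_ADV);
			/* Keep the unicast mirror in sync, as the patch does. */
			if (safi == SAFI_LABELED_UNICAST)
				SET_FLAG(peer.af_cap[SAFI_UNICAST],
					 PEER_CAP_ADDPATH_AF_TX_ADV);
		}

	/* Prints "1 1": labeled-unicast advertises TX from unicast config. */
	printf("%u %u\n", peer.af_cap[SAFI_UNICAST],
	       peer.af_cap[SAFI_LABELED_UNICAST]);
	return 0;
}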

bgpd/bgp_route.c

@@ -2086,11 +2086,9 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
 	/* If this is not the bestpath then check to see if there is an enabled
 	 * addpath
 	 * feature that requires us to advertise it */
-	if (!CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) {
-		if (!bgp_addpath_tx_path(peer->addpath_type[afi][safi], pi)) {
-			return false;
-		}
-	}
+	if (!CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))
+		if (!bgp_addpath_capable(pi, peer, afi, safi))
+			return false;
 
 	/* Aggregate-address suppress check. */
 	if (bgp_path_suppressed(pi) && !UNSUPPRESS_MAP_NAME(filter))
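
After this refactor the announce gate reads: a path that is not the bestpath
is advertised only if an addpath strategy claims it, and that claim is now
delegated to bgp_addpath_capable(), added in the next file. A toy rendering of
the gate under that assumption; CHECK_FLAG and BGP_PATH_SELECTED are stand-ins
for the FRR macros, and addpath_claimed abbreviates the helper's verdict:

#include <stdbool.h>
#include <stdio.h>

#define BGP_PATH_SELECTED 0x1
#define CHECK_FLAG(v, f) ((v) & (f))

struct toy_path {
	unsigned int flags;
	bool addpath_claimed; /* stands in for bgp_addpath_capable() */
};

/* Post-patch shape of the gate in subgroup_announce_check():
 * non-bestpaths survive only if an addpath strategy wants them. */
static bool announce_ok(const struct toy_path *pi)
{
	if (!CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))
		if (!pi->addpath_claimed)
			return false;
	return true;
}

int main(void)
{
	struct toy_path best = { .flags = BGP_PATH_SELECTED };
	struct toy_path backup = { .addpath_claimed = true };
	struct toy_path hidden = { 0 };

	/* Prints "1 1 0": bestpath always, addpath-claimed path yes, rest no. */
	printf("%d %d %d\n", announce_ok(&best), announce_ok(&backup),
	       announce_ok(&hidden));
	return 0;
}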

bgpd/bgp_updgrp_adv.c

@@ -2230,10 +2230,18 @@ bool bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
 			   PEER_CAP_ADDPATH_AF_RX_RCV));
 }
 
+bool bgp_addpath_capable(struct bgp_path_info *bpi, struct peer *peer,
+			 afi_t afi, safi_t safi)
+{
+	return (bgp_addpath_tx_path(peer->addpath_type[afi][safi], bpi) ||
+		(safi == SAFI_LABELED_UNICAST &&
+		 bgp_addpath_tx_path(peer->addpath_type[afi][SAFI_UNICAST],
+				     bpi)));
+}
+
 bool bgp_check_selected(struct bgp_path_info *bpi, struct peer *peer,
 			bool addpath_capable, afi_t afi, safi_t safi)
 {
 	return (CHECK_FLAG(bpi->flags, BGP_PATH_SELECTED) ||
-		(addpath_capable &&
-		 bgp_addpath_tx_path(peer->addpath_type[afi][safi], bpi)));
+		(addpath_capable && bgp_addpath_capable(bpi, peer, afi, safi)));
 }
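
bgp_addpath_capable() is now the one place where the labeled-unicast-falls-
back-to-unicast rule is expressed on the TX side, so subgroup_announce_check()
and bgp_check_selected() inherit it automatically. A standalone sketch of its
truth table; tx_path() here is a boolean stand-in for bgp_addpath_tx_path(),
and the single-AFI config array is invented for the example:

#include <stdbool.h>
#include <stdio.h>

typedef enum { SAFI_UNICAST, SAFI_LABELED_UNICAST, SAFI_MAX } safi_t;

/* Boolean stand-in for bgp_addpath_tx_path(): "does the strategy
 * configured for this SAFI select the path?". */
static bool tx_path(bool strategy_selects)
{
	return strategy_selects;
}

/* Same shape as the new bgp_addpath_capable(): capable if the
 * path's own SAFI selects it, or, for labeled-unicast, if the
 * unicast SAFI does. */
static bool addpath_capable(const bool cfg[SAFI_MAX], safi_t safi)
{
	return tx_path(cfg[safi]) ||
	       (safi == SAFI_LABELED_UNICAST && tx_path(cfg[SAFI_UNICAST]));
}

int main(void)
{
	bool cfg[SAFI_MAX] = { [SAFI_UNICAST] = true };

	/* Prints "1 1": unicast-only config covers labeled-unicast too. */
	printf("%d %d\n", addpath_capable(cfg, SAFI_UNICAST),
	       addpath_capable(cfg, SAFI_LABELED_UNICAST));
	return 0;
}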

bgpd/bgp_updgrp.h

@@ -476,6 +476,8 @@ extern void update_bgp_group_free(struct bgp *bgp);
 extern bool bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi);
 extern bool bgp_check_selected(struct bgp_path_info *bpi, struct peer *peer,
 			       bool addpath_capable, afi_t afi, safi_t safi);
+extern bool bgp_addpath_capable(struct bgp_path_info *bpi, struct peer *peer,
+				afi_t afi, safi_t safi);
 
 /*
  * Inline functions