Merge pull request #3051 from mitch-skiba/addpath_change_V1

Addpath - Reuse IDs
This commit is contained in:
Donald Sharp 2018-11-13 09:20:22 -05:00 committed by GitHub
commit bddea5fdf8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 1546 additions and 191 deletions

422
bgpd/bgp_addpath.c Normal file
View File

@ -0,0 +1,422 @@
/*
* Addpath TX ID selection, and related utilities
* Copyright (C) 2018 Amazon.com, Inc. or its affiliates
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "bgp_addpath.h"
#include "bgp_route.h"
/*
 * Name tables (CLI keyword, human-readable, and JSON field names) for each
 * addpath strategy.  Indexed by enum bgp_addpath_strat; entry order must
 * match the enum declared in bgp_addpath_types.h.
 */
static struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};

/* Placeholder names returned for any strategy outside the known range, so
 * bgp_addpath_names() never has to return NULL. */
static struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};
/*
 * Returns a structure full of strings associated with an addpath type. Will
 * never return NULL: an out-of-range strategy yields the "unknown"
 * placeholder names.
 */
struct bgp_addpath_strategy_names *
bgp_addpath_names(enum bgp_addpath_strat strat)
{
	if (strat < BGP_ADDPATH_MAX)
		return &(strat_names[strat]);

	return &unknown_names;
}
/*
 * Tell whether at least one peer is currently transmitting addpaths for the
 * given afi/safi combination.
 */
int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
				safi_t safi)
{
	if (d->total_peercount[afi][safi])
		return 1;

	return 0;
}
/*
 * Reset the BGP-instance level addpath bookkeeping: clear every per-strategy
 * allocator pointer and zero all peer counts.
 */
void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
{
	afi_t afi;
	safi_t safi;
	int strat;

	for (afi = AFI_IP; afi < AFI_MAX; afi++) {
		for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
			d->total_peercount[afi][safi] = 0;
			for (strat = 0; strat < BGP_ADDPATH_MAX; strat++) {
				d->peercount[afi][safi][strat] = 0;
				d->id_allocators[afi][safi][strat] = NULL;
			}
		}
	}
}
/*
 * Free up resources associated with BGP route info structures.
 *
 * Any addpath TX ID still assigned on the path is returned to the free-ID
 * pool (nd) of the owning bgp_node so it can be reused by another path for
 * the same prefix.
 *
 * NOTE(review): nd is dereferenced whenever an ID is assigned; callers that
 * can pass nd == NULL must guarantee no IDs remain on d -- confirm at call
 * sites.
 */
void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
				struct bgp_addpath_node_data *nd)
{
	int i;

	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		if (d->addpath_tx_id[i] != IDALLOC_INVALID)
			idalloc_free_to_pool(&nd->free_ids[i],
					     d->addpath_tx_id[i]);
	}
}
/*
 * Return the addpath ID used to send a particular route, to a particular
 * peer, in a particular AFI/SAFI.  IDALLOC_INVALID means "no addpath ID"
 * (e.g. the peer's strategy is BGP_ADDPATH_NONE).
 */
uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
				 struct bgp_addpath_info_data *d)
{
	enum bgp_addpath_strat strat = peer->addpath_type[afi][safi];

	if (strat >= BGP_ADDPATH_MAX)
		return IDALLOC_INVALID;

	return d->addpath_tx_id[strat];
}
/*
* Returns true if the path has an assigned addpath ID for any of the addpath
* strategies.
*/
int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
{
int i;
for (i = 0; i < BGP_ADDPATH_MAX; i++)
if (d->addpath_tx_id[i] != 0)
return 1;
return 0;
}
/*
 * Release every ID pooled on this BGP prefix back to the per-strategy
 * allocators.
 */
void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
				struct bgp_addpath_node_data *nd, afi_t afi,
				safi_t safi)
{
	int strat;

	for (strat = 0; strat < BGP_ADDPATH_MAX; strat++)
		idalloc_drain_pool(bd->id_allocators[afi][safi][strat],
				   &(nd->free_ids[strat]));
}
/*
 * Whether the given addpath strategy only works with deterministic MED
 * configured.
 */
int bgp_addpath_dmed_required(int strategy)
{
	return (strategy == BGP_ADDPATH_BEST_PER_AS) ? 1 : 0;
}
/*
 * Return true if this is a path we should advertise due to a
 * configured addpath-tx knob
 */
int bgp_addpath_tx_path(enum bgp_addpath_strat strat,
			struct bgp_path_info *pi)
{
	switch (strat) {
	case BGP_ADDPATH_ALL:
		/* Every path goes out. */
		return 1;
	case BGP_ADDPATH_BEST_PER_AS:
		/* Only the per-AS winners chosen by deterministic MED. */
		return CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED) ? 1 : 0;
	case BGP_ADDPATH_NONE:
	default:
		return 0;
	}
}
/*
 * Purge all addpath ID's on a BGP instance associated with the addpath
 * strategy, and afi/safi combination. This lets us let go of all memory held
 * to track ID numbers associated with an addpath type not in use. Since
 * post-bestpath ID processing is skipped for types not used, this is the only
 * chance to free this data.
 */
static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
				   enum bgp_addpath_strat addpath_type)
{
	struct bgp_node *rn;
	struct bgp_path_info *pi;

	for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
	     rn = bgp_route_next(rn)) {
		/* Return the node's pooled (unassigned) IDs to the allocator
		 * first... */
		idalloc_drain_pool(
			bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
			&(rn->tx_addpath.free_ids[addpath_type]));
		/* ...then free any ID still assigned to a path and mark the
		 * path as holding no ID for this strategy. */
		for (pi = rn->info; pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[addpath_type]
			    != IDALLOC_INVALID) {
				idalloc_free(
					bgp->tx_addpath
						.id_allocators[afi][safi]
							[addpath_type],
					pi->tx_addpath
						.addpath_tx_id[addpath_type]);
				pi->tx_addpath.addpath_tx_id[addpath_type] =
					IDALLOC_INVALID;
			}
		}
	}

	/* All IDs are back with the allocator; tear it down entirely. */
	idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
}
/*
 * Allocate an Addpath ID for the given type on a path, if that strategy
 * would advertise the path at all.
 */
static void bgp_addpath_populate_path(struct id_alloc *allocator,
				      struct bgp_path_info *path,
				      enum bgp_addpath_strat addpath_type)
{
	if (!bgp_addpath_tx_path(addpath_type, path))
		return;

	path->tx_addpath.addpath_tx_id[addpath_type] =
		idalloc_allocate(allocator);
}
/*
 * Compute addpath ID's on a BGP instance associated with the addpath
 * strategy, and afi/safi combination. Since we won't waste the time computing
 * addpath IDs for unused strategies, the first time a peer is configured to
 * use a strategy, we have to backfill the data.
 */
static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
				      enum bgp_addpath_strat addpath_type)
{
	struct bgp_node *rn;
	struct bgp_path_info *bi;
	char buf[200];
	struct id_alloc *allocator;

	/* Descriptive allocator name for debugging.  snprintf() always
	 * NUL-terminates (C99), so no manual termination is needed. */
	snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
		 bgp_addpath_names(addpath_type)->config_name, (int)afi,
		 (int)safi);

	zlog_info("Computing addpath IDs for addpath type %s",
		  bgp_addpath_names(addpath_type)->human_name);

	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
		idalloc_new(buf);

	/* ID 1 is reserved for default-originate; never hand it out. */
	idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
			BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);

	allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];

	/* Backfill an ID onto every path this strategy would advertise. */
	for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
	     rn = bgp_route_next(rn))
		for (bi = rn->info; bi; bi = bi->next)
			bgp_addpath_populate_path(allocator, bi, addpath_type);
}
/*
* Handle updates to a peer or group's addpath strategy. If after adjusting
* counts a addpath strategy is in use for the first time, or no longer in use,
* the IDs for that strategy will be populated or flushed.
*/
void bgp_addpath_type_changed(struct bgp *bgp)
{
afi_t afi;
safi_t safi;
struct listnode *node, *nnode;
struct peer *peer;
int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
enum bgp_addpath_strat type;
FOREACH_AFI_SAFI(afi, safi) {
for (type=0; type<BGP_ADDPATH_MAX; type++) {
peer_count[afi][safi][type] = 0;
}
}
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
FOREACH_AFI_SAFI(afi, safi) {
type = peer->addpath_type[afi][safi];
if (type != BGP_ADDPATH_NONE) {
peer_count[afi][safi][type] += 1;
}
}
}
FOREACH_AFI_SAFI(afi, safi) {
for (type=0; type<BGP_ADDPATH_MAX; type++) {
int old = bgp->tx_addpath.peercount[afi][safi][type];
int new = peer_count[afi][safi][type];
bgp->tx_addpath.peercount[afi][safi][type] = new;
if (old == 0 && new != 0) {
bgp_addpath_populate_type(bgp, afi, safi,
type);
} else if (old != 0 && new == 0) {
bgp_addpath_flush_type(bgp, afi, safi, type);
}
}
}
}
/*
 * Change the addpath type assigned to a peer, or peer group. In addition to
 * adjusting the counts, peer sessions will be reset as needed to make the
 * change take effect.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type)
{
	struct bgp *bgp = peer->bgp;
	enum bgp_addpath_strat old_type = peer->addpath_type[afi][safi];
	struct listnode *node, *nnode;
	struct peer *tmp_peer;
	struct peer_group *group;

	if (addpath_type == old_type)
		return;

	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		/* A "no" config on a group member inherits group */
		addpath_type = peer->group->conf->addpath_type[afi][safi];
	}

	peer->addpath_type[afi][safi] = addpath_type;

	bgp_addpath_type_changed(bgp);

	if (addpath_type != BGP_ADDPATH_NONE) {
		if (bgp_addpath_dmed_required(addpath_type)) {
			/* best-per-AS cannot work without deterministic MED;
			 * turn it on and recompute bestpaths. */
			if (!bgp_flag_check(bgp, BGP_FLAG_DETERMINISTIC_MED)) {
				zlog_warn(
					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
					peer->host);
				bgp_flag_set(bgp, BGP_FLAG_DETERMINISTIC_MED);
				bgp_recalculate_all_bestpaths(bgp);
			}
		}
	}

	/* zlog supplies its own newline; no trailing "\n" in the message. */
	zlog_info("Resetting peer %s%s due to change in addpath config",
		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
		  peer->host);

	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		group = peer->group;

		/* group will be null as peer_group_delete calls peer_delete on
		 * group->conf. That peer_delete will eventually end up here
		 * if the group was configured to tx addpaths.
		 */
		if (group != NULL) {
			/* Propagate to members still using the old type. */
			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
					       tmp_peer)) {
				if (tmp_peer->addpath_type[afi][safi] ==
				    old_type) {
					bgp_addpath_set_peer_type(tmp_peer,
								  afi,
								  safi,
								  addpath_type);
				}
			}
		}
	} else {
		peer_change_action(peer, afi, safi, peer_change_reset);
	}
}
/*
 * Intended to run after bestpath. This function will take TX IDs from paths
 * that no longer need them, and give them to paths that do. This prevents
 * best-per-as updates from needing to do a separate withdraw and update just
 * to swap out which path is sent.
 */
void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_node *bn, afi_t afi,
			    safi_t safi)
{
	int i;
	struct bgp_path_info *pi;
	struct id_alloc_pool **pool_ptr;

	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		struct id_alloc *alloc =
			bgp->tx_addpath.id_allocators[afi][safi][i];
		pool_ptr = &(bn->tx_addpath.free_ids[i]);

		/* Skip strategies with no peers using them; their IDs are not
		 * maintained (and the allocator may not exist). */
		if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
			continue;

		/* Free Unused IDs back to the pool.*/
		for (pi = bn->info; pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
			    && !bgp_addpath_tx_path(i, pi)) {
				idalloc_free_to_pool(pool_ptr,
					pi->tx_addpath.addpath_tx_id[i]);
				pi->tx_addpath.addpath_tx_id[i] =
					IDALLOC_INVALID;
			}
		}

		/* Give IDs to paths that need them (pulling from the pool) */
		for (pi = bn->info; pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
			    && bgp_addpath_tx_path(i, pi)) {
				pi->tx_addpath.addpath_tx_id[i] =
					idalloc_allocate_prefer_pool(
						alloc, pool_ptr);
			}
		}

		/* Free any IDs left in the pool to the main allocator */
		idalloc_drain_pool(alloc, pool_ptr);
	}
}

72
bgpd/bgp_addpath.h Normal file
View File

@ -0,0 +1,72 @@
/*
* Addpath TX ID selection, and related utilities
* Copyright (C) 2018 Amazon.com, Inc. or its affiliates
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _QUAGGA_BGPD_TX_ADDPATH_H
#define _QUAGGA_BGPD_TX_ADDPATH_H

#include <stdint.h>
#include <zebra.h>

#include "bgpd/bgp_addpath_types.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_table.h"
#include "lib/json.h"

/* TX ID reserved for default-originate routes; never handed out by the
 * per-strategy allocators. */
#define BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE 1

/* Reset the BGP-instance level addpath bookkeeping (allocators, counts). */
void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d);

/* True if any peer transmits addpaths for the given afi/safi. */
int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
				safi_t safi);

/* Return a prefix node's pooled IDs to the per-strategy allocators. */
void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
				struct bgp_addpath_node_data *nd,
				afi_t afi, safi_t safi);

/* Release any TX IDs held by a path back to the node's free-ID pools. */
void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
				struct bgp_addpath_node_data *nd);

/* True if the path holds a TX ID under any strategy. */
int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d);

/* ID used to advertise this path to this peer (IDALLOC_INVALID if none). */
uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
				 struct bgp_addpath_info_data *d);

/* Name strings for a strategy; never returns NULL. */
struct bgp_addpath_strategy_names *
bgp_addpath_names(enum bgp_addpath_strat strat);

/* Whether the strategy requires deterministic MED to be configured. */
int bgp_addpath_dmed_required(int strategy);

/*
 * Return true if this is a path we should advertise due to a configured
 * addpath-tx knob
 */
int bgp_addpath_tx_path(enum bgp_addpath_strat strat,
			struct bgp_path_info *pi);

/*
 * Change the type of addpath used for a peer.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type);

/* Post-bestpath: reshuffle TX IDs between paths on one prefix node. */
void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_node *bn, afi_t afi,
			    safi_t safi);

/* Recount strategy users and populate/flush IDs on 0<->nonzero changes. */
void bgp_addpath_type_changed(struct bgp *bgp);

#endif

55
bgpd/bgp_addpath_types.h Normal file
View File

@ -0,0 +1,55 @@
/*
* Addpath TX ID selection, and related utilities
* Copyright (C) 2018 Amazon.com, Inc. or its affiliates
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _QUAGGA_BGPD_TX_ADDPATH_DATA_H
#define _QUAGGA_BGPD_TX_ADDPATH_DATA_H
#include "lib/id_alloc.h"
#include <stdint.h>

enum bgp_addpath_strat {
	BGP_ADDPATH_ALL = 0,
	BGP_ADDPATH_BEST_PER_AS,
	/* Count of ID-tracked strategies; sizes the arrays below.  NONE is
	 * deliberately placed AFTER MAX so it is excluded from those arrays
	 * and never gets an allocator or peer count slot. */
	BGP_ADDPATH_MAX,
	BGP_ADDPATH_NONE,
};

/* TX Addpath structures */

/* Per-BGP-instance addpath state. */
struct bgp_addpath_bgp_data {
	/* Peers using each strategy, per afi/safi. */
	unsigned int peercount[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
	/* Peers using any strategy, per afi/safi. */
	unsigned int total_peercount[AFI_MAX][SAFI_MAX];
	/* TX ID allocator per afi/safi/strategy; NULL while unused. */
	struct id_alloc *id_allocators[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
};

/* Per-prefix (bgp_node) addpath state: pools of IDs freed by paths on this
 * node, kept for reuse by other paths on the same node. */
struct bgp_addpath_node_data {
	struct id_alloc_pool *free_ids[BGP_ADDPATH_MAX];
};

/* Per-path addpath state: the TX ID assigned under each strategy, or
 * IDALLOC_INVALID when none. */
struct bgp_addpath_info_data {
	uint32_t addpath_tx_id[BGP_ADDPATH_MAX];
};

/* String set describing one strategy; see strat_names in bgp_addpath.c. */
struct bgp_addpath_strategy_names {
	const char *config_name;
	const char *human_name; /* path detail non-json */
	const char *human_description; /* non-json peer descriptions */
	const char *type_json_name; /* json peer listings */
	const char *id_json_name; /* path json output for tx ID# */
};

#endif

View File

@ -47,6 +47,7 @@
#include "bgpd/bgp_aspath.h" #include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_zebra.h" #include "bgpd/bgp_zebra.h"
#include "bgpd/bgp_nexthop.h" #include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_addpath.h"
/* /*
* Definitions and external declarations. * Definitions and external declarations.
@ -1059,7 +1060,7 @@ static int evpn_es_route_select_install(struct bgp *bgp,
&& old_select->sub_type == BGP_ROUTE_IMPORTED && old_select->sub_type == BGP_ROUTE_IMPORTED
&& !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR) && !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR)
&& !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) && !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED)
&& !bgp->addpath_tx_used[afi][safi]) { && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
if (bgp_zebra_has_route_changed(rn, old_select)) { if (bgp_zebra_has_route_changed(rn, old_select)) {
ret = evpn_es_install_vtep(bgp, es, ret = evpn_es_install_vtep(bgp, es,
(struct prefix_evpn *)&rn->p, (struct prefix_evpn *)&rn->p,
@ -1142,7 +1143,7 @@ static int evpn_route_select_install(struct bgp *bgp, struct bgpevpn *vpn,
&& old_select->sub_type == BGP_ROUTE_IMPORTED && old_select->sub_type == BGP_ROUTE_IMPORTED
&& !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR) && !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR)
&& !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) && !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED)
&& !bgp->addpath_tx_used[afi][safi]) { && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
if (bgp_zebra_has_route_changed(rn, old_select)) { if (bgp_zebra_has_route_changed(rn, old_select)) {
if (old_select->attr->sticky) if (old_select->attr->sticky)
SET_FLAG(flags, ZEBRA_MACIP_TYPE_STICKY); SET_FLAG(flags, ZEBRA_MACIP_TYPE_STICKY);

View File

@ -1387,10 +1387,7 @@ void bgp_open_capability(struct stream *s, struct peer *peer)
/* Only advertise addpath TX if a feature that /* Only advertise addpath TX if a feature that
* will use it is * will use it is
* configured */ * configured */
if (CHECK_FLAG(peer->af_flags[afi][safi], if (peer->addpath_type[afi][safi] != BGP_ADDPATH_NONE)
PEER_FLAG_ADDPATH_TX_ALL_PATHS)
|| CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS))
adv_addpath_tx = 1; adv_addpath_tx = 1;
} }
} }

View File

@ -65,6 +65,7 @@
#include "bgpd/bgp_nht.h" #include "bgpd/bgp_nht.h"
#include "bgpd/bgp_updgrp.h" #include "bgpd/bgp_updgrp.h"
#include "bgpd/bgp_label.h" #include "bgpd/bgp_label.h"
#include "bgpd/bgp_addpath.h"
#if ENABLE_BGP_VNC #if ENABLE_BGP_VNC
#include "bgpd/rfapi/rfapi_backend.h" #include "bgpd/rfapi/rfapi_backend.h"
@ -247,6 +248,8 @@ static void bgp_path_info_free(struct bgp_path_info *path)
bgp_unlink_nexthop(path); bgp_unlink_nexthop(path);
bgp_path_info_extra_free(&path->extra); bgp_path_info_extra_free(&path->extra);
bgp_path_info_mpath_free(&path->mpath); bgp_path_info_mpath_free(&path->mpath);
bgp_addpath_free_info_data(&path->tx_addpath,
path->net ? &path->net->tx_addpath : NULL);
peer_unlock(path->peer); /* bgp_path_info peer reference */ peer_unlock(path->peer); /* bgp_path_info peer reference */
@ -1472,7 +1475,7 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_path_info *pi,
* addpath * addpath
* feature that requires us to advertise it */ * feature that requires us to advertise it */
if (!CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) { if (!CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) {
if (!bgp_addpath_tx_path(peer, afi, safi, pi)) { if (!bgp_addpath_tx_path(peer->addpath_type[afi][safi], pi)) {
return 0; return 0;
} }
} }
@ -2078,6 +2081,8 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_node *rn,
bgp_path_info_mpath_aggregate_update(new_select, old_select); bgp_path_info_mpath_aggregate_update(new_select, old_select);
bgp_mp_list_clear(&mp_list); bgp_mp_list_clear(&mp_list);
bgp_addpath_update_ids(bgp, rn, afi, safi);
result->old = old_select; result->old = old_select;
result->new = new_select; result->new = new_select;
@ -2127,7 +2132,7 @@ int subgroup_process_announce_selected(struct update_subgroup *subgrp,
bgp_adj_out_set_subgroup(rn, subgrp, &attr, selected); bgp_adj_out_set_subgroup(rn, subgrp, &attr, selected);
else else
bgp_adj_out_unset_subgroup(rn, subgrp, 1, bgp_adj_out_unset_subgroup(rn, subgrp, 1,
selected->addpath_tx_id); addpath_tx_id);
} }
/* If selected is NULL we must withdraw the path using addpath_tx_id */ /* If selected is NULL we must withdraw the path using addpath_tx_id */
@ -2303,7 +2308,7 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn,
if (old_select && old_select == new_select if (old_select && old_select == new_select
&& !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR) && !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR)
&& !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED) && !CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED)
&& !bgp->addpath_tx_used[afi][safi]) { && !bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
if (bgp_zebra_has_route_changed(rn, old_select)) { if (bgp_zebra_has_route_changed(rn, old_select)) {
#if ENABLE_BGP_VNC #if ENABLE_BGP_VNC
vnc_import_bgp_add_route(bgp, p, old_select); vnc_import_bgp_add_route(bgp, p, old_select);
@ -2776,7 +2781,6 @@ struct bgp_path_info *info_make(int type, int sub_type, unsigned short instance,
new->attr = attr; new->attr = attr;
new->uptime = bgp_clock(); new->uptime = bgp_clock();
new->net = rn; new->net = rn;
new->addpath_tx_id = ++peer->bgp->addpath_tx_id;
return new; return new;
} }
@ -7486,6 +7490,18 @@ static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
} }
} }
static void route_vty_out_tx_ids(struct vty *vty,
struct bgp_addpath_info_data *d)
{
int i;
for (i = 0; i < BGP_ADDPATH_MAX; i++) {
vty_out(vty, "TX-%s %u%s", bgp_addpath_names(i)->human_name,
d->addpath_tx_id[i],
i < BGP_ADDPATH_MAX - 1 ? " " : "\n");
}
}
void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct prefix *p, void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct prefix *p,
struct bgp_path_info *path, afi_t afi, safi_t safi, struct bgp_path_info *path, afi_t afi, safi_t safi,
json_object *json_paths) json_object *json_paths)
@ -7517,6 +7533,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct prefix *p,
unsigned int first_as; unsigned int first_as;
bool nexthop_self = bool nexthop_self =
CHECK_FLAG(path->flags, BGP_PATH_ANNC_NH_SELF) ? true : false; CHECK_FLAG(path->flags, BGP_PATH_ANNC_NH_SELF) ? true : false;
int i;
if (json_paths) { if (json_paths) {
json_path = json_object_new_object(); json_path = json_object_new_object();
@ -8228,29 +8245,53 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct prefix *p,
} }
/* Line 8 display Addpath IDs */ /* Line 8 display Addpath IDs */
if (path->addpath_rx_id || path->addpath_tx_id) { if (path->addpath_rx_id
|| bgp_addpath_info_has_ids(&path->tx_addpath)) {
if (json_paths) { if (json_paths) {
json_object_int_add(json_path, "addpathRxId", json_object_int_add(json_path, "addpathRxId",
path->addpath_rx_id); path->addpath_rx_id);
json_object_int_add(json_path, "addpathTxId",
path->addpath_tx_id); /* Keep backwards compatibility with the old API
* by putting TX All's ID in the old field
*/
json_object_int_add(
json_path, "addpathTxId",
path->tx_addpath.addpath_tx_id
[BGP_ADDPATH_ALL]);
/* ... but create a specific field for each
* strategy
*/
for (i = 0; i < BGP_ADDPATH_MAX; i++) {
json_object_int_add(
json_path,
bgp_addpath_names(i)
->id_json_name,
path->tx_addpath
.addpath_tx_id[i]);
}
} else { } else {
vty_out(vty, " AddPath ID: RX %u, TX %u\n", vty_out(vty, " AddPath ID: RX %u, ",
path->addpath_rx_id, path->addpath_rx_id);
path->addpath_tx_id);
route_vty_out_tx_ids(vty, &path->tx_addpath);
} }
} }
/* If we used addpath to TX a non-bestpath we need to display /* If we used addpath to TX a non-bestpath we need to display
* "Advertised to" on a path-by-path basis */ * "Advertised to" on a path-by-path basis
if (bgp->addpath_tx_used[afi][safi]) { */
if (bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
first = 1; first = 1;
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
addpath_capable = addpath_capable =
bgp_addpath_encode_tx(peer, afi, safi); bgp_addpath_encode_tx(peer, afi, safi);
has_adj = bgp_adj_out_lookup( has_adj = bgp_adj_out_lookup(
peer, path->net, path->addpath_tx_id); peer, path->net,
bgp_addpath_id_for_peer(
peer, afi, safi,
&path->tx_addpath));
if ((addpath_capable && has_adj) if ((addpath_capable && has_adj)
|| (!addpath_capable && has_adj || (!addpath_capable && has_adj
@ -8958,7 +8999,7 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
* show what peers we advertised the bestpath to. If we are using * show what peers we advertised the bestpath to. If we are using
* addpath * addpath
* though then we must display Advertised to on a path-by-path basis. */ * though then we must display Advertised to on a path-by-path basis. */
if (!bgp->addpath_tx_used[afi][safi]) { if (!bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (bgp_adj_out_lookup(peer, rn, 0)) { if (bgp_adj_out_lookup(peer, rn, 0)) {
if (json && !json_adv_to) if (json && !json_adv_to)

View File

@ -24,6 +24,7 @@
#include "queue.h" #include "queue.h"
#include "nexthop.h" #include "nexthop.h"
#include "bgp_table.h" #include "bgp_table.h"
#include "bgp_addpath_types.h"
struct bgp_nexthop_cache; struct bgp_nexthop_cache;
struct bgp_route_evpn; struct bgp_route_evpn;
@ -220,7 +221,7 @@ struct bgp_path_info {
/* Addpath identifiers */ /* Addpath identifiers */
uint32_t addpath_rx_id; uint32_t addpath_rx_id;
uint32_t addpath_tx_id; struct bgp_addpath_info_data tx_addpath;
}; };
/* Structure used in BGP path selection */ /* Structure used in BGP path selection */

View File

@ -29,6 +29,7 @@
#include "bgpd/bgpd.h" #include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h" #include "bgpd/bgp_table.h"
#include "bgp_addpath.h"
void bgp_table_lock(struct bgp_table *rt) void bgp_table_lock(struct bgp_table *rt)
{ {
@ -76,7 +77,16 @@ static void bgp_node_destroy(route_table_delegate_t *delegate,
struct route_table *table, struct route_node *node) struct route_table *table, struct route_node *node)
{ {
struct bgp_node *bgp_node; struct bgp_node *bgp_node;
struct bgp_table *rt;
bgp_node = bgp_node_from_rnode(node); bgp_node = bgp_node_from_rnode(node);
rt = table->info;
if (rt->bgp) {
bgp_addpath_free_node_data(&rt->bgp->tx_addpath,
&bgp_node->tx_addpath,
rt->afi, rt->safi);
}
XFREE(MTYPE_BGP_NODE, bgp_node); XFREE(MTYPE_BGP_NODE, bgp_node);
} }

View File

@ -25,6 +25,7 @@
#include "table.h" #include "table.h"
#include "queue.h" #include "queue.h"
#include "linklist.h" #include "linklist.h"
#include "bgpd.h"
struct bgp_table { struct bgp_table {
/* table belongs to this instance */ /* table belongs to this instance */
@ -67,6 +68,8 @@ struct bgp_node {
#define BGP_NODE_USER_CLEAR (1 << 1) #define BGP_NODE_USER_CLEAR (1 << 1)
#define BGP_NODE_LABEL_CHANGED (1 << 2) #define BGP_NODE_LABEL_CHANGED (1 << 2)
#define BGP_NODE_REGISTERED_FOR_LABEL (1 << 3) #define BGP_NODE_REGISTERED_FOR_LABEL (1 << 3)
struct bgp_addpath_node_data tx_addpath;
}; };
/* /*

View File

@ -152,6 +152,7 @@ static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
dst->af_cap[afi][safi] = src->af_cap[afi][safi]; dst->af_cap[afi][safi] = src->af_cap[afi][safi];
dst->afc_nego[afi][safi] = src->afc_nego[afi][safi]; dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
dst->orf_plist[afi][safi] = src->orf_plist[afi][safi]; dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
dst->local_as = src->local_as; dst->local_as = src->local_as;
dst->change_local_as = src->change_local_as; dst->change_local_as = src->change_local_as;
dst->shared_network = src->shared_network; dst->shared_network = src->shared_network;
@ -322,6 +323,7 @@ static unsigned int updgrp_hash_key_make(void *p)
key = jhash_1word(peer->sort, key); /* EBGP or IBGP */ key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key); key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key); key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key); key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS), key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
key); key);
@ -437,6 +439,9 @@ static bool updgrp_hash_cmp(const void *p1, const void *p2)
if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS)) if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
return false; return false;
if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi])
return 0;
if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS) if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
!= (pe2->cap & PEER_UPDGRP_CAP_FLAGS)) != (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
return false; return false;
@ -1900,22 +1905,3 @@ int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
&& CHECK_FLAG(peer->af_cap[afi][safi], && CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ADDPATH_AF_RX_RCV)); PEER_CAP_ADDPATH_AF_RX_RCV));
} }
/*
* Return true if this is a path we should advertise due to a
* configured addpath-tx knob
*/
int bgp_addpath_tx_path(struct peer *peer, afi_t afi, safi_t safi,
struct bgp_path_info *pi)
{
if (CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_ADDPATH_TX_ALL_PATHS))
return 1;
if (CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)
&& CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
return 1;
return 0;
}

View File

@ -64,8 +64,7 @@
| PEER_FLAG_REMOVE_PRIVATE_AS_ALL \ | PEER_FLAG_REMOVE_PRIVATE_AS_ALL \
| PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE \ | PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE \
| PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE \ | PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE \
| PEER_FLAG_ADDPATH_TX_ALL_PATHS \ | PEER_FLAG_AS_OVERRIDE)
| PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS | PEER_FLAG_AS_OVERRIDE)
#define PEER_UPDGRP_CAP_FLAGS (PEER_CAP_AS4_RCV) #define PEER_UPDGRP_CAP_FLAGS (PEER_CAP_AS4_RCV)
@ -469,8 +468,6 @@ extern int update_group_clear_update_dbg(struct update_group *updgrp,
extern void update_bgp_group_free(struct bgp *bgp); extern void update_bgp_group_free(struct bgp *bgp);
extern int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi); extern int bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi);
extern int bgp_addpath_tx_path(struct peer *peer, afi_t afi, safi_t safi,
struct bgp_path_info *pi);
/* /*
* Inline functions * Inline functions

View File

@ -49,6 +49,7 @@
#include "bgpd/bgp_mplsvpn.h" #include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_updgrp.h" #include "bgpd/bgp_updgrp.h"
#include "bgpd/bgp_advertise.h" #include "bgpd/bgp_advertise.h"
#include "bgpd/bgp_addpath.h"
/******************** /********************
@ -97,6 +98,40 @@ static void adj_free(struct bgp_adj_out *adj)
XFREE(MTYPE_BGP_ADJ_OUT, adj); XFREE(MTYPE_BGP_ADJ_OUT, adj);
} }
static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
struct update_subgroup *subgrp)
{
struct bgp_adj_out *adj, *adj_next;
uint32_t id;
struct bgp_path_info *pi;
afi_t afi = SUBGRP_AFI(subgrp);
safi_t safi = SUBGRP_SAFI(subgrp);
struct peer *peer = SUBGRP_PEER(subgrp);
/* Look through all of the paths we have advertised for this rn and send
* a withdraw for the ones that are no longer present */
for (adj = ctx->rn->adj_out; adj; adj = adj_next) {
adj_next = adj->next;
if (adj->subgroup == subgrp) {
for (pi = ctx->rn->info; pi; pi = pi->next) {
id = bgp_addpath_id_for_peer(peer, afi, safi,
&pi->tx_addpath);
if (id == adj->addpath_tx_id) {
break;
}
}
if (!pi) {
subgroup_process_announce_selected(
subgrp, NULL, ctx->rn,
adj->addpath_tx_id);
}
}
}
}
static int group_announce_route_walkcb(struct update_group *updgrp, void *arg) static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
{ {
struct updwalk_context *ctx = arg; struct updwalk_context *ctx = arg;
@ -131,31 +166,7 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
if (!subgrp->t_coalesce) { if (!subgrp->t_coalesce) {
/* An update-group that uses addpath */ /* An update-group that uses addpath */
if (addpath_capable) { if (addpath_capable) {
/* Look through all of the paths we have subgrp_withdraw_stale_addpath(ctx, subgrp);
* advertised for this rn and
* send a withdraw for the ones that are no
* longer present */
for (adj = ctx->rn->adj_out; adj;
adj = adj_next) {
adj_next = adj->next;
if (adj->subgroup == subgrp) {
for (pi = ctx->rn->info; pi;
pi = pi->next) {
if (pi->addpath_tx_id
== adj->addpath_tx_id) {
break;
}
}
if (!pi) {
subgroup_process_announce_selected(
subgrp, NULL,
ctx->rn,
adj->addpath_tx_id);
}
}
}
for (pi = ctx->rn->info; pi; pi = pi->next) { for (pi = ctx->rn->info; pi; pi = pi->next) {
/* Skip the bestpath for now */ /* Skip the bestpath for now */
@ -164,7 +175,9 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
subgroup_process_announce_selected( subgroup_process_announce_selected(
subgrp, pi, ctx->rn, subgrp, pi, ctx->rn,
pi->addpath_tx_id); bgp_addpath_id_for_peer(
peer, afi, safi,
&pi->tx_addpath));
} }
/* Process the bestpath last so the "show [ip] /* Process the bestpath last so the "show [ip]
@ -174,7 +187,9 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
if (ctx->pi) if (ctx->pi)
subgroup_process_announce_selected( subgroup_process_announce_selected(
subgrp, ctx->pi, ctx->rn, subgrp, ctx->pi, ctx->rn,
ctx->pi->addpath_tx_id); bgp_addpath_id_for_peer(
peer, afi, safi,
&ctx->pi->tx_addpath));
} }
/* An update-group that does not use addpath */ /* An update-group that does not use addpath */
@ -182,7 +197,9 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
if (ctx->pi) { if (ctx->pi) {
subgroup_process_announce_selected( subgroup_process_announce_selected(
subgrp, ctx->pi, ctx->rn, subgrp, ctx->pi, ctx->rn,
ctx->pi->addpath_tx_id); bgp_addpath_id_for_peer(
peer, afi, safi,
&ctx->pi->tx_addpath));
} else { } else {
/* Find the addpath_tx_id of the path we /* Find the addpath_tx_id of the path we
* had advertised and * had advertised and
@ -433,15 +450,27 @@ void bgp_adj_out_set_subgroup(struct bgp_node *rn,
{ {
struct bgp_adj_out *adj = NULL; struct bgp_adj_out *adj = NULL;
struct bgp_advertise *adv; struct bgp_advertise *adv;
struct peer *peer;
afi_t afi;
safi_t safi;
peer = SUBGRP_PEER(subgrp);
afi = SUBGRP_AFI(subgrp);
safi = SUBGRP_SAFI(subgrp);
if (DISABLE_BGP_ANNOUNCE) if (DISABLE_BGP_ANNOUNCE)
return; return;
/* Look for adjacency information. */ /* Look for adjacency information. */
adj = adj_lookup(rn, subgrp, path->addpath_tx_id); adj = adj_lookup(
rn, subgrp,
bgp_addpath_id_for_peer(peer, afi, safi, &path->tx_addpath));
if (!adj) { if (!adj) {
adj = bgp_adj_out_alloc(subgrp, rn, path->addpath_tx_id); adj = bgp_adj_out_alloc(
subgrp, rn,
bgp_addpath_id_for_peer(peer, afi, safi,
&path->tx_addpath));
if (!adj) if (!adj)
return; return;
} }
@ -597,7 +626,9 @@ void subgroup_announce_table(struct update_subgroup *subgrp,
if (CHECK_FLAG(ri->flags, BGP_PATH_SELECTED) if (CHECK_FLAG(ri->flags, BGP_PATH_SELECTED)
|| (addpath_capable || (addpath_capable
&& bgp_addpath_tx_path(peer, afi, safi, ri))) { && bgp_addpath_tx_path(
peer->addpath_type[afi][safi],
ri))) {
if (subgroup_announce_check(rn, ri, subgrp, if (subgroup_announce_check(rn, ri, subgrp,
&rn->p, &attr)) &rn->p, &attr))
bgp_adj_out_set_subgroup(rn, subgrp, bgp_adj_out_set_subgroup(rn, subgrp,
@ -605,7 +636,9 @@ void subgroup_announce_table(struct update_subgroup *subgrp,
else else
bgp_adj_out_unset_subgroup( bgp_adj_out_unset_subgroup(
rn, subgrp, 1, rn, subgrp, 1,
ri->addpath_tx_id); bgp_addpath_id_for_peer(
peer, afi, safi,
&ri->tx_addpath));
} }
/* /*

View File

@ -56,6 +56,7 @@
#include "bgpd/bgp_nht.h" #include "bgpd/bgp_nht.h"
#include "bgpd/bgp_mplsvpn.h" #include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_label.h" #include "bgpd/bgp_label.h"
#include "bgpd/bgp_addpath.h"
/******************** /********************
* PRIVATE FUNCTIONS * PRIVATE FUNCTIONS

View File

@ -61,6 +61,7 @@
#include "bgpd/bgp_bfd.h" #include "bgpd/bgp_bfd.h"
#include "bgpd/bgp_io.h" #include "bgpd/bgp_io.h"
#include "bgpd/bgp_evpn.h" #include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_addpath.h"
static struct peer_group *listen_range_exists(struct bgp *bgp, static struct peer_group *listen_range_exists(struct bgp *bgp,
struct prefix *range, int exact); struct prefix *range, int exact);
@ -1888,9 +1889,8 @@ DEFUN (no_bgp_deterministic_med,
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
FOREACH_AFI_SAFI (afi, safi) FOREACH_AFI_SAFI (afi, safi)
if (CHECK_FLAG( if (bgp_addpath_dmed_required(
peer->af_flags[afi][safi], peer->addpath_type[afi][safi])) {
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)) {
bestpath_per_as_used = 1; bestpath_per_as_used = 1;
break; break;
} }
@ -6234,9 +6234,9 @@ DEFUN (neighbor_addpath_tx_all_paths,
if (!peer) if (!peer)
return CMD_WARNING_CONFIG_FAILED; return CMD_WARNING_CONFIG_FAILED;
return peer_af_flag_set_vty(vty, argv[idx_peer]->arg, bgp_node_afi(vty), bgp_addpath_set_peer_type(peer, bgp_node_afi(vty), bgp_node_safi(vty),
bgp_node_safi(vty), BGP_ADDPATH_ALL);
PEER_FLAG_ADDPATH_TX_ALL_PATHS); return CMD_SUCCESS;
} }
ALIAS_HIDDEN(neighbor_addpath_tx_all_paths, ALIAS_HIDDEN(neighbor_addpath_tx_all_paths,
@ -6254,9 +6254,23 @@ DEFUN (no_neighbor_addpath_tx_all_paths,
"Use addpath to advertise all paths to a neighbor\n") "Use addpath to advertise all paths to a neighbor\n")
{ {
int idx_peer = 2; int idx_peer = 2;
return peer_af_flag_unset_vty(vty, argv[idx_peer]->arg, struct peer *peer;
bgp_node_afi(vty), bgp_node_safi(vty),
PEER_FLAG_ADDPATH_TX_ALL_PATHS); peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
if (peer->addpath_type[bgp_node_afi(vty)][bgp_node_safi(vty)]
!= BGP_ADDPATH_ALL) {
vty_out(vty,
"%% Peer not currently configured to transmit all paths.");
return CMD_WARNING_CONFIG_FAILED;
}
bgp_addpath_set_peer_type(peer, bgp_node_afi(vty), bgp_node_safi(vty),
BGP_ADDPATH_NONE);
return CMD_SUCCESS;
} }
ALIAS_HIDDEN(no_neighbor_addpath_tx_all_paths, ALIAS_HIDDEN(no_neighbor_addpath_tx_all_paths,
@ -6279,9 +6293,10 @@ DEFUN (neighbor_addpath_tx_bestpath_per_as,
if (!peer) if (!peer)
return CMD_WARNING_CONFIG_FAILED; return CMD_WARNING_CONFIG_FAILED;
return peer_af_flag_set_vty(vty, argv[idx_peer]->arg, bgp_node_afi(vty), bgp_addpath_set_peer_type(peer, bgp_node_afi(vty), bgp_node_safi(vty),
bgp_node_safi(vty), BGP_ADDPATH_BEST_PER_AS);
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS);
return CMD_SUCCESS;
} }
ALIAS_HIDDEN(neighbor_addpath_tx_bestpath_per_as, ALIAS_HIDDEN(neighbor_addpath_tx_bestpath_per_as,
@ -6299,9 +6314,23 @@ DEFUN (no_neighbor_addpath_tx_bestpath_per_as,
"Use addpath to advertise the bestpath per each neighboring AS\n") "Use addpath to advertise the bestpath per each neighboring AS\n")
{ {
int idx_peer = 2; int idx_peer = 2;
return peer_af_flag_unset_vty(vty, argv[idx_peer]->arg, struct peer *peer;
bgp_node_afi(vty), bgp_node_safi(vty),
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS); peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
if (peer->addpath_type[bgp_node_afi(vty)][bgp_node_safi(vty)]
!= BGP_ADDPATH_BEST_PER_AS) {
vty_out(vty,
"%% Peer not currently configured to transmit all best path per as.");
return CMD_WARNING_CONFIG_FAILED;
}
bgp_addpath_set_peer_type(peer, bgp_node_afi(vty), bgp_node_safi(vty),
BGP_ADDPATH_NONE);
return CMD_SUCCESS;
} }
ALIAS_HIDDEN(no_neighbor_addpath_tx_bestpath_per_as, ALIAS_HIDDEN(no_neighbor_addpath_tx_bestpath_per_as,
@ -8656,15 +8685,11 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
json_addr, json_addr,
"privateAsNumsRemovedInUpdatesToNbr"); "privateAsNumsRemovedInUpdatesToNbr");
if (CHECK_FLAG(p->af_flags[afi][safi], if (p->addpath_type[afi][safi] != BGP_ADDPATH_NONE)
PEER_FLAG_ADDPATH_TX_ALL_PATHS)) json_object_boolean_true_add(
json_object_boolean_true_add(json_addr, json_addr,
"addpathTxAllPaths"); bgp_addpath_names(p->addpath_type[afi][safi])
->type_json_name);
if (CHECK_FLAG(p->af_flags[afi][safi],
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS))
json_object_boolean_true_add(json_addr,
"addpathTxBestpathPerAS");
if (CHECK_FLAG(p->af_flags[afi][safi], PEER_FLAG_AS_OVERRIDE)) if (CHECK_FLAG(p->af_flags[afi][safi], PEER_FLAG_AS_OVERRIDE))
json_object_string_add(json_addr, json_object_string_add(json_addr,
@ -8930,14 +8955,10 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
vty_out(vty, vty_out(vty,
" Private AS numbers removed in updates to this neighbor\n"); " Private AS numbers removed in updates to this neighbor\n");
if (CHECK_FLAG(p->af_flags[afi][safi], if (p->addpath_type[afi][safi] != BGP_ADDPATH_NONE)
PEER_FLAG_ADDPATH_TX_ALL_PATHS)) vty_out(vty, " %s\n",
vty_out(vty, " Advertise all paths via addpath\n"); bgp_addpath_names(p->addpath_type[afi][safi])
->human_description);
if (CHECK_FLAG(p->af_flags[afi][safi],
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS))
vty_out(vty,
" Advertise bestpath per AS via addpath\n");
if (CHECK_FLAG(p->af_flags[afi][safi], PEER_FLAG_AS_OVERRIDE)) if (CHECK_FLAG(p->af_flags[afi][safi], PEER_FLAG_AS_OVERRIDE))
vty_out(vty, vty_out(vty,

View File

@ -85,6 +85,7 @@
#include "bgpd/bgp_flowspec.h" #include "bgpd/bgp_flowspec.h"
#include "bgpd/bgp_labelpool.h" #include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_pbr.h" #include "bgpd/bgp_pbr.h"
#include "bgpd/bgp_addpath.h"
DEFINE_MTYPE_STATIC(BGPD, PEER_TX_SHUTDOWN_MSG, "Peer shutdown message (TX)"); DEFINE_MTYPE_STATIC(BGPD, PEER_TX_SHUTDOWN_MSG, "Peer shutdown message (TX)");
DEFINE_QOBJ_TYPE(bgp_master) DEFINE_QOBJ_TYPE(bgp_master)
@ -883,6 +884,31 @@ static bool peergroup_filter_check(struct peer *peer, afi_t afi, safi_t safi,
} }
} }
/* Return true if the addpath type is set for peer and different from
 * peer-group.
 */
static int peergroup_af_addpath_check(struct peer *peer, afi_t afi, safi_t safi)
{
	enum bgp_addpath_strat type = peer->addpath_type[afi][safi];

	/* Not set on the peer at all: nothing to report. */
	if (type == BGP_ADDPATH_NONE)
		return 0;

	/* Peer is not bound to a group: its own setting stands. */
	if (!peer_group_active(peer))
		return 1;

	/* Peer is in a group: only count settings differing from the group. */
	return (type != peer->group->conf->addpath_type[afi][safi]) ? 1 : 0;
}
/* Check peer's AS number and determines if this peer is IBGP or EBGP */ /* Check peer's AS number and determines if this peer is IBGP or EBGP */
static inline bgp_peer_sort_t peer_calc_sort(struct peer *peer) static inline bgp_peer_sort_t peer_calc_sort(struct peer *peer)
{ {
@ -960,6 +986,9 @@ bgp_peer_sort_t peer_sort(struct peer *peer)
static void peer_free(struct peer *peer) static void peer_free(struct peer *peer)
{ {
afi_t afi;
safi_t safi;
assert(peer->status == Deleted); assert(peer->status == Deleted);
QOBJ_UNREG(peer); QOBJ_UNREG(peer);
@ -1032,6 +1061,13 @@ static void peer_free(struct peer *peer)
bfd_info_free(&(peer->bfd_info)); bfd_info_free(&(peer->bfd_info));
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
bgp_addpath_set_peer_type(peer, afi, safi,
BGP_ADDPATH_NONE);
}
}
bgp_unlock(peer->bgp); bgp_unlock(peer->bgp);
memset(peer, 0, sizeof(struct peer)); memset(peer, 0, sizeof(struct peer));
@ -1118,6 +1154,7 @@ struct peer *peer_new(struct bgp *bgp)
SET_FLAG(peer->af_flags_invert[afi][safi], SET_FLAG(peer->af_flags_invert[afi][safi],
PEER_FLAG_SEND_LARGE_COMMUNITY); PEER_FLAG_SEND_LARGE_COMMUNITY);
} }
peer->addpath_type[afi][safi] = BGP_ADDPATH_NONE;
} }
/* set nexthop-unchanged for l2vpn evpn by default */ /* set nexthop-unchanged for l2vpn evpn by default */
@ -1210,6 +1247,8 @@ void peer_xfer_config(struct peer *peer_dst, struct peer *peer_src)
peer_dst->allowas_in[afi][safi] = peer_dst->allowas_in[afi][safi] =
peer_src->allowas_in[afi][safi]; peer_src->allowas_in[afi][safi];
peer_dst->weight[afi][safi] = peer_src->weight[afi][safi]; peer_dst->weight[afi][safi] = peer_src->weight[afi][safi];
peer_dst->addpath_type[afi][safi] =
peer_src->addpath_type[afi][safi];
} }
for (afidx = BGP_AF_START; afidx < BGP_AF_MAX; afidx++) { for (afidx = BGP_AF_START; afidx < BGP_AF_MAX; afidx++) {
@ -1808,6 +1847,11 @@ static void peer_group2peer_config_copy_af(struct peer_group *group,
MTYPE_BGP_FILTER_NAME); MTYPE_BGP_FILTER_NAME);
PEER_ATTR_INHERIT(peer, group, filter[afi][safi].usmap.map); PEER_ATTR_INHERIT(peer, group, filter[afi][safi].usmap.map);
} }
if (peer->addpath_type[afi][safi] == BGP_ADDPATH_NONE) {
peer->addpath_type[afi][safi] = conf->addpath_type[afi][safi];
bgp_addpath_type_changed(conf->bgp);
}
} }
static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi) static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi)
@ -2836,7 +2880,7 @@ static struct bgp *bgp_create(as_t *as, const char *name,
#if DFLT_BGP_DETERMINISTIC_MED #if DFLT_BGP_DETERMINISTIC_MED
bgp_flag_set(bgp, BGP_FLAG_DETERMINISTIC_MED); bgp_flag_set(bgp, BGP_FLAG_DETERMINISTIC_MED);
#endif #endif
bgp->addpath_tx_id = BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE; bgp_addpath_init_bgp_data(&bgp->tx_addpath);
bgp->as = *as; bgp->as = *as;
@ -3627,15 +3671,7 @@ int peer_active_nego(struct peer *peer)
return 0; return 0;
} }
/* peer_flag_change_type. */ void peer_change_action(struct peer *peer, afi_t afi, safi_t safi,
enum peer_change_type {
peer_change_none,
peer_change_reset,
peer_change_reset_in,
peer_change_reset_out,
};
static void peer_change_action(struct peer *peer, afi_t afi, safi_t safi,
enum peer_change_type type) enum peer_change_type type)
{ {
if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
@ -3731,8 +3767,6 @@ static const struct peer_flag_action peer_af_flag_action_list[] = {
{PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE, 1, peer_change_reset_out}, {PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE, 1, peer_change_reset_out},
{PEER_FLAG_AS_OVERRIDE, 1, peer_change_reset_out}, {PEER_FLAG_AS_OVERRIDE, 1, peer_change_reset_out},
{PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE, 1, peer_change_reset_out}, {PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE, 1, peer_change_reset_out},
{PEER_FLAG_ADDPATH_TX_ALL_PATHS, 1, peer_change_reset},
{PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS, 1, peer_change_reset},
{PEER_FLAG_WEIGHT, 0, peer_change_reset_in}, {PEER_FLAG_WEIGHT, 0, peer_change_reset_in},
{0, 0, 0}}; {0, 0, 0}};
@ -3957,9 +3991,7 @@ static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,
{ {
int found; int found;
int size; int size;
int addpath_tx_used;
bool invert, member_invert; bool invert, member_invert;
struct bgp *bgp;
struct peer *member; struct peer *member;
struct listnode *node, *nnode; struct listnode *node, *nnode;
struct peer_flag_action action; struct peer_flag_action action;
@ -4122,45 +4154,6 @@ static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,
} }
} }
/* Track if addpath TX is in use */
if (flag & (PEER_FLAG_ADDPATH_TX_ALL_PATHS
| PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)) {
bgp = peer->bgp;
addpath_tx_used = 0;
if (set) {
addpath_tx_used = 1;
if (flag & PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS) {
if (!bgp_flag_check(
bgp, BGP_FLAG_DETERMINISTIC_MED)) {
zlog_info(
"%s: enabling bgp deterministic-med, this is required"
" for addpath-tx-bestpath-per-AS",
peer->host);
bgp_flag_set(
bgp,
BGP_FLAG_DETERMINISTIC_MED);
bgp_recalculate_all_bestpaths(bgp);
}
}
} else {
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode,
member)) {
if (CHECK_FLAG(member->af_flags[afi][safi],
PEER_FLAG_ADDPATH_TX_ALL_PATHS)
|| CHECK_FLAG(
member->af_flags[afi][safi],
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)) {
addpath_tx_used = 1;
break;
}
}
}
bgp->addpath_tx_used[afi][safi] = addpath_tx_used;
}
return 0; return 0;
} }
@ -7065,15 +7058,21 @@ static void bgp_config_write_peer_af(struct vty *vty, struct bgp *bgp,
} }
/* addpath TX knobs */ /* addpath TX knobs */
if (peergroup_af_flag_check(peer, afi, safi, if (peergroup_af_addpath_check(peer, afi, safi)) {
PEER_FLAG_ADDPATH_TX_ALL_PATHS)) { switch (peer->addpath_type[afi][safi]) {
vty_out(vty, " neighbor %s addpath-tx-all-paths\n", addr); case BGP_ADDPATH_ALL:
} vty_out(vty, " neighbor %s addpath-tx-all-paths\n",
addr);
if (peergroup_af_flag_check(peer, afi, safi, break;
PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS)) { case BGP_ADDPATH_BEST_PER_AS:
vty_out(vty, " neighbor %s addpath-tx-bestpath-per-AS\n", vty_out(vty,
addr); " neighbor %s addpath-tx-bestpath-per-AS\n",
addr);
break;
case BGP_ADDPATH_MAX:
case BGP_ADDPATH_NONE:
break;
}
} }
/* ORF capability. */ /* ORF capability. */

View File

@ -39,6 +39,7 @@
#include "bitfield.h" #include "bitfield.h"
#include "vxlan.h" #include "vxlan.h"
#include "bgp_labelpool.h" #include "bgp_labelpool.h"
#include "bgp_addpath_types.h"
#define BGP_MAX_HOSTNAME 64 /* Linux max, is larger than most other sys */ #define BGP_MAX_HOSTNAME 64 /* Linux max, is larger than most other sys */
#define BGP_PEER_MAX_HASH_SIZE 16384 #define BGP_PEER_MAX_HASH_SIZE 16384
@ -462,8 +463,7 @@ struct bgp {
/* Auto-shutdown new peers */ /* Auto-shutdown new peers */
bool autoshutdown; bool autoshutdown;
uint32_t addpath_tx_id; struct bgp_addpath_bgp_data tx_addpath;
int addpath_tx_used[AFI_MAX][SAFI_MAX];
#if ENABLE_BGP_VNC #if ENABLE_BGP_VNC
struct rfapi_cfg *rfapi_cfg; struct rfapi_cfg *rfapi_cfg;
@ -938,12 +938,12 @@ struct peer {
#define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1 << 19) /* remove-private-as replace-as */ #define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1 << 19) /* remove-private-as replace-as */
#define PEER_FLAG_AS_OVERRIDE (1 << 20) /* as-override */ #define PEER_FLAG_AS_OVERRIDE (1 << 20) /* as-override */
#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1 << 21) /* remove-private-as all replace-as */ #define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1 << 21) /* remove-private-as all replace-as */
#define PEER_FLAG_ADDPATH_TX_ALL_PATHS (1 << 22) /* addpath-tx-all-paths */
#define PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS (1 << 23) /* addpath-tx-bestpath-per-AS */
#define PEER_FLAG_WEIGHT (1 << 24) /* weight */ #define PEER_FLAG_WEIGHT (1 << 24) /* weight */
#define PEER_FLAG_ALLOWAS_IN_ORIGIN (1 << 25) /* allowas-in origin */ #define PEER_FLAG_ALLOWAS_IN_ORIGIN (1 << 25) /* allowas-in origin */
#define PEER_FLAG_SEND_LARGE_COMMUNITY (1 << 26) /* Send large Communities */ #define PEER_FLAG_SEND_LARGE_COMMUNITY (1 << 26) /* Send large Communities */
enum bgp_addpath_strat addpath_type[AFI_MAX][SAFI_MAX];
/* MD5 password */ /* MD5 password */
char *password; char *password;
@ -1466,6 +1466,14 @@ typedef enum {
BGP_POLICY_DISTRIBUTE_LIST, BGP_POLICY_DISTRIBUTE_LIST,
} bgp_policy_type_e; } bgp_policy_type_e;
/* peer_flag_change_type. */
enum peer_change_type {
peer_change_none,
peer_change_reset,
peer_change_reset_in,
peer_change_reset_out,
};
extern struct bgp_master *bm; extern struct bgp_master *bm;
extern unsigned int multipath_num; extern unsigned int multipath_num;
@ -1597,6 +1605,8 @@ extern int peer_af_flag_unset(struct peer *, afi_t, safi_t, uint32_t);
extern int peer_af_flag_check(struct peer *, afi_t, safi_t, uint32_t); extern int peer_af_flag_check(struct peer *, afi_t, safi_t, uint32_t);
extern void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi, extern void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
uint32_t flag); uint32_t flag);
extern void peer_change_action(struct peer *peer, afi_t afi, safi_t safi,
enum peer_change_type type);
extern int peer_ebgp_multihop_set(struct peer *, int); extern int peer_ebgp_multihop_set(struct peer *, int);
extern int peer_ebgp_multihop_unset(struct peer *); extern int peer_ebgp_multihop_unset(struct peer *);

View File

@ -46,6 +46,7 @@ man8 += $(MANBUILD)/bgpd.8
endif endif
bgpd_libbgp_a_SOURCES = \ bgpd_libbgp_a_SOURCES = \
bgpd/bgp_addpath.c \
bgpd/bgp_advertise.c \ bgpd/bgp_advertise.c \
bgpd/bgp_aspath.c \ bgpd/bgp_aspath.c \
bgpd/bgp_attr.c \ bgpd/bgp_attr.c \
@ -115,6 +116,8 @@ bgpd_libbgp_a_SOURCES += \
endif endif
noinst_HEADERS += \ noinst_HEADERS += \
bgpd/bgp_addpath.h \
bgpd/bgp_addpath_types.h \
bgpd/bgp_advertise.h \ bgpd/bgp_advertise.h \
bgpd/bgp_aspath.h \ bgpd/bgp_aspath.h \
bgpd/bgp_attr.h \ bgpd/bgp_attr.h \

406
lib/id_alloc.c Normal file
View File

@ -0,0 +1,406 @@
/*
* FRR ID Number Allocator
* Copyright (C) 2018 Amazon.com, Inc. or its affiliates
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "id_alloc.h"
#include "log.h"
#include "lib_errors.h"
#include "memory.h"
#include <inttypes.h>
DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR, "ID Number Allocator")
DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR_NAME, "ID Number Allocator Name")
DEFINE_MTYPE_STATIC(LIB, IDALLOC_DIRECTORY, "ID Number Allocator Directory")
DEFINE_MTYPE_STATIC(LIB, IDALLOC_SUBDIRECTORY,
"ID Number Allocator Subdirectory")
DEFINE_MTYPE_STATIC(LIB, IDALLOC_PAGE, "ID Number Allocator Page")
DEFINE_MTYPE_STATIC(LIB, IDALLOC_POOL, "ID Number temporary holding pool entry")
#if UINT_MAX >= UINT32_MAX
#define FFS32(x) ffs(x)
#else
/* ints less than 32 bits? Yikes. */
#define FFS32(x) ffsl(x)
#endif
#define DIR_MASK ((1<<IDALLOC_DIR_BITS)-1)
#define SUBDIR_MASK ((1<<IDALLOC_SUBDIR_BITS)-1)
#define PAGE_MASK ((1<<IDALLOC_PAGE_BITS)-1)
#define WORD_MASK ((1<<IDALLOC_WORD_BITS)-1)
#define OFFSET_MASK ((1<<IDALLOC_OFFSET_BITS)-1)
#define DIR_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS + \
IDALLOC_PAGE_BITS + IDALLOC_SUBDIR_BITS)
#define SUBDIR_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS + \
IDALLOC_PAGE_BITS)
#define PAGE_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS)
#define WORD_SHIFT (IDALLOC_OFFSET_BITS)
#define OFFSET_SHIFT (0)
#define ID_DIR(id) ((id >> DIR_SHIFT) & DIR_MASK)
#define ID_SUBDIR(id) ((id >> SUBDIR_SHIFT) & SUBDIR_MASK)
#define ID_PAGE(id) ((id >> PAGE_SHIFT) & PAGE_MASK)
#define ID_WORD(id) ((id >> WORD_SHIFT) & WORD_MASK)
#define ID_OFFSET(id) ((id >> OFFSET_SHIFT) & OFFSET_MASK)
/*
 * Find the page that an ID number belongs to in an allocator.
 * Optionally create the page if it doesn't exist.
 *
 * alloc:  allocator to look in.
 * id:     ID whose containing page is wanted.
 * create: when non-zero, materialize any missing dir/subdir/page on the way.
 *
 * Returns the page, or NULL when it does not exist and create is zero.
 *
 * NOTE(review): on creation, base_value is set to the raw id — callers
 * (create_next_page) pass page-aligned values (alloc->capacity); confirm no
 * other caller creates with an unaligned id.
 */
static struct id_alloc_page *find_or_create_page(struct id_alloc *alloc,
						 uint32_t id, int create)
{
	struct id_alloc_dir *dir = NULL;
	struct id_alloc_subdir *subdir = NULL;
	struct id_alloc_page *page = NULL;

	/* Level 1: top-level directory. */
	dir = alloc->sublevels[ID_DIR(id)];
	if (dir == NULL) {
		if (create) {
			dir = XCALLOC(MTYPE_IDALLOC_DIRECTORY, sizeof(*dir));
			alloc->sublevels[ID_DIR(id)] = dir;
		} else {
			return NULL;
		}
	}

	/* Level 2: subdirectory within the directory. */
	subdir = dir->sublevels[ID_SUBDIR(id)];
	if (subdir == NULL) {
		if (create) {
			subdir = XCALLOC(MTYPE_IDALLOC_SUBDIRECTORY,
					 sizeof(*subdir));
			dir->sublevels[ID_SUBDIR(id)] = subdir;
		} else {
			return NULL;
		}
	}

	/* Level 3: the bitmap page itself. */
	page = subdir->sublevels[ID_PAGE(id)];
	if (page == NULL && create) {
		page = XCALLOC(MTYPE_IDALLOC_PAGE, sizeof(*page));
		page->base_value = id;
		subdir->sublevels[ID_PAGE(id)] = page;
		/* A new page extends capacity by a full page of IDs and is
		 * entirely free, so push it onto the has_free list. */
		alloc->capacity += 1 << PAGE_SHIFT;
		page->next_has_free = alloc->has_free;
		alloc->has_free = page;
	} else if (page != NULL && create) {
		/* Asked to create a page that already exists: log the
		 * inconsistency but still return the existing page. */
		flog_err(
			EC_LIB_ID_CONSISTENCY,
			"ID Allocator %s attempt to re-create page at %" PRIu32,
			alloc->name, id);
	}

	return page;
}
/*
 * Return an ID number back to the allocator.
 * While this ID can be re-assigned through idalloc_allocate, the underlying
 * memory will not be freed. If this is the first free ID in the page, the page
 * will be added to the allocator's list of pages with free IDs.
 *
 * Freeing an ID from a page that was never created, or an ID that is not
 * currently allocated, logs a consistency error and does nothing.
 */
void idalloc_free(struct id_alloc *alloc, uint32_t id)
{
	struct id_alloc_page *page = NULL;
	int word, offset;
	uint32_t old_word, old_word_mask;

	/* Do not create pages on free: an ID in a missing page was never
	 * handed out by this allocator. */
	page = find_or_create_page(alloc, id, 0);

	if (!page) {
		flog_err(EC_LIB_ID_CONSISTENCY,
			 "ID Allocator %s cannot free #%" PRIu32
			 ". ID Block does not exist.",
			 alloc->name, id);
		return;
	}

	word = ID_WORD(id);
	offset = ID_OFFSET(id);

	/* Shift an unsigned one: offset can be 31, and (1 << 31) on a signed
	 * int is undefined behavior. */
	if ((page->allocated_mask[word] & (((uint32_t)1) << offset)) == 0) {
		flog_err(EC_LIB_ID_CONSISTENCY,
			 "ID Allocator %s cannot free #%" PRIu32
			 ". ID was not allocated at the time of free.",
			 alloc->name, id);
		return;
	}

	old_word = page->allocated_mask[word];
	page->allocated_mask[word] &= ~(((uint32_t)1) << offset);
	alloc->allocated -= 1;

	if (old_word == UINT32_MAX) {
		/* first bit in this block of 32 to be freed.*/

		old_word_mask = page->full_word_mask;
		page->full_word_mask &= ~(((uint32_t)1) << word);

		if (old_word_mask == UINT32_MAX) {
			/* first bit in page freed, add this to the allocator's
			 * list of pages with free space
			 */
			page->next_has_free = alloc->has_free;
			alloc->has_free = page;
		}
	}
}
/*
 * Add a allocation page to the end of the allocator's current range.
 * Returns null if the allocator has had all possible pages allocated already.
 */
static struct id_alloc_page *create_next_page(struct id_alloc *alloc)
{
	/* capacity wraps to 0 once every page exists; a populated first
	 * directory entry distinguishes "completely full" from "brand new". */
	if (alloc->capacity == 0 && alloc->sublevels[0] != NULL)
		return NULL;

	return find_or_create_page(alloc, alloc->capacity, 1);
}
/*
 * Marks an ID within an allocator page as in use.
 * If the ID was the last free ID in the page, the page is removed from the
 * allocator's list of free IDs. In the typical allocation case, this page is
 * the first page in the list, and removing the page is fast. If instead an ID
 * is being reserved by number, this may end up scanning the whole single linked
 * list of pages in order to remove it.
 *
 * page must be the page containing the ID; word/offset are the ID's position
 * within the page's bitmap (see ID_WORD / ID_OFFSET).
 */
static void reserve_bit(struct id_alloc *alloc, struct id_alloc_page *page,
			int word, int offset)
{
	struct id_alloc_page *itr;

	/* Mark this single ID allocated and bump the global count. */
	page->allocated_mask[word] |= ((uint32_t)1) << offset;
	alloc->allocated += 1;

	if (page->allocated_mask[word] == UINT32_MAX) {
		/* That filled this 32-ID word completely. */
		page->full_word_mask |= ((uint32_t)1) << word;
		if (page->full_word_mask == UINT32_MAX) {
			/* The whole page is now full: unlink it from the
			 * has_free list so allocation skips it. */
			if (alloc->has_free == page) {
				/* allocate always pulls from alloc->has_free */
				alloc->has_free = page->next_has_free;
			} else {
				/* reserve could pull from any page with free
				 * bits
				 */
				itr = alloc->has_free;
				while (itr) {
					if (itr->next_has_free == page) {
						itr->next_has_free =
							page->next_has_free;

						return;
					}

					itr = itr->next_has_free;
				}
			}
		}
	}
}
/*
 * Reserve an ID number from the allocator. Returns IDALLOC_INVALID (0) if the
 * allocator has no more IDs available.
 *
 * Pulls from the first page on the has_free list, growing the allocator by
 * one page when the list is empty. The two range checks guard against a
 * corrupted bitmap (a page on the free list with no free bit).
 */
uint32_t idalloc_allocate(struct id_alloc *alloc)
{
	struct id_alloc_page *page;
	int word, offset;
	uint32_t return_value;

	if (alloc->has_free == NULL)
		create_next_page(alloc);

	if (alloc->has_free == NULL) {
		flog_err(EC_LIB_ID_EXHAUST,
			 "ID Allocator %s has run out of IDs.", alloc->name);
		return IDALLOC_INVALID;
	}

	page = alloc->has_free;
	word = FFS32(~(page->full_word_mask)) - 1;

	/* base_value is uint32_t: print with PRIu32 (the old %d specifier
	 * mismatched the argument type, which is undefined behavior). */
	if (word < 0 || word >= 32) {
		flog_err(EC_LIB_ID_CONSISTENCY,
			 "ID Allocator %s internal error. Page starting at %" PRIu32
			 " is inconsistent.",
			 alloc->name, page->base_value);
		return IDALLOC_INVALID;
	}

	offset = FFS32(~(page->allocated_mask[word])) - 1;
	if (offset < 0 || offset >= 32) {
		flog_err(EC_LIB_ID_CONSISTENCY,
			 "ID Allocator %s internal error. Page starting at %" PRIu32
			 " is inconsistent on word %d",
			 alloc->name, page->base_value, word);
		return IDALLOC_INVALID;
	}
	return_value = page->base_value + word * 32 + offset;

	reserve_bit(alloc, page, word, offset);

	return return_value;
}
/*
* Tries to allocate a specific ID from the allocator. Returns IDALLOC_INVALID
* when the ID being "reserved" has allready been assigned/reserved. This should
* only be done with low numbered IDs, as the allocator needs to reserve bit-map
* pages in order
*/
uint32_t idalloc_reserve(struct id_alloc *alloc, uint32_t id)
{
struct id_alloc_page *page;
int word, offset;
while (alloc->capacity <= id)
create_next_page(alloc);
word = ID_WORD(id);
offset = ID_OFFSET(id);
page = find_or_create_page(alloc, id, 0);
/* page can't be null because the loop above ensured it was created. */
if (page->allocated_mask[word] & (((uint32_t)1) << offset)) {
flog_err(EC_LIB_ID_CONSISTENCY,
"ID Allocator %s could not reserve %" PRIu32
" because it is already allocated.",
alloc->name, id);
return IDALLOC_INVALID;
}
reserve_bit(alloc, page, word, offset);
return id;
}
/*
 * Set up an empty ID allocator, with IDALLOC_INVALID pre-reserved.
 */
struct id_alloc *idalloc_new(const char *name)
{
	struct id_alloc *alloc;

	alloc = XCALLOC(MTYPE_IDALLOC_ALLOCATOR, sizeof(*alloc));
	alloc->name = XSTRDUP(MTYPE_IDALLOC_ALLOCATOR_NAME, name);

	/* Burn ID 0 so it can serve as the invalid/sentinel value. */
	idalloc_reserve(alloc, IDALLOC_INVALID);

	return alloc;
}
/*
 * Free a subdir, and all pages below it.
 */
static void idalloc_destroy_subdir(struct id_alloc_subdir *subdir)
{
	int idx;

	/* Pages are created in order, so the first NULL ends the used run. */
	for (idx = 0; idx < IDALLOC_PAGE_COUNT; idx++) {
		if (!subdir->sublevels[idx])
			break;
		XFREE(MTYPE_IDALLOC_PAGE, subdir->sublevels[idx]);
	}

	XFREE(MTYPE_IDALLOC_SUBDIRECTORY, subdir);
}
/*
 * Free a dir, and all subdirs/pages below it.
 */
static void idalloc_destroy_dir(struct id_alloc_dir *dir)
{
	int idx;

	/* Subdirs are created in order, so the first NULL ends the used run. */
	for (idx = 0; idx < IDALLOC_SUBDIR_COUNT; idx++) {
		if (!dir->sublevels[idx])
			break;
		idalloc_destroy_subdir(dir->sublevels[idx]);
	}

	XFREE(MTYPE_IDALLOC_DIRECTORY, dir);
}
/*
 * Free all memory associated with an ID allocator.
 */
void idalloc_destroy(struct id_alloc *alloc)
{
	int idx;

	/* Dirs are created in order, so the first NULL ends the used run. */
	for (idx = 0; idx < IDALLOC_DIR_COUNT; idx++) {
		if (!alloc->sublevels[idx])
			break;
		idalloc_destroy_dir(alloc->sublevels[idx]);
	}

	XFREE(MTYPE_IDALLOC_ALLOCATOR_NAME, alloc->name);
	XFREE(MTYPE_IDALLOC_ALLOCATOR, alloc);
}
/*
 * Give an ID number to temporary holding pool.
 */
void idalloc_free_to_pool(struct id_alloc_pool **pool_ptr, uint32_t id)
{
	struct id_alloc_pool *entry;

	entry = XMALLOC(MTYPE_IDALLOC_POOL, sizeof(*entry));
	entry->id = id;

	/* Push onto the head of the singly linked pool list. */
	entry->next = *pool_ptr;
	*pool_ptr = entry;
}
/*
* Free all ID numbers held in a holding pool back to the main allocator.
*/
void idalloc_drain_pool(struct id_alloc *alloc, struct id_alloc_pool **pool_ptr)
{
struct id_alloc_pool *current, *next;
while (*pool_ptr) {
current = *pool_ptr;
next = current->next;
idalloc_free(alloc, current->id);
XFREE(MTYPE_IDALLOC_POOL, current);
*pool_ptr = next;
}
}
/*
 * Allocate an ID from either a holding pool, or the main allocator. IDs will
 * only be pulled form the main allocator when the pool is empty.
 */
uint32_t idalloc_allocate_prefer_pool(struct id_alloc *alloc,
				      struct id_alloc_pool **pool_ptr)
{
	struct id_alloc_pool *head = *pool_ptr;
	uint32_t id;

	/* Empty pool: fall through to the main allocator. */
	if (!head)
		return idalloc_allocate(alloc);

	/* Pop the first pooled ID and free its list entry. */
	id = head->id;
	*pool_ptr = head->next;
	XFREE(MTYPE_IDALLOC_POOL, head);

	return id;
}

90
lib/id_alloc.h Normal file
View File

@ -0,0 +1,90 @@
/*
* FRR ID Number Allocator
* Copyright (C) 2018 Amazon.com, Inc. or its affiliates
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _ZEBRA_ID_ALLOC_H
#define _ZEBRA_ID_ALLOC_H

#include <strings.h>
#include <limits.h>
#include <stdint.h>

/* ID 0 is never handed out by the allocator; callers may use it as a
 * "no ID" sentinel value.
 */
#define IDALLOC_INVALID 0

/* Bit widths of each level of the allocator's multi-level lookup tree.
 * The five widths sum to 32, covering the full uint32_t ID space:
 * dir -> subdir -> page -> word -> bit offset within the word.
 */
#define IDALLOC_DIR_BITS 8
#define IDALLOC_SUBDIR_BITS 7
#define IDALLOC_PAGE_BITS 7
#define IDALLOC_WORD_BITS 5
#define IDALLOC_OFFSET_BITS 5

/* Fan-out (entry counts) at each level, derived from the widths above. */
#define IDALLOC_DIR_COUNT (1 << IDALLOC_DIR_BITS)
#define IDALLOC_SUBDIR_COUNT (1 << IDALLOC_SUBDIR_BITS)
#define IDALLOC_PAGE_COUNT (1 << IDALLOC_PAGE_BITS)
#define IDALLOC_WORD_COUNT (1 << IDALLOC_WORD_BITS)

/* Leaf of the tree: a bitmap tracking one contiguous run of IDs. */
struct id_alloc_page {
	/* Bitmask of allocations. 1s indicates the ID is already allocated. */
	uint32_t allocated_mask[IDALLOC_WORD_COUNT];

	/* Bitmask for free space in allocated_mask. 1s indicate whole 32 bit
	 * section is full.
	 */
	uint32_t full_word_mask;

	/* The ID that bit 0 in allocated_mask corresponds to. */
	uint32_t base_value;

	struct id_alloc_page
		*next_has_free; /* Next page with at least one bit open */
};

/* Interior node: second level down, fans out to pages. */
struct id_alloc_subdir {
	struct id_alloc_page *sublevels[IDALLOC_PAGE_COUNT];
};

/* Interior node: first level down, fans out to subdirs. */
struct id_alloc_dir {
	struct id_alloc_subdir *sublevels[IDALLOC_SUBDIR_COUNT];
};

/* Top-level allocator handle. */
struct id_alloc {
	/* Root of the tree of allocation bitmaps. */
	struct id_alloc_dir *sublevels[IDALLOC_DIR_COUNT];

	/* Head of the linked list of pages with at least one free bit. */
	struct id_alloc_page *has_free;

	/* Human-readable name for this allocator (logging/debugging). */
	char *name;

	/* Number of IDs currently handed out, and the number of IDs the
	 * currently allocated pages can represent.
	 */
	uint32_t allocated, capacity;
};

/* Node of a singly linked "holding pool" of IDs temporarily set aside
 * rather than returned to the main allocator.
 */
struct id_alloc_pool {
	struct id_alloc_pool *next;
	uint32_t id;
};

/* Return a previously allocated ID to the allocator. */
void idalloc_free(struct id_alloc *alloc, uint32_t id);
/* Stash an ID in a holding pool for later reuse. */
void idalloc_free_to_pool(struct id_alloc_pool **pool_ptr, uint32_t id);
/* Return all IDs held in a pool back to the main allocator. */
void idalloc_drain_pool(struct id_alloc *alloc,
			struct id_alloc_pool **pool_ptr);
/* Allocate an unused ID (never IDALLOC_INVALID). */
uint32_t idalloc_allocate(struct id_alloc *alloc);
/* Allocate an ID, preferring ones held in the given pool. */
uint32_t idalloc_allocate_prefer_pool(struct id_alloc *alloc,
				      struct id_alloc_pool **pool_ptr);
/* Mark a specific ID as allocated so normal allocation skips it. */
uint32_t idalloc_reserve(struct id_alloc *alloc, uint32_t id);

struct id_alloc *idalloc_new(const char *name);
void idalloc_destroy(struct id_alloc *alloc);

#endif

View File

@ -72,6 +72,8 @@ enum lib_log_refs {
EC_LIB_SYSREPO_INIT, EC_LIB_SYSREPO_INIT,
EC_LIB_SYSREPO_DATA_CONVERT, EC_LIB_SYSREPO_DATA_CONVERT,
EC_LIB_LIBSYSREPO, EC_LIB_LIBSYSREPO,
EC_LIB_ID_CONSISTENCY,
EC_LIB_ID_EXHAUST,
}; };
extern void lib_error_init(void); extern void lib_error_init(void);

View File

@ -29,6 +29,7 @@ lib_libfrr_la_SOURCES = \
lib/graph.c \ lib/graph.c \
lib/hash.c \ lib/hash.c \
lib/hook.c \ lib/hook.c \
lib/id_alloc.c \
lib/if.c \ lib/if.c \
lib/if_rmap.c \ lib/if_rmap.c \
lib/imsg-buffer.c \ lib/imsg-buffer.c \
@ -147,6 +148,7 @@ pkginclude_HEADERS += \
lib/graph.h \ lib/graph.h \
lib/hash.h \ lib/hash.h \
lib/hook.h \ lib/hook.h \
lib/id_alloc.h \
lib/if.h \ lib/if.h \
lib/if_rmap.h \ lib/if_rmap.h \
lib/imsg.h \ lib/imsg.h \

7
tests/.gitignore vendored
View File

@ -20,23 +20,24 @@
/lib/cli/test_commands_defun.c /lib/cli/test_commands_defun.c
/lib/test_buffer /lib/test_buffer
/lib/test_checksum /lib/test_checksum
/lib/test_graph
/lib/test_heavy /lib/test_heavy
/lib/test_heavy_thread /lib/test_heavy_thread
/lib/test_heavy_wq /lib/test_heavy_wq
/lib/test_idalloc
/lib/test_memory /lib/test_memory
/lib/test_nexthop_iter /lib/test_nexthop_iter
/lib/test_privs /lib/test_privs
/lib/test_ringbuf /lib/test_ringbuf
/lib/test_srcdest_table
/lib/test_segv /lib/test_segv
/lib/test_sig /lib/test_sig
/lib/test_srcdest_table
/lib/test_stream /lib/test_stream
/lib/test_table /lib/test_table
/lib/test_timer_correctness /lib/test_timer_correctness
/lib/test_timer_performance /lib/test_timer_performance
/lib/test_ttable /lib/test_ttable
/lib/test_zmq
/lib/test_zlog /lib/test_zlog
/lib/test_graph /lib/test_zmq
/ospf6d/test_lsdb /ospf6d/test_lsdb
/ospf6d/test_lsdb_clippy.c /ospf6d/test_lsdb_clippy.c

View File

@ -28,6 +28,8 @@
#include "bgpd/bgp_table.h" #include "bgpd/bgp_table.h"
#include "linklist.h" #include "linklist.h"
/* Satisfy link requirements from including bgpd.h */
struct zebra_privs_t bgpd_privs = {0};
/* /*
* test_node_t * test_node_t
* *

View File

@ -254,6 +254,8 @@ TEST_STR_ATTR_HANDLER_DECL(password, password, "FRR-Peer", "FRR-Group");
TEST_ATTR_HANDLER_DECL(local_as, change_local_as, 1, 2); TEST_ATTR_HANDLER_DECL(local_as, change_local_as, 1, 2);
TEST_ATTR_HANDLER_DECL(timers_1, keepalive, 10, 20); TEST_ATTR_HANDLER_DECL(timers_1, keepalive, 10, 20);
TEST_ATTR_HANDLER_DECL(timers_2, holdtime, 30, 60); TEST_ATTR_HANDLER_DECL(timers_2, holdtime, 30, 60);
TEST_ATTR_HANDLER_DECL(addpath_types, addpath_type[pa->afi][pa->safi],
BGP_ADDPATH_ALL, BGP_ADDPATH_BEST_PER_AS);
TEST_SU_ATTR_HANDLER_DECL(update_source_su, update_source, "255.255.255.1", TEST_SU_ATTR_HANDLER_DECL(update_source_su, update_source, "255.255.255.1",
"255.255.255.2"); "255.255.255.2");
TEST_STR_ATTR_HANDLER_DECL(update_source_if, update_if, "IF-PEER", "IF-GROUP"); TEST_STR_ATTR_HANDLER_DECL(update_source_if, update_if, "IF-PEER", "IF-GROUP");
@ -414,12 +416,11 @@ static struct test_peer_attr test_peer_attrs[] = {
/* Address Family Attributes */ /* Address Family Attributes */
{ {
.cmd = "addpath-tx-all-paths", .cmd = "addpath",
.u.flag = PEER_FLAG_ADDPATH_TX_ALL_PATHS, .peer_cmd = "addpath-tx-all-paths",
}, .group_cmd = "addpath-tx-bestpath-per-AS",
{ .type = PEER_AT_AF_CUSTOM,
.cmd = "addpath-tx-bestpath-per-AS", .handlers[0] = TEST_HANDLER(addpath_types),
.u.flag = PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS,
}, },
{ {
.cmd = "allowas-in", .cmd = "allowas-in",

View File

@ -26,14 +26,10 @@ TestFlag.okfail('peer\\timers')
TestFlag.okfail('peer\\timers connect') TestFlag.okfail('peer\\timers connect')
TestFlag.okfail('peer\\update-source') TestFlag.okfail('peer\\update-source')
TestFlag.okfail('peer\\update-source') TestFlag.okfail('peer\\update-source')
TestFlag.okfail('peer\\ipv4-unicast\\addpath-tx-all-paths') TestFlag.okfail('peer\\ipv4-unicast\\addpath')
TestFlag.okfail('peer\\ipv4-multicast\\addpath-tx-all-paths') TestFlag.okfail('peer\\ipv4-multicast\\addpath')
TestFlag.okfail('peer\\ipv6-unicast\\addpath-tx-all-paths') TestFlag.okfail('peer\\ipv6-unicast\\addpath')
TestFlag.okfail('peer\\ipv6-multicast\\addpath-tx-all-paths') TestFlag.okfail('peer\\ipv6-multicast\\addpath')
TestFlag.okfail('peer\\ipv4-unicast\\addpath-tx-bestpath-per-AS')
TestFlag.okfail('peer\\ipv4-multicast\\addpath-tx-bestpath-per-AS')
TestFlag.okfail('peer\\ipv6-unicast\\addpath-tx-bestpath-per-AS')
TestFlag.okfail('peer\\ipv6-multicast\\addpath-tx-bestpath-per-AS')
TestFlag.okfail('peer\\ipv4-unicast\\allowas-in') TestFlag.okfail('peer\\ipv4-unicast\\allowas-in')
TestFlag.okfail('peer\\ipv4-multicast\\allowas-in') TestFlag.okfail('peer\\ipv4-multicast\\allowas-in')
TestFlag.okfail('peer\\ipv6-unicast\\allowas-in') TestFlag.okfail('peer\\ipv6-unicast\\allowas-in')

193
tests/lib/test_idalloc.c Normal file
View File

@ -0,0 +1,193 @@
#include "id_alloc.h"
#include <inttypes.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#define IDS_PER_PAGE (1<<(IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS))
char allocated_markers[IDS_PER_PAGE*3];
/*
 * Exercise the ID allocator: bulk allocation, low-ID reservation,
 * single-page and multi-page uniqueness/capacity accounting, and
 * reservation of previously freed IDs. Prints a success line and
 * returns 0 when all assertions hold.
 */
int main(int argc, char **argv)
{
	int i, val;
	uint32_t pg;
	struct id_alloc *a;

	(void)argc;
	(void)argv;

	/* 1. Rattle test, shake it a little and make sure it doesn't make any
	 * noise :)
	 */
	a = idalloc_new("Rattle test");
	for (i = 0; i < 1000000; i++)
		assert(idalloc_allocate(a) != 0);

	idalloc_destroy(a);

	/* 2. Reserve a few low IDs, make sure they are skipped by normal
	 * allocation.
	 */
	a = idalloc_new("Low Reservations");
	assert(idalloc_reserve(a, 1) == 1);
	assert(idalloc_reserve(a, 3) == 3);
	assert(idalloc_reserve(a, 5) == 5);
	for (i = 0; i < 100; i++) {
		val = idalloc_allocate(a);
		assert(val != 1 && val != 3 && val != 5);
	}
	idalloc_destroy(a);

	/* 3. Single page testing. Check that IDs are kept unique, and all IDs
	 * in the existing page are allocated before a new page is added.
	 */
	memset(allocated_markers, 0, sizeof(allocated_markers));
	allocated_markers[IDALLOC_INVALID] = 1;
	a = idalloc_new("Single Page");

	/* Allocate the rest of the first page; ID 0 (IDALLOC_INVALID) is
	 * already taken, so IDS_PER_PAGE - 1 allocations fill the page.
	 */
	for (i = 0; i < IDS_PER_PAGE - 1; i++) {
		val = idalloc_allocate(a);
		assert(val < IDS_PER_PAGE);
		assert(allocated_markers[val] == 0);
		assert(a->capacity == IDS_PER_PAGE);
		allocated_markers[val] = 1;
	}
	/* Check that the count is right */
	assert(a->allocated == IDS_PER_PAGE);

	/* Free some IDs out of the middle. */
	idalloc_free(a, 300);
	allocated_markers[300] = 0;
	idalloc_free(a, 400);
	allocated_markers[400] = 0;
	idalloc_free(a, 500);
	allocated_markers[500] = 0;
	assert(a->allocated == IDS_PER_PAGE-3);

	/* Allocate the three IDs back and make sure they are pulled from the
	 * set just freed
	 */
	for (i = 0; i < 3; i++) {
		val = idalloc_allocate(a);
		assert(val < IDS_PER_PAGE);
		assert(allocated_markers[val] == 0);
		assert(a->capacity == IDS_PER_PAGE);
		allocated_markers[val] = 1;
	}
	idalloc_destroy(a);

	/* 4. Multi-page testing. */
	memset(allocated_markers, 0, sizeof(allocated_markers));
	allocated_markers[IDALLOC_INVALID] = 1;
	a = idalloc_new("Multi-page");

	/* reserve the rest of the first page and all of the second and third */
	for (i = 0; i < 3 * IDS_PER_PAGE - 1; i++) {
		val = idalloc_allocate(a);
		assert(val < 3*IDS_PER_PAGE);
		assert(allocated_markers[val] == 0);
		allocated_markers[val] = 1;
	}
	assert(a->capacity == 3*IDS_PER_PAGE);
	assert(a->allocated == 3*IDS_PER_PAGE);

	/* Free two IDs from each page. */
	for (i = 0; i < 3; i++) {
		idalloc_free(a, 7 + i*IDS_PER_PAGE);
		allocated_markers[7 + i*IDS_PER_PAGE] = 0;

		idalloc_free(a, 4 + i*IDS_PER_PAGE);
		allocated_markers[4 + i*IDS_PER_PAGE] = 0;
	}
	assert(a->allocated == 3*IDS_PER_PAGE - 6);

	/* Allocate the six IDs back and make sure they are pulled from the set
	 * just freed.
	 */
	for (i = 0; i < 6; i++) {
		val = idalloc_allocate(a);
		assert(val < 3*IDS_PER_PAGE);
		assert(allocated_markers[val] == 0);
		assert(a->capacity == 3*IDS_PER_PAGE);
		allocated_markers[val] = 1;
	}
	assert(a->capacity == 3*IDS_PER_PAGE);
	assert(a->allocated == 3*IDS_PER_PAGE);

	/* Walk each allocated ID. Free it, then re-allocate it back.
	 * IDs 1 .. 3*IDS_PER_PAGE - 1 are all allocated at this point, so the
	 * loop bound is inclusive of the last one (the previous bound of
	 * "< 3 * IDS_PER_PAGE - 1" skipped the final ID).
	 */
	for (i = 1; i < 3 * IDS_PER_PAGE; i++) {
		idalloc_free(a, i);
		val = idalloc_allocate(a);
		assert(val == i);
		assert(a->capacity == 3*IDS_PER_PAGE);
		assert(a->allocated == 3*IDS_PER_PAGE);
	}
	idalloc_destroy(a);

	/* 5. Weird Reservations
	 * idalloc_reserve exists primarily to black out low numbered IDs that
	 * are reserved for special cases. However, we will test it for more
	 * complex use cases to avoid unpleasant surprises.
	 */
	memset(allocated_markers, 0, sizeof(allocated_markers));
	allocated_markers[IDALLOC_INVALID] = 1;
	a = idalloc_new("Weird Reservations");

	/* Start with 3 pages fully allocated. */
	for (i = 0; i < 3 * IDS_PER_PAGE - 1; i++) {
		val = idalloc_allocate(a);
		assert(val < 3*IDS_PER_PAGE);
		assert(allocated_markers[val] == 0);
		allocated_markers[val] = 1;
	}
	assert(a->capacity == 3*IDS_PER_PAGE);
	assert(a->allocated == 3*IDS_PER_PAGE);

	/* Free a bit out of each of the three pages. Then reserve one of the
	 * three freed IDs. Finally, allocate the other two freed IDs. Do this
	 * each of three ways. (Reserve out of the first, second, then third
	 * page.)
	 * The intent here is to exercise the rare cases on reserve_bit's
	 * linked-list removal in the case that it is not removing the first
	 * page with a free bit in its list of pages with free bits.
	 */
	for (pg = 0; pg < 3; pg++) {
		/* free a bit out of each of the three pages */
		for (i = 0; i < 3; i++) {
			idalloc_free(a, i*IDS_PER_PAGE + 17);
			allocated_markers[i*IDS_PER_PAGE + 17] = 0;
		}
		assert(a->capacity == 3*IDS_PER_PAGE);
		assert(a->allocated == 3*IDS_PER_PAGE-3);

		/* Reserve one of the freed IDs */
		assert(idalloc_reserve(a, pg*IDS_PER_PAGE + 17) ==
		       pg*IDS_PER_PAGE + 17);
		allocated_markers[pg*IDS_PER_PAGE + 17] = 1;
		assert(a->capacity == 3*IDS_PER_PAGE);
		assert(a->allocated == 3*IDS_PER_PAGE-2);

		/* Allocate the other two back */
		for (i = 0; i < 2; i++) {
			val = idalloc_allocate(a);
			assert(val < 3*IDS_PER_PAGE);
			assert(allocated_markers[val] == 0);
			allocated_markers[val] = 1;
		}
		assert(a->capacity == 3*IDS_PER_PAGE);
		assert(a->allocated == 3*IDS_PER_PAGE);
	}
	idalloc_destroy(a);

	puts("ID Allocator test successful.\n");
	return 0;
}

View File

@ -0,0 +1,6 @@
import frrtest
class TestIDAlloc(frrtest.TestMultiOut):
    # Binary under test, built from tests/lib/test_idalloc.c.
    program = './test_idalloc'

# Pass when the binary prints its single success line.
TestIDAlloc.onesimple('ID Allocator test successful.')

View File

@ -51,6 +51,7 @@ check_PROGRAMS = \
tests/lib/test_heavy_thread \ tests/lib/test_heavy_thread \
tests/lib/test_heavy_wq \ tests/lib/test_heavy_wq \
tests/lib/test_heavy \ tests/lib/test_heavy \
tests/lib/test_idalloc \
tests/lib/test_memory \ tests/lib/test_memory \
tests/lib/test_nexthop_iter \ tests/lib/test_nexthop_iter \
tests/lib/test_privs \ tests/lib/test_privs \
@ -198,6 +199,9 @@ tests_lib_test_heavy_wq_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_heavy_wq_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_heavy_wq_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_heavy_wq_LDADD = $(ALL_TESTS_LDADD) -lm tests_lib_test_heavy_wq_LDADD = $(ALL_TESTS_LDADD) -lm
tests_lib_test_heavy_wq_SOURCES = tests/lib/test_heavy_wq.c tests/helpers/c/main.c tests_lib_test_heavy_wq_SOURCES = tests/lib/test_heavy_wq.c tests/helpers/c/main.c
tests_lib_test_idalloc_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_idalloc_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_idalloc_SOURCES = tests/lib/test_idalloc.c
tests_lib_test_memory_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_memory_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_memory_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_memory_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_memory_LDADD = $(ALL_TESTS_LDADD) tests_lib_test_memory_LDADD = $(ALL_TESTS_LDADD)