Merge pull request #13425 from FRRouting/revert-12646-mpls_alloc_per_nh

Revert "MPLS allocation mode per next hop"
This commit is contained in:
Donald Sharp 2023-05-03 07:35:37 -04:00 committed by GitHub
commit abecbc3df1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
49 changed files with 84 additions and 3510 deletions

View File

@ -23,9 +23,6 @@
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_zebra.h"
#include "bgpd/bgp_vty.h"
#include "bgpd/bgp_rd.h"
#define BGP_LABELPOOL_ENABLE_TESTS 0
@ -833,16 +830,6 @@ DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
lcb->label);
break;
case LP_TYPE_NEXTHOP:
if (uj) {
json_object_string_add(json_elem, "prefix",
"nexthop");
json_object_int_add(json_elem, "label",
lcb->label);
} else
vty_out(vty, "%-18s %u\n", "nexthop",
lcb->label);
break;
}
}
if (uj)
@ -932,15 +919,6 @@ DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
vty_out(vty, "%-18s %u\n", "VRF",
label);
break;
case LP_TYPE_NEXTHOP:
if (uj) {
json_object_string_add(json_elem, "prefix",
"nexthop");
json_object_int_add(json_elem, "label", label);
} else
vty_out(vty, "%-18s %u\n", "nexthop",
label);
break;
}
}
if (uj)
@ -1013,13 +991,6 @@ DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
else
vty_out(vty, "VRF\n");
break;
case LP_TYPE_NEXTHOP:
if (uj)
json_object_string_add(json_elem, "prefix",
"nexthop");
else
vty_out(vty, "Nexthop\n");
break;
}
}
if (uj)
@ -1082,99 +1053,6 @@ DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
return CMD_SUCCESS;
}
/* Dump the per-nexthop MPLS label cache of one address-family for the
 * given BGP instance. When 'detail' is set, also list every BGP path
 * bound to each cache entry.
 */
static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
				       struct bgp *bgp, bool detail)
{
	struct bgp_label_per_nexthop_cache_head *tree;
	struct bgp_label_per_nexthop_cache *iter;
	safi_t safi;
	afi_t path_afi;
	void *src;
	char buf[PREFIX2STR_BUFFER];
	char labelstr[MPLS_LABEL_STRLEN];
	struct bgp_dest *dest;
	struct bgp_path_info *path;
	struct bgp *bgp_path;
	struct bgp_table *table;
	time_t tbuf;

	vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
		afi2str(afi), bgp->name_pretty);

	tree = &bgp->mpls_labels_per_nexthop[afi];
	frr_each (bgp_label_per_nexthop_cache, tree, iter) {
		if (afi2family(afi) == AF_INET)
			src = (void *)&iter->nexthop.u.prefix4;
		else
			src = (void *)&iter->nexthop.u.prefix6;
		vty_out(vty, " %s, label %s #paths %u\n",
			inet_ntop(afi2family(afi), src, buf, sizeof(buf)),
			mpls_label2str(1, &iter->label, labelstr,
				       sizeof(labelstr), 0, true),
			iter->path_count);
		if (iter->nh)
			vty_out(vty, " if %s\n",
				ifindex2ifname(iter->nh->ifindex,
					       iter->nh->vrf_id));
		/* last_update is monotonic time; convert to wall clock for
		 * display
		 */
		tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
		vty_out(vty, " Last update: %s", ctime(&tbuf));
		if (!detail)
			continue;
		vty_out(vty, " Paths:\n");
		LIST_FOREACH (path, &(iter->paths), label_nh_thread) {
			dest = path->net;
			/* validate dest before bgp_dest_table() dereferences
			 * it (previously the assert ran after the call)
			 */
			assert(dest);
			table = bgp_dest_table(dest);
			assert(table);
			/* use a local afi for the path: do not clobber the
			 * caller's afi, which the enclosing frr_each loop
			 * keeps using for inet_ntop()/afi2family()
			 */
			path_afi =
				family2afi(bgp_dest_get_prefix(dest)->family);
			safi = table->safi;
			bgp_path = table->bgp;
			if (dest->pdest) {
				vty_out(vty, " %d/%d %pBD RD ", path_afi,
					safi, dest);
				vty_out(vty, BGP_RD_AS_FORMAT(bgp->asnotation),
					(struct prefix_rd *)bgp_dest_get_prefix(
						dest->pdest));
				vty_out(vty, " %s flags 0x%x\n",
					bgp_path->name_pretty, path->flags);
			} else
				vty_out(vty, " %d/%d %pBD %s flags 0x%x\n",
					path_afi, safi, dest,
					bgp_path->name_pretty, path->flags);
		}
	}
}
/* CLI handler: display the per-nexthop label cache for both IPv4 and
 * IPv6, for the default instance or a named VRF/view.
 */
DEFPY(show_bgp_nexthop_label, show_bgp_nexthop_label_cmd,
      "show bgp [<view|vrf> VIEWVRFNAME] label-nexthop [detail]",
      SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
      "BGP label per-nexthop table\n"
      "Show detailed information\n")
{
	int word_idx = 0;
	struct bgp *bgp_inst;
	bool want_detail;
	int afi_iter;

	if (argv_find(argv, argc, "vrf", &word_idx))
		bgp_inst = bgp_lookup_by_name(argv[++word_idx]->arg);
	else
		bgp_inst = bgp_get_default();

	if (!bgp_inst)
		return CMD_SUCCESS;

	want_detail = argv_find(argv, argc, "detail", &word_idx) ? true
								 : false;

	for (afi_iter = AFI_IP; afi_iter <= AFI_IP6; afi_iter++)
		show_bgp_nexthop_label_afi(vty, afi_iter, bgp_inst,
					   want_detail);

	return CMD_SUCCESS;
}
#if BGP_LABELPOOL_ENABLE_TESTS
/*------------------------------------------------------------------------
* Testing code start
@ -1654,66 +1532,3 @@ void bgp_lp_vty_init(void)
install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}
DEFINE_MTYPE_STATIC(BGPD, LABEL_PER_NEXTHOP_CACHE,
		    "BGP Label Per Nexthop entry");

/* RB-tree comparator for per-nexthop label cache entries: entries are
 * ordered by their nexthop prefix, which serves as the tree key.
 */
int bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
				    const struct bgp_label_per_nexthop_cache *b)
{
	const struct prefix *key_a = &a->nexthop;
	const struct prefix *key_b = &b->nexthop;

	return prefix_cmp(key_a, key_b);
}
/* Allocate a new per-nexthop label cache entry keyed on 'nexthop',
 * insert it into 'tree' and return it. The label starts out invalid
 * until the label pool answers.
 */
struct bgp_label_per_nexthop_cache *
bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
			  struct prefix *nexthop)
{
	struct bgp_label_per_nexthop_cache *entry;

	entry = XCALLOC(MTYPE_LABEL_PER_NEXTHOP_CACHE, sizeof(*entry));

	prefix_copy(&entry->nexthop, nexthop);
	entry->label = MPLS_INVALID_LABEL;
	entry->tree = tree;
	LIST_INIT(&(entry->paths));

	bgp_label_per_nexthop_cache_add(tree, entry);

	return entry;
}
struct bgp_label_per_nexthop_cache *
bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
struct prefix *nexthop)
{
struct bgp_label_per_nexthop_cache blnc = {};
if (!tree)
return NULL;
memcpy(&blnc.nexthop, nexthop, sizeof(struct prefix));
return bgp_label_per_nexthop_cache_find(tree, &blnc);
}
/* Release a per-nexthop label cache entry: withdraw the MPLS binding
 * from zebra, return the label to the pool, detach the entry from its
 * tree and free it.
 */
void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
{
	if (blnc->label != MPLS_INVALID_LABEL) {
		/* blnc->nh may still be NULL if the label was allocated
		 * before the nexthop resolution was recorded (the guard
		 * below on nexthop_free() shows nh can be unset); only
		 * withdraw bindings that could have been installed.
		 */
		if (blnc->nh)
			bgp_zebra_send_nexthop_label(
				ZEBRA_MPLS_LABELS_DELETE, blnc->label,
				blnc->nh->ifindex, blnc->nh->vrf_id,
				ZEBRA_LSP_BGP, &blnc->nexthop);
		bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
	}
	bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
	if (blnc->nh)
		nexthop_free(blnc->nh);
	blnc->nh = NULL;
	XFREE(MTYPE_LABEL_PER_NEXTHOP_CACHE, blnc);
}
/* Register the per-nexthop label show command with the CLI. */
void bgp_label_per_nexthop_init(void)
{
	install_element(VIEW_NODE, &show_bgp_nexthop_label_cmd);
}

View File

@ -17,7 +17,6 @@
*/
#define LP_TYPE_VRF 0x00000001
#define LP_TYPE_BGP_LU 0x00000002
#define LP_TYPE_NEXTHOP 0x00000003
PREDECL_LIST(lp_fifo);
@ -42,55 +41,4 @@ extern void bgp_lp_event_zebra_down(void);
extern void bgp_lp_event_zebra_up(void);
extern void bgp_lp_vty_init(void);
struct bgp_label_per_nexthop_cache;
PREDECL_RBTREE_UNIQ(bgp_label_per_nexthop_cache);
extern int
bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
const struct bgp_label_per_nexthop_cache *b);
/* Per-nexthop MPLS label cache entry: one label shared by all the
 * VPN-exported paths that resolve over the same nexthop.
 */
struct bgp_label_per_nexthop_cache {

	/* RB-tree linkage. */
	struct bgp_label_per_nexthop_cache_item entry;

	/* the nexthop prefix is the RB-tree sort key */
	struct prefix nexthop;

	/* label obtained from the label pool
	 * (MPLS_INVALID_LABEL until allocated)
	 */
	mpls_label_t label;

	/* number of paths currently referencing this entry */
	unsigned int path_count;

	/* back pointer to bgp instance */
	struct bgp *to_bgp;

	/* copy of the nexthop resolution from bgp nexthop tracking,
	 * used to extract the interface of the nexthop
	 */
	struct nexthop *nh;

	/* list of bgp_path_info bound to this entry */
	LIST_HEAD(path_lists, bgp_path_info) paths;

	/* NOTE(review): set from monotime(), not wall clock - consumers
	 * must convert before displaying
	 */
	time_t last_update;

	/* Back pointer to the cache tree this entry belongs to. */
	struct bgp_label_per_nexthop_cache_head *tree;
};
DECLARE_RBTREE_UNIQ(bgp_label_per_nexthop_cache,
struct bgp_label_per_nexthop_cache, entry,
bgp_label_per_nexthop_cache_cmp);
void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc);
struct bgp_label_per_nexthop_cache *
bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
struct prefix *nexthop);
struct bgp_label_per_nexthop_cache *
bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
struct prefix *nexthop);
void bgp_label_per_nexthop_init(void);
#endif /* _FRR_BGP_LABELPOOL_H */

View File

@ -1116,14 +1116,12 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
/*
* Routes that are redistributed into BGP from zebra do not get
* nexthop tracking, unless MPLS allocation per nexthop is
* performed. In the default case nexthop tracking does not apply,
* if those routes are subsequently imported to other RIBs within
* BGP, the leaked routes do not carry the original
* BGP_ROUTE_REDISTRIBUTE sub_type. Therefore, in order to determine
* if the route we are currently leaking should have nexthop
* tracking, we must find the ultimate parent so we can check its
* sub_type.
* nexthop tracking. However, if those routes are subsequently
* imported to other RIBs within BGP, the leaked routes do not
* carry the original BGP_ROUTE_REDISTRIBUTE sub_type. Therefore,
* in order to determine if the route we are currently leaking
* should have nexthop tracking, we must find the ultimate
* parent so we can check its sub_type.
*
* As of now, source_bpi may at most be a second-generation route
* (only one hop back to ultimate parent for vrf-vpn-vrf scheme).
@ -1338,265 +1336,6 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
return new;
}
/* Detach a path from its per-nexthop label cache entry and drop the
 * reference count; the cache entry itself is freed once no path uses
 * it anymore. Safe to call with a NULL or unbound path.
 */
void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi)
{
	struct bgp_label_per_nexthop_cache *cache;

	if (!pi || !pi->label_nexthop_cache)
		return;

	cache = pi->label_nexthop_cache;

	LIST_REMOVE(pi, label_nh_thread);
	cache->path_count--;
	pi->label_nexthop_cache = NULL;

	if (LIST_EMPTY(&(cache->paths)))
		bgp_label_per_nexthop_free(cache);
}
/* Called upon reception of a ZAPI message from zebra, about
 * a new available label. Records the label on the cache entry,
 * programs the binding in zebra and re-exports every path bound
 * to this nexthop. Returns 0 in all cases.
 */
static int bgp_mplsvpn_get_label_per_nexthop_cb(mpls_label_t label,
						void *context, bool allocated)
{
	struct bgp_label_per_nexthop_cache *blnc = context;
	mpls_label_t old_label;
	int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
	struct bgp_path_info *pi;
	struct bgp_table *table;

	old_label = blnc->label;
	if (debug)
		zlog_debug("%s: label=%u, allocated=%d, nexthop=%pFX", __func__,
			   label, allocated, &blnc->nexthop);
	if (allocated)
		/* update the entry with the new label */
		blnc->label = label;
	else
		/*
		 * previously-allocated label is now invalid
		 * eg: zebra deallocated the labels and notifies it
		 */
		blnc->label = MPLS_INVALID_LABEL;
	if (old_label == blnc->label)
		return 0; /* no change */
	/* update paths */
	if (blnc->label != MPLS_INVALID_LABEL)
		/* NOTE(review): blnc->nh is dereferenced here without a
		 * NULL check; assumes the nexthop resolution is recorded
		 * before zebra answers - confirm the callback cannot fire
		 * earlier
		 */
		bgp_zebra_send_nexthop_label(
			ZEBRA_MPLS_LABELS_ADD, blnc->label, blnc->nh->ifindex,
			blnc->nh->vrf_id, ZEBRA_LSP_BGP, &blnc->nexthop);
	/* re-export each path through the VPN leak path so the update
	 * carries the new label value
	 */
	LIST_FOREACH (pi, &(blnc->paths), label_nh_thread) {
		if (!pi->net)
			continue;
		table = bgp_dest_table(pi->net);
		if (!table)
			continue;
		vpn_leak_from_vrf_update(blnc->to_bgp, table->bgp, pi);
	}
	return 0;
}
/* Get a per-nexthop label value:
 * - Find and return a per-nexthop label from the cache
 * - else allocate a new per-nexthop label cache entry and request a
 *   label from zebra; returns MPLS_INVALID_LABEL until zebra answers
 */
static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
	struct bgp_path_info *pi, struct bgp *to_bgp, struct bgp *from_bgp,
	afi_t afi, safi_t safi)
{
	struct bgp_nexthop_cache *bnc = pi->nexthop;
	struct bgp_label_per_nexthop_cache *blnc;
	struct bgp_label_per_nexthop_cache_head *tree;
	struct prefix *nh_pfx = NULL;
	struct prefix nh_gate = {0};

	/* extract the nexthop from the BNC nexthop cache */
	switch (bnc->nexthop->type) {
	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV4_IFINDEX:
		/* the nexthop is recursive */
		nh_gate.family = AF_INET;
		nh_gate.prefixlen = IPV4_MAX_BITLEN;
		IPV4_ADDR_COPY(&nh_gate.u.prefix4, &bnc->nexthop->gate.ipv4);
		nh_pfx = &nh_gate;
		break;
	case NEXTHOP_TYPE_IPV6:
	case NEXTHOP_TYPE_IPV6_IFINDEX:
		/* the nexthop is recursive */
		nh_gate.family = AF_INET6;
		nh_gate.prefixlen = IPV6_MAX_BITLEN;
		IPV6_ADDR_COPY(&nh_gate.u.prefix6, &bnc->nexthop->gate.ipv6);
		nh_pfx = &nh_gate;
		break;
	case NEXTHOP_TYPE_IFINDEX:
		/* the nexthop is directly connected */
		nh_pfx = &bnc->prefix;
		break;
	case NEXTHOP_TYPE_BLACKHOLE:
		assert(!"Blackhole nexthop. Already checked by the caller.");
	}

	/* find or allocate a nexthop label cache entry */
	tree = &from_bgp->mpls_labels_per_nexthop[family2afi(nh_pfx->family)];
	blnc = bgp_label_per_nexthop_find(tree, nh_pfx);
	if (!blnc) {
		blnc = bgp_label_per_nexthop_new(tree, nh_pfx);
		blnc->to_bgp = to_bgp;
		/* request a label to zebra for this nexthop;
		 * the response from zebra will trigger the callback.
		 * NOTE(review): blnc->nh is only set further below - confirm
		 * the label-pool callback cannot run before that happens
		 */
		bgp_lp_get(LP_TYPE_NEXTHOP, blnc,
			   bgp_mplsvpn_get_label_per_nexthop_cb);
	}

	if (pi->label_nexthop_cache == blnc)
		/* the path is already bound to this entry: no change */
		return blnc->label;

	/* Unlink from any existing nexthop cache. Free the entry if unused.
	 */
	bgp_mplsvpn_path_nh_label_unlink(pi);
	/* blnc is always non-NULL here (found or created above); the check
	 * is kept as defensive code
	 */
	if (blnc) {
		/* updates NHT pi list reference */
		LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
		pi->label_nexthop_cache = blnc;
		pi->label_nexthop_cache->path_count++;
		blnc->last_update = monotime(NULL);
	}

	/* then add or update the selected nexthop */
	if (!blnc->nh)
		blnc->nh = nexthop_dup(bnc->nexthop, NULL);
	else if (!nexthop_same(bnc->nexthop, blnc->nh)) {
		/* the nexthop resolution changed: refresh the stored copy
		 * and re-install the label binding on the new interface
		 */
		nexthop_free(blnc->nh);
		blnc->nh = nexthop_dup(bnc->nexthop, NULL);
		if (blnc->label != MPLS_INVALID_LABEL) {
			bgp_zebra_send_nexthop_label(
				ZEBRA_MPLS_LABELS_REPLACE, blnc->label,
				bnc->nexthop->ifindex, bnc->nexthop->vrf_id,
				ZEBRA_LSP_BGP, &blnc->nexthop);
		}
	}
	return blnc->label;
}
/* Filter out all the cases where a per-nexthop label is not possible:
 * - return an invalid label when the nexthop is invalid
 * - return the per-VRF label when the per-nexthop label is not supported
 * Otherwise, find or request a per-nexthop label.
 */
static mpls_label_t vpn_leak_from_vrf_get_per_nexthop_label(
	afi_t afi, safi_t safi, struct bgp_path_info *pi, struct bgp *from_bgp,
	struct bgp *to_bgp)
{
	struct bgp_path_info *bpi_ultimate = bgp_get_imported_bpi_ultimate(pi);
	struct bgp *bgp_nexthop = NULL;
	bool nh_valid;
	afi_t nh_afi;
	bool is_bgp_static_route;

	/* 'network' statements need dedicated treatment below: their
	 * nexthop may be unreachable yet still exportable
	 */
	is_bgp_static_route = bpi_ultimate->sub_type == BGP_ROUTE_STATIC &&
			      bpi_ultimate->type == ZEBRA_ROUTE_BGP;

	if (is_bgp_static_route == false && afi == AFI_IP &&
	    CHECK_FLAG(pi->attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) &&
	    (pi->attr->nexthop.s_addr == INADDR_ANY ||
	     !ipv4_unicast_valid(&pi->attr->nexthop))) {
		/* IPv4 nexthop in standard BGP encoding format.
		 * Format of address is not valid (not any, not unicast).
		 * Fallback to the per VRF label.
		 */
		bgp_mplsvpn_path_nh_label_unlink(pi);
		return from_bgp->vpn_policy[afi].tovpn_label;
	}

	if (is_bgp_static_route == false && afi == AFI_IP &&
	    pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4 &&
	    (pi->attr->mp_nexthop_global_in.s_addr == INADDR_ANY ||
	     !ipv4_unicast_valid(&pi->attr->mp_nexthop_global_in))) {
		/* IPv4 nexthop is in MP-BGP encoding format.
		 * Format of address is not valid (not any, not unicast).
		 * Fallback to the per VRF label.
		 */
		bgp_mplsvpn_path_nh_label_unlink(pi);
		return from_bgp->vpn_policy[afi].tovpn_label;
	}

	if (is_bgp_static_route == false && afi == AFI_IP6 &&
	    (pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL ||
	     pi->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) &&
	    (IN6_IS_ADDR_UNSPECIFIED(&pi->attr->mp_nexthop_global) ||
	     IN6_IS_ADDR_LOOPBACK(&pi->attr->mp_nexthop_global) ||
	     IN6_IS_ADDR_MULTICAST(&pi->attr->mp_nexthop_global))) {
		/* IPv6 nexthop is in MP-BGP encoding format.
		 * Format of address is not valid
		 * Fallback to the per VRF label.
		 */
		bgp_mplsvpn_path_nh_label_unlink(pi);
		return from_bgp->vpn_policy[afi].tovpn_label;
	}

	/* Check the next-hop reachability.
	 * Get the bgp instance where the bgp_path_info originates.
	 */
	if (pi->extra && pi->extra->bgp_orig)
		bgp_nexthop = pi->extra->bgp_orig;
	else
		bgp_nexthop = from_bgp;

	nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
	nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi, safi,
					   pi, NULL, 0, NULL);

	if (!nh_valid && is_bgp_static_route &&
	    !CHECK_FLAG(from_bgp->flags, BGP_FLAG_IMPORT_CHECK)) {
		/* "network" prefixes not routable, but since 'no bgp network
		 * import-check' is configured, they are always valid in the BGP
		 * table. Fallback to the per-vrf label
		 */
		bgp_mplsvpn_path_nh_label_unlink(pi);
		return from_bgp->vpn_policy[afi].tovpn_label;
	}

	if (!nh_valid || !pi->nexthop || pi->nexthop->nexthop_num == 0 ||
	    !pi->nexthop->nexthop) {
		/* invalid next-hop:
		 * do not send the per-vrf label
		 * otherwise, when the next-hop becomes valid,
		 * we will have 2 BGP updates:
		 * - one with the per-vrf label
		 * - the second with the per-nexthop label
		 */
		bgp_mplsvpn_path_nh_label_unlink(pi);
		return MPLS_INVALID_LABEL;
	}

	if (pi->nexthop->nexthop_num > 1 ||
	    pi->nexthop->nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {
		/* Blackhole or ECMP routes
		 * are not compatible with a per-nexthop label.
		 * Fallback to per-vrf label.
		 */
		bgp_mplsvpn_path_nh_label_unlink(pi);
		return from_bgp->vpn_policy[afi].tovpn_label;
	}

	return _vpn_leak_from_vrf_get_per_nexthop_label(pi, to_bgp, from_bgp,
							afi, safi);
}
/* cf vnc_import_bgp_add_route_mode_nvegroup() and add_vnc_route() */
void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
struct bgp *from_bgp, /* from */
@ -1789,32 +1528,12 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
nexthop_self_flag = 1;
}
if (CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
/* per nexthop label mode */
label_val = vpn_leak_from_vrf_get_per_nexthop_label(
afi, safi, path_vrf, from_bgp, to_bgp);
else
/* per VRF label mode */
label_val = from_bgp->vpn_policy[afi].tovpn_label;
if (label_val == MPLS_INVALID_LABEL &&
CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP)) {
/* no valid label for the moment
* when the 'bgp_mplsvpn_get_label_per_nexthop_cb' callback gets
* a valid label value, it will call the current function again.
*/
if (debug)
zlog_debug(
"%s: %s skipping: waiting for a valid per-label nexthop.",
__func__, from_bgp->name_pretty);
return;
}
if (label_val == MPLS_LABEL_NONE)
label_val = from_bgp->vpn_policy[afi].tovpn_label;
if (label_val == MPLS_LABEL_NONE) {
encode_label(MPLS_LABEL_IMPLICIT_NULL, &label);
else
} else {
encode_label(label_val, &label);
}
/* Set originator ID to "me" */
SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
@ -2051,8 +1770,6 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *to_bgp, struct bgp *from_bgp,
bpi, afi, safi);
bgp_path_info_delete(bn, bpi);
bgp_process(to_bgp, bn, afi, safi);
bgp_mplsvpn_path_nh_label_unlink(
bpi->extra->parent);
}
}
}

View File

@ -31,7 +31,6 @@
#define BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH 20
extern void bgp_mplsvpn_init(void);
extern void bgp_mplsvpn_path_nh_label_unlink(struct bgp_path_info *pi);
extern int bgp_nlri_parse_vpn(struct peer *, struct attr *, struct bgp_nlri *);
extern uint32_t decode_label(mpls_label_t *);
extern void encode_label(mpls_label_t, mpls_label_t *);

View File

@ -31,7 +31,6 @@
#include "bgpd/bgp_fsm.h"
#include "bgpd/bgp_vty.h"
#include "bgpd/bgp_rd.h"
#include "bgpd/bgp_mplsvpn.h"
DEFINE_MTYPE_STATIC(BGPD, MARTIAN_STRING, "BGP Martian Addr Intf String");
@ -120,8 +119,6 @@ static void bgp_nexthop_cache_reset(struct bgp_nexthop_cache_head *tree)
while (!LIST_EMPTY(&(bnc->paths))) {
struct bgp_path_info *path = LIST_FIRST(&(bnc->paths));
bgp_mplsvpn_path_nh_label_unlink(path);
path_nh_map(path, bnc, false);
}

View File

@ -31,7 +31,6 @@
#include "bgpd/bgp_flowspec_util.h"
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_rd.h"
#include "bgpd/bgp_mplsvpn.h"
extern struct zclient *zclient;
@ -150,8 +149,6 @@ void bgp_unlink_nexthop(struct bgp_path_info *path)
{
struct bgp_nexthop_cache *bnc = path->nexthop;
bgp_mplsvpn_path_nh_label_unlink(path);
if (!bnc)
return;
@ -1137,21 +1134,10 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
}
LIST_FOREACH (path, &(bnc->paths), nh_thread) {
if (path->type == ZEBRA_ROUTE_BGP &&
(path->sub_type == BGP_ROUTE_NORMAL ||
path->sub_type == BGP_ROUTE_STATIC ||
path->sub_type == BGP_ROUTE_IMPORTED))
/* evaluate the path */
;
else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE) {
/* evaluate the path for redistributed routes
* except those from VNC
*/
if ((path->type == ZEBRA_ROUTE_VNC) ||
(path->type == ZEBRA_ROUTE_VNC_DIRECT))
continue;
} else
/* don't evaluate the path */
if (!(path->type == ZEBRA_ROUTE_BGP
&& ((path->sub_type == BGP_ROUTE_NORMAL)
|| (path->sub_type == BGP_ROUTE_STATIC)
|| (path->sub_type == BGP_ROUTE_IMPORTED))))
continue;
dest = path->net;
@ -1244,26 +1230,7 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED);
path_valid = CHECK_FLAG(path->flags, BGP_PATH_VALID);
if (path->type == ZEBRA_ROUTE_BGP &&
path->sub_type == BGP_ROUTE_STATIC &&
!CHECK_FLAG(bgp_path->flags, BGP_FLAG_IMPORT_CHECK))
/* static routes with 'no bgp network import-check' are
* always valid. if nht is called with static routes,
* the vpn exportation needs to be triggered
*/
vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
path);
else if (path->sub_type == BGP_ROUTE_REDISTRIBUTE &&
safi == SAFI_UNICAST &&
(bgp_path->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp_path->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
/* redistribute routes are always valid
* if nht is called with redistribute routes, the vpn
* exportation needs to be triggered
*/
vpn_leak_from_vrf_update(bgp_get_default(), bgp_path,
path);
else if (path_valid != bnc_is_valid_nexthop) {
if (path_valid != bnc_is_valid_nexthop) {
if (path_valid) {
/* No longer valid, clear flag; also for EVPN
* routes, unimport from VRFs if needed.
@ -1276,12 +1243,6 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
bgp_evpn_unimport_route(bgp_path,
afi, safi, bgp_dest_get_prefix(dest), path);
if (safi == SAFI_UNICAST &&
(bgp_path->inst_type !=
BGP_INSTANCE_TYPE_VIEW))
vpn_leak_from_vrf_withdraw(
bgp_get_default(), bgp_path,
path);
} else {
/* Path becomes valid, set flag; also for EVPN
* routes, import from VRFs if needed.
@ -1294,12 +1255,6 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
bgp_evpn_import_route(bgp_path,
afi, safi, bgp_dest_get_prefix(dest), path);
if (safi == SAFI_UNICAST &&
(bgp_path->inst_type !=
BGP_INSTANCE_TYPE_VIEW))
vpn_leak_from_vrf_update(
bgp_get_default(), bgp_path,
path);
}
}

View File

@ -8676,16 +8676,12 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
*/
assert(attr.aspath);
if (p->family == AF_INET6)
UNSET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP));
switch (nhtype) {
case NEXTHOP_TYPE_IFINDEX:
switch (p->family) {
case AF_INET:
attr.nexthop.s_addr = INADDR_ANY;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
break;
case AF_INET6:
memset(&attr.mp_nexthop_global, 0,
@ -8698,7 +8694,6 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
case NEXTHOP_TYPE_IPV4_IFINDEX:
attr.nexthop = nexthop->ipv4;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
attr.mp_nexthop_global_in = nexthop->ipv4;
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
@ -8710,7 +8705,6 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
case AF_INET:
attr.nexthop.s_addr = INADDR_ANY;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
attr.mp_nexthop_global_in.s_addr = INADDR_ANY;
break;
case AF_INET6:
memset(&attr.mp_nexthop_global, 0,

View File

@ -319,12 +319,6 @@ struct bgp_path_info {
/* Addpath identifiers */
uint32_t addpath_rx_id;
struct bgp_addpath_info_data tx_addpath;
/* For nexthop per label linked list */
LIST_ENTRY(bgp_path_info) label_nh_thread;
/* Back pointer to the bgp label per nexthop structure */
struct bgp_label_per_nexthop_cache *label_nexthop_cache;
};
/* Structure used in BGP path selection */

View File

@ -9183,63 +9183,6 @@ ALIAS (af_rd_vpn_export,
"Between current address-family and vpn\n"
"For routes leaked from current address-family to vpn\n")
/* CLI handler toggling the label allocation mode (per-VRF vs per-nexthop)
 * for the current address-family, re-exporting VPN routes around the
 * change.
 */
DEFPY(af_label_vpn_export_allocation_mode,
      af_label_vpn_export_allocation_mode_cmd,
      "[no$no] label vpn export allocation-mode <per-vrf$label_per_vrf|per-nexthop$label_per_nh>",
      NO_STR
      "label value for VRF\n"
      "Between current address-family and vpn\n"
      "For routes leaked from current address-family to vpn\n"
      "Label allocation mode\n"
      "Allocate one label for all BGP updates of the VRF\n"
      "Allocate a label per connected next-hop in the VRF\n")
{
	VTY_DECLVAR_CONTEXT(bgp, bgp);
	afi_t afi;
	bool cur_per_nexthop, want_per_nexthop;

	afi = vpn_policy_getafi(vty, bgp, false);

	cur_per_nexthop = CHECK_FLAG(bgp->vpn_policy[afi].flags,
				     BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP)
				  ? true
				  : false;

	if (no) {
		/* negating a mode that is not the current one is an error */
		if ((label_per_nh && !cur_per_nexthop) ||
		    (label_per_vrf && cur_per_nexthop))
			return CMD_ERR_NO_MATCH;
		want_per_nexthop = false;
	} else
		want_per_nexthop = label_per_nh ? true : false;

	/* no change */
	if (cur_per_nexthop == want_per_nexthop)
		return CMD_SUCCESS;

	/*
	 * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
	 */
	vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
			   bgp);

	if (want_per_nexthop)
		SET_FLAG(bgp->vpn_policy[afi].flags,
			 BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
	else
		UNSET_FLAG(bgp->vpn_policy[afi].flags,
			   BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);

	/* post-change: re-export vpn routes */
	vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(),
			    bgp);

	hook_call(bgp_snmp_update_last_changed, bgp);
	return CMD_SUCCESS;
}
DEFPY (af_label_vpn_export,
af_label_vpn_export_cmd,
"[no] label vpn export <(0-1048575)$label_val|auto$label_auto>",
@ -17357,12 +17300,6 @@ static void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp,
}
}
if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
vty_out(vty,
"%*slabel vpn export allocation-mode per-nexthop\n",
indent, "");
tovpn_sid_index = bgp->vpn_policy[afi].tovpn_sid_index;
if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_SID_AUTO)) {
@ -20536,10 +20473,6 @@ void bgp_vty_init(void)
install_element(BGP_IPV6_NODE, &af_rd_vpn_export_cmd);
install_element(BGP_IPV4_NODE, &af_label_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_label_vpn_export_cmd);
install_element(BGP_IPV4_NODE,
&af_label_vpn_export_allocation_mode_cmd);
install_element(BGP_IPV6_NODE,
&af_label_vpn_export_allocation_mode_cmd);
install_element(BGP_IPV4_NODE, &af_nexthop_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_nexthop_vpn_export_cmd);
install_element(BGP_IPV4_NODE, &af_rt_vpn_imexport_cmd);

View File

@ -3911,32 +3911,3 @@ int bgp_zebra_srv6_manager_release_locator_chunk(const char *name)
{
return srv6_manager_release_locator_chunk(zclient, name);
}
/* Build and send one ZAPI MPLS label binding (add/replace/delete) to
 * zebra for a single nexthop gateway 'p', optionally qualified by an
 * outgoing interface.
 */
void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
				  ifindex_t ifindex, vrf_id_t vrf_id,
				  enum lsp_types_t ltype, struct prefix *p)
{
	struct zapi_labels zl = {};
	struct zapi_nexthop *znh = &zl.nexthops[0];
	bool is_ipv4 = (p->family == AF_INET);

	zl.type = ltype;
	zl.local_label = label;
	zl.nexthop_num = 1;

	if (is_ipv4)
		IPV4_ADDR_COPY(&znh->gate.ipv4, &p->u.prefix4);
	else
		IPV6_ADDR_COPY(&znh->gate.ipv6, &p->u.prefix6);

	if (ifindex == IFINDEX_INTERNAL)
		znh->type = is_ipv4 ? NEXTHOP_TYPE_IPV4 : NEXTHOP_TYPE_IPV6;
	else
		znh->type = is_ipv4 ? NEXTHOP_TYPE_IPV4_IFINDEX
				    : NEXTHOP_TYPE_IPV6_IFINDEX;

	znh->ifindex = ifindex;
	znh->vrf_id = vrf_id;
	znh->label_num = 0;

	/* vrf_id is DEFAULT_VRF */
	zebra_send_mpls_labels(zclient, cmd, &zl);
}

View File

@ -118,8 +118,4 @@ extern int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi,
extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name);
extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
ifindex_t index, vrf_id_t vrfid,
enum lsp_types_t ltype,
struct prefix *p);
#endif /* _QUAGGA_BGP_ZEBRA_H */

View File

@ -3354,11 +3354,6 @@ static struct bgp *bgp_create(as_t *as, const char *name,
SET_FLAG(bgp->af_flags[afi][SAFI_MPLS_VPN],
BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
}
for (afi = AFI_IP; afi < AFI_MAX; afi++)
bgp_label_per_nexthop_cache_init(
&bgp->mpls_labels_per_nexthop[afi]);
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
@ -8256,8 +8251,6 @@ void bgp_init(unsigned short instance)
bgp_lp_vty_init();
bgp_label_per_nexthop_init();
cmd_variable_handler_register(bgp_viewvrf_var_handlers);
}

View File

@ -211,7 +211,6 @@ struct vpn_policy {
#define BGP_VPN_POLICY_TOVPN_RD_SET (1 << 1)
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
#define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
#define BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP (1 << 4)
/*
* If we are importing another vrf into us keep a list of
@ -574,10 +573,6 @@ struct bgp {
/* Allocate MPLS labels */
uint8_t allocate_mpls_labels[AFI_MAX][SAFI_MAX];
/* Tree for next-hop lookup cache. */
struct bgp_label_per_nexthop_cache_head
mpls_labels_per_nexthop[AFI_MAX];
/* Allocate hash entries to store policy routing information
* The hash are used to host pbr rules somewhere.
* Actually, pbr will only be used by flowspec

View File

@ -2890,13 +2890,6 @@ address-family:
extended community values as described in
:ref:`bgp-extended-communities-attribute`.
.. clicmd:: label vpn export allocation-mode per-vrf|per-nexthop
Select how labels are allocated in the given VRF. By default, the `per-vrf`
mode is selected, and one label is used for all prefixes from the VRF. The
`per-nexthop` will use a unique label for all prefixes that are reachable
via the same nexthop.
.. clicmd:: label vpn export (0..1048575)|auto
Enables an MPLS label to be attached to a route exported from the current

View File

@ -1,143 +0,0 @@
{
"vrfName": "vrf1",
"localAS": 65500,
"routes":
{
"10.200.0.0/24": [
{
"valid": true,
"bestpath": true,
"prefix": "10.200.0.0",
"prefixLen": 24,
"network": "10.200.0.0\/24",
"nexthops": [
{
"ip": "192.168.0.2",
"afi": "ipv4",
"used": true
}
]
}
],
"172.31.0.11/32": [
{
"valid":true,
"bestpath":true,
"prefix":"172.31.0.11",
"prefixLen":32,
"network":"172.31.0.11/32",
"peerId":"192.0.2.100",
"nexthops":[
{
"ip":"192.0.2.11",
"afi":"ipv4",
"used":true
}
]
}
],
"172.31.0.12/32": [
{
"valid":true,
"bestpath":true,
"prefix":"172.31.0.12",
"prefixLen":32,
"network":"172.31.0.12/32",
"peerId":"192.0.2.100",
"nexthops":[
{
"ip":"192.0.2.12",
"afi":"ipv4",
"used":true
}
]
}
],
"172.31.0.13/32": [
{
"valid":true,
"bestpath":true,
"prefix":"172.31.0.13",
"prefixLen":32,
"network":"172.31.0.13/32",
"peerId":"192.168.255.13",
"nexthops":[
{
"ip":"192.168.255.13",
"afi":"ipv4",
"used":true
}
]
}
],
"172.31.0.14/32": [
{
"valid":true,
"bestpath":true,
"prefix":"172.31.0.14",
"prefixLen":32,
"network":"172.31.0.14/32",
"peerId":"(unspec)",
"nexthops":[
{
"ip":"192.0.2.14",
"afi":"ipv4",
"used":true
}
]
}
],
"172.31.0.15/32": [
{
"valid":true,
"bestpath":true,
"prefix":"172.31.0.15",
"prefixLen":32,
"network":"172.31.0.15/32",
"peerId":"(unspec)",
"nexthops":[
{
"ip":"192.0.2.12",
"afi":"ipv4",
"used":true
}
]
}
],
"172.31.0.20/32": [
{
"valid":true,
"bestpath":true,
"prefix":"172.31.0.20",
"prefixLen":32,
"network":"172.31.0.20/32",
"peerId":"192.0.2.100",
"nexthops":[
{
"ip":"192.0.2.11",
"afi":"ipv4",
"used":true
}
]
}
],
"172.31.0.111/32": [
{
"valid":true,
"bestpath":true,
"prefix":"172.31.0.111",
"prefixLen":32,
"network":"172.31.0.111/32",
"peerId":"192.0.2.100",
"nexthops":[
{
"ip":"192.0.2.11",
"afi":"ipv4",
"used":true
}
]
}
]
}
}

View File

@ -1,30 +0,0 @@
router bgp 65500
bgp router-id 192.168.0.1
no bgp ebgp-requires-policy
neighbor 192.168.0.2 remote-as 65501
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 soft-reconfiguration inbound
exit-address-family
!
router bgp 65500 vrf vrf1
bgp router-id 192.168.0.1
neighbor 192.0.2.100 remote-as 65500
neighbor 192.168.255.13 remote-as 65500
address-family ipv4 unicast
redistribute connected
redistribute static
label vpn export allocation-mode per-nexthop
label vpn export auto
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
!
interface r1-eth0
mpls bgp forwarding
!

View File

@ -1,50 +0,0 @@
{
"10.200.0.0/24": [
{
"prefix": "10.200.0.0/24",
"prefixLen": 24,
"protocol": "bgp",
"vrfName": "vrf1",
"selected": true,
"destSelected": true,
"distance": 20,
"metric": 0,
"nexthops": [
{
"flags": 3,
"fib": true,
"ip": "10.125.0.2",
"afi": "ipv4",
"interfaceName": "r1-eth0",
"vrf": "default",
"active": true,
"labels":[
102
]
}
]
}
],
"10.201.0.0/24": [
{
"prefix": "10.201.0.0/24",
"prefixLen": 24,
"protocol": "connected",
"vrfName": "vrf1",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"nexthops":[
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "r1-eth1",
"active": true
}
]
}
]
}

View File

@ -1,18 +0,0 @@
log stdout
debug zebra nht
!debug zebra kernel msgdump recv
!debug zebra dplane detailed
!debug zebra packet recv
interface r1-eth1 vrf vrf1
ip address 192.0.2.1/24
!
interface r1-eth2 vrf vrf1
ip address 192.168.255.1/24
!
interface r1-eth0
ip address 192.168.0.1/24
!
vrf vrf1
ip route 172.31.0.14/32 192.0.2.14
ip route 172.31.0.15/32 192.0.2.12
exit-vrf

View File

@ -1,11 +0,0 @@
router bgp 65500
bgp router-id 192.0.2.11
no bgp network import-check
neighbor 192.0.2.100 remote-as 65500
address-family ipv4 unicast
network 172.31.0.11/32
network 172.31.0.111/32
network 172.31.0.20/32
exit-address-family
!

View File

@ -1,4 +0,0 @@
log stdout
interface r11-eth0
ip address 192.0.2.11/24
!

View File

@ -1,9 +0,0 @@
router bgp 65500
bgp router-id 192.0.2.12
no bgp network import-check
neighbor 192.0.2.100 remote-as 65500
address-family ipv4 unicast
network 172.31.0.12/32
exit-address-family
!

View File

@ -1,4 +0,0 @@
log stdout
interface r12-eth0
ip address 192.0.2.12/24
!

View File

@ -1,9 +0,0 @@
router bgp 65500
bgp router-id 192.168.255.13
no bgp network import-check
address-family ipv4 unicast
neighbor 192.168.255.1 remote-as 65500
network 172.31.0.13/32
exit-address-family
!

View File

@ -1,4 +0,0 @@
log stdout
interface r13-eth0
ip address 192.168.255.13/24
!

View File

@ -1,38 +0,0 @@
{
"vrfName": "vrf1",
"localAS": 65501,
"routes":
{
"10.201.0.0/24": [
{
"prefix": "10.201.0.0",
"prefixLen": 24,
"network": "10.201.0.0\/24",
"nhVrfName": "default",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"10.200.0.0/24": [
{
"valid": true,
"bestpath": true,
"prefix": "10.200.0.0",
"prefixLen": 24,
"network": "10.200.0.0\/24",
"nexthops": [
{
"ip": "0.0.0.0",
"afi": "ipv4",
"used": true
}
]
}
]
}
}

View File

@ -1,187 +0,0 @@
{
"vrfName": "default",
"localAS": 65501,
"routes":
{
"routeDistinguishers":
{
"444:1":
{
"172.31.0.11/32": [
{
"valid": true,
"bestpath": true,
"prefix": "172.31.0.11",
"prefixLen": 32,
"network": "172.31.0.11\/32",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"172.31.0.12/32": [
{
"valid": true,
"bestpath": true,
"prefix": "172.31.0.12",
"prefixLen": 32,
"network": "172.31.0.12\/32",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"172.31.0.13/32": [
{
"valid": true,
"bestpath": true,
"prefix": "172.31.0.13",
"prefixLen": 32,
"network": "172.31.0.13\/32",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"172.31.0.14/32": [
{
"valid": true,
"bestpath": true,
"prefix": "172.31.0.14",
"prefixLen": 32,
"network": "172.31.0.14\/32",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"172.31.0.15/32": [
{
"valid": true,
"bestpath": true,
"prefix": "172.31.0.15",
"prefixLen": 32,
"network": "172.31.0.15\/32",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"172.31.0.20/32": [
{
"valid": true,
"bestpath": true,
"prefix": "172.31.0.20",
"prefixLen": 32,
"network": "172.31.0.20\/32",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"172.31.0.111/32": [
{
"valid": true,
"bestpath": true,
"prefix": "172.31.0.111",
"prefixLen": 32,
"network": "172.31.0.111\/32",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"192.0.2.0/24": [
{
"valid": true,
"bestpath": true,
"prefix": "192.0.2.0",
"prefixLen": 24,
"network": "192.0.2.0\/24",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
],
"192.168.255.0/24": [
{
"valid": true,
"bestpath": true,
"prefix": "192.168.255.0",
"prefixLen": 24,
"network": "192.168.255.0\/24",
"peerId": "192.168.0.1",
"nexthops": [
{
"ip": "192.168.0.1",
"afi": "ipv4",
"used": true
}
]
}
]
},
"444:2":
{
"10.200.0.0/24": [
{
"valid": true,
"bestpath": true,
"prefix": "10.200.0.0",
"prefixLen": 24,
"network": "10.200.0.0\/24",
"peerId": "(unspec)",
"nhVrfName": "vrf1",
"nexthops": [
{
"ip": "0.0.0.0",
"afi": "ipv4",
"used": true
}
]
}
]
}
}
}
}

View File

@ -1,25 +0,0 @@
router bgp 65501
bgp router-id 192.168.0.2
no bgp ebgp-requires-policy
neighbor 192.168.0.1 remote-as 65500
address-family ipv4 unicast
no neighbor 192.168.0.1 activate
exit-address-family
address-family ipv4 vpn
neighbor 192.168.0.1 activate
exit-address-family
!
router bgp 65501 vrf vrf1
bgp router-id 192.168.0.2
address-family ipv4 unicast
redistribute connected
label vpn export 102
rd vpn export 444:2
rt vpn both 52:100
export vpn
import vpn
exit-address-family
!
interface r2-eth0
mpls bgp forwarding
!

View File

@ -1,7 +0,0 @@
log stdout
interface r2-eth1 vrf vrf1
ip address 10.200.0.2/24
!
interface r2-eth0
ip address 192.168.0.2/24
!

View File

@ -1,13 +0,0 @@
router bgp 65500
bgp router-id 100.100.100.100
no bgp network import-check
neighbor 192.0.2.1 remote-as 65500
neighbor 192.0.2.11 remote-as 65500
neighbor 192.0.2.12 remote-as 65500
address-family ipv4 unicast
neighbor 192.0.2.1 route-reflector-client
neighbor 192.0.2.11 route-reflector-client
neighbor 192.0.2.12 route-reflector-client
exit-address-family
!

View File

@ -1,4 +0,0 @@
log stdout
interface rr-eth0
ip address 192.0.2.100/24
!

View File

@ -1,795 +0,0 @@
#!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# test_bgp_vpnv4_per_nexthop_label.py
#
# Copyright 2023 6WIND S.A.
#
"""
test_bgp_vpnv4_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering
Let us exchange VPNv4 updates between both devices
Updates from r1 will originate from the same RD, but will have separate
label values.
+----------+
| r11 |
|192.0.2.11+---+
| | | +----+--------+ +----------+
+----------+ | 192.0.2.1 |vrf | r1 |192.168.0.0/24| r2 |
+-------------------+ | 1+--------------+ |
+----------+ | |VRF1|AS65500 | | AS65501 |
| r12 | | +-------------+ | VPNV4| |VPNV4 |
|192.0.2.12+---+ |192.168.255.1+-+--+--------+ +----------+
| | |
+----------+ |
|
+----------+ |
| r13 | |
|192.168. +---------+
| 255.13 |
+----------+
"""
import os
import sys
import json
from functools import partial
import pytest
import functools
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
pytestmark = [pytest.mark.bgpd]
# Prefix groups used throughout the test. Each group is originated by (or
# reachable via) a single nexthop, so under per-nexthop label allocation each
# group is expected to share exactly one MPLS label value.
PREFIXES_R11 = ["172.31.0.11/32", "172.31.0.20/32", "172.31.0.111/32"]
PREFIXES_R12 = ["172.31.0.12/32", "172.31.0.15/32"]
PREFIXES_R13 = ["172.31.0.13/32"]
# Prefix learnt via a redistributed static route on r1.
PREFIXES_REDIST = ["172.31.0.14/32"]
# Prefixes of r1's connected VRF interfaces.
PREFIXES_CONNECTED = ["192.168.255.0/24", "192.0.2.0/24"]
def build_topo(tgen):
    "Build function"

    # Devices under test plus the route reflector.
    for rname in ("r1", "r2", "r11", "r12", "r13", "r14", "rr"):
        tgen.add_router(rname)

    # s1: core link between r1 and r2 (VPNv4 session).
    sw = tgen.add_switch("s1")
    sw.add_link(tgen.gears["r1"])
    sw.add_link(tgen.gears["r2"])

    # s2: shared VRF segment with r11, r12 and the route reflector.
    sw = tgen.add_switch("s2")
    sw.add_link(tgen.gears["r1"])
    sw.add_link(tgen.gears["r11"])
    sw.add_link(tgen.gears["r12"])
    sw.add_link(tgen.gears["rr"])

    # s3: stub segment behind r2.
    sw = tgen.add_switch("s3")
    sw.add_link(tgen.gears["r2"])

    # s4: point-to-point segment between r1 and r13.
    sw = tgen.add_switch("s4")
    sw.add_link(tgen.gears["r1"])
    sw.add_link(tgen.gears["r13"])

    # s5: point-to-point segment between r1 and r14.
    sw = tgen.add_switch("s5")
    sw.add_link(tgen.gears["r1"])
    sw.add_link(tgen.gears["r14"])
def _populate_iface():
    """Provision kernel-level VRF/MPLS state on r1 and r2.

    Creates vrf1, raises the MPLS platform label limit, enables MPLS input
    on the eth0 interfaces and enslaves the VRF member interfaces, running
    shell commands directly on the hosts.
    """
    tgen = get_topogen()
    cmds_list = [
        "ip link add vrf1 type vrf table 10",
        "echo 100000 > /proc/sys/net/mpls/platform_labels",
        "ip link set dev vrf1 up",
        "ip link set dev {0}-eth1 master vrf1",
        "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
    ]
    # r1 has a second interface belonging to vrf1.
    cmds_list_plus = [
        "ip link set dev {0}-eth2 master vrf1",
    ]

    # Fixes two defects of the original: it logged the unformatted command
    # template instead of the command actually run, and it shadowed the
    # builtin name `input`.
    for cmd in cmds_list + cmds_list_plus:
        formatted = cmd.format("r1")
        logger.info("input: " + formatted)
        output = tgen.net["r1"].cmd(formatted)
        logger.info("output: " + output)

    for cmd in cmds_list:
        formatted = cmd.format("r2")
        logger.info("input: " + formatted)
        output = tgen.net["r2"].cmd(formatted)
        logger.info("output: " + output)
def setup_module(mod):
    "Sets up the pytest environment"

    tgen = Topogen(build_topo, mod.__name__)
    tgen.start_topology()

    router_list = tgen.routers()

    # Kernel-side VRF/MPLS provisioning must precede daemon startup.
    _populate_iface()

    # Load per-router zebra and bgpd configurations.
    for rname, router in router_list.items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
        )

    # Initialize all routers.
    tgen.start_router()
def teardown_module(_mod):
    "Teardown the pytest environment"
    get_topogen().stop_topology()
def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=None):
    """
    Dump and check that vpnv4 entries have the same MPLS label value
    * 'router': the router to check
    * 'group': the list of prefixes to check. a single label value for the group has to be found
    * 'label_list': check that the label values are not present in the vpnv4 entries
    *               that list is updated with the present label value
    * 'label_value_expected': check that the mpls label read is the same as that value
    """
    stored_label_inited = False
    for prefix in group:
        dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True)
        assert dump, "{0}, {1}, route distinguisher not present".format(
            router.name, prefix
        )
        for rd, pathes in dump.items():
            for path in pathes["paths"]:
                assert (
                    "remoteLabel" in path.keys()
                ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
                logger.info(
                    "{0}, {1}, label value is {2}".format(
                        router.name, prefix, path["remoteLabel"]
                    )
                )
                if stored_label_inited:
                    # Every prefix of the group must carry the same label.
                    # (message fixed: the original was missing the closing ")")
                    assert (
                        path["remoteLabel"] == stored_label
                    ), "{0}, {1}, label value not expected one (expected {2}, observed {3})".format(
                        router.name, prefix, stored_label, path["remoteLabel"]
                    )
                else:
                    # First path seen: record the group's label value.
                    stored_label = path["remoteLabel"]
                    stored_label_inited = True
                    if label_list is not None:
                        # Labels must be unique across groups.
                        assert (
                            stored_label not in label_list
                        ), "{0}, {1}, label already detected in a previous prefix".format(
                            router.name, prefix
                        )
                        label_list.add(stored_label)

                if label_value_expected:
                    # (message fixed: the original was missing the closing ")")
                    assert (
                        path["remoteLabel"] == label_value_expected
                    ), "{0}, {1}, label value not expected (expected {2}, observed {3})".format(
                        router.name, prefix, label_value_expected, path["remoteLabel"]
                    )
def bgp_vpnv4_table_check_all(router, label_list=None, same=False):
    """
    Dump and check that vpnv4 entries are correctly configured with specific label values
    * 'router': the router to check
    * 'label_list': check that the label values are not present in the vpnv4 entries
    *               that list is updated with the present label value found.
    * 'same': by default, set to False. Addresses groups are classified by addresses.
    *         if set to True, all entries of all groups should have a unique label value
    """
    groups = (
        PREFIXES_R11,
        PREFIXES_R12,
        PREFIXES_R13,
        PREFIXES_REDIST,
        PREFIXES_CONNECTED,
    )
    if same:
        # One single label for everything: check all prefixes as one group.
        merged = []
        for grp in groups:
            merged = merged + grp
        bgp_vpnv4_table_check(router, group=merged, label_list=label_list)
    else:
        # Per-nexthop allocation: each group must have its own label.
        for grp in groups:
            bgp_vpnv4_table_check(router, group=grp, label_list=label_list)
def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
    """
    Dump and check 'show mpls table json' output. An assert is triggered in case test fails
    * 'router': the router to check
    * 'blacklist': the list of nexthops (IP or interface) that should not be on output
    * 'label_list': the list of labels that should be in inLabel value
    * 'whitelist': the list of nexthops (IP or interface) that should be on output
    """
    nexthop_list = []
    if blacklist:
        # Seed with the forbidden nexthops so they trip the membership check
        # below.  The original appended the blacklist object itself instead
        # of its elements, which made the blacklist ineffective.
        nexthop_list.extend(blacklist)
    logger.info("Checking MPLS labels on {}".format(router.name))
    dump = router.vtysh_cmd("show mpls table json", isjson=True)
    for in_label, label_info in dump.items():
        if label_list is not None:
            label_list.add(in_label)
        for nh in label_info["nexthops"]:
            assert (
                nh["installed"] == True and nh["type"] == "BGP"
            ), "{}, show mpls table, nexthop is not installed".format(router.name)
            if "nexthop" in nh.keys():
                # Duplicated or blacklisted nexthop addresses are errors.
                assert (
                    nh["nexthop"] not in nexthop_list
                ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
                    router.name
                )
                nexthop_list.append(nh["nexthop"])
            elif "interface" in nh.keys():
                assert (
                    nh["interface"] not in nexthop_list
                ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
                    router.name
                )
                nexthop_list.append(nh["interface"])
            else:
                assert (
                    0
                ), "{}, show mpls table, entry with neither nexthop nor interface".format(
                    router.name
                )
    if whitelist:
        for entry in whitelist:
            assert (
                entry in nexthop_list
            ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
                router.name, entry
            )
def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
    """Return None when the prefix (optionally with 'label') is absent, else an error string.

    Suitable as a probe for topotest.run_and_expect().
    """
    cmd = "show bgp {} vpn {} json".format(ipversion, prefix)
    dump = json.loads(router.vtysh_cmd(cmd))
    if label:
        wanted = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
    else:
        wanted = {rd: {"prefix": prefix}}
    # json_cmp returning None means the entry is still present.
    if topotest.json_cmp(dump, wanted) is None:
        return "not good"
    return None
def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
    """Return None when the prefix exists under 'rd', else the json_cmp diff."""
    cmd = "show bgp {} vpn {} json".format(ipversion, prefix)
    dump = json.loads(router.vtysh_cmd(cmd))
    return topotest.json_cmp(dump, {rd: {"prefix": prefix}})
def check_show_mpls_table_entry_label_found(router, inlabel, interface):
    """Return None when an installed MPLS entry for 'inlabel' goes out 'interface'."""
    dump = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
    wanted = {
        "inLabel": inlabel,
        "installed": True,
        "nexthops": [{"interface": interface}],
    }
    return topotest.json_cmp(dump, wanted)
def check_show_mpls_table_entry_label_not_found(router, inlabel):
    """Return None when no installed MPLS entry with 'inlabel' exists, else an error string.

    Suitable as a probe for topotest.run_and_expect(): None means the entry
    is gone.
    """
    output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
    # The JSON key is camel-cased 'inLabel' (see the '_found' variant above);
    # the original used 'inlabel', which could never match and made this
    # check always report success.
    expected = {"inLabel": inlabel, "installed": True}
    ret = topotest.json_cmp(output, expected)
    if ret is None:
        return "not good"
    return None
def mpls_entry_get_interface(router, label):
    """
    Assert that the label is in MPLS table
    Assert an outgoing interface is programmed
    return the outgoing interface
    """
    logger.info("Checking MPLS labels on {}".format(router.name))
    dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
    assert dump, "{0}, label {1} not present".format(router.name, label)

    outgoing_interface = None
    for nh in dump["nexthops"]:
        # Every nexthop of the entry must carry an outgoing interface;
        # the last one seen is returned.
        assert (
            "interface" in nh.keys()
        ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
            router.name, label
        )
        outgoing_interface = nh["interface"]
    return outgoing_interface
def test_protocols_convergence():
    """
    Assert that all protocols have converged
    statuses as they depend on it.
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    def _json_converges(router, command, json_file, count):
        # Poll 'command' until its JSON output matches the reference file.
        expected = json.loads(open(json_file).read())
        probe = partial(topotest.router_json_cmp, router, command, expected)
        _, result = topotest.run_and_expect(probe, None, count=count, wait=0.5)
        assert result is None, '"{}" JSON output mismatches'.format(router.name)

    # Check BGP IPv4 routing tables on VRF1 of r1.
    logger.info("Checking BGP IPv4 routes for convergence on r1 VRF1")
    router = tgen.gears["r1"]
    _json_converges(
        router,
        "show bgp vrf vrf1 ipv4 json",
        "{}/{}/bgp_ipv4_routes_vrf1.json".format(CWD, router.name),
        20,
    )

    # Check the BGP VPNv4 table on r2.
    logger.info("Checking BGP VPNv4 routes for convergence on r2")
    router = tgen.gears["r2"]
    _json_converges(
        router,
        "show bgp ipv4 vpn json",
        "{}/{}/bgp_vpnv4_routes.json".format(CWD, router.name),
        10,
    )

    # Check BGP labels received on r2.
    logger.info("Checking BGP VPNv4 labels on r2")
    label_list = set()
    bgp_vpnv4_table_check_all(tgen.gears["r2"], label_list)

    # Check MPLS labels received on r1.
    # NOTE(review): label_list is passed positionally, i.e. as 'blacklist' —
    # confirm this is intended rather than label_list=label_list.
    mpls_table_check(tgen.gears["r1"], label_list)
def test_flapping_bgp_vrf_down():
    """
    Turn down a remote BGP session
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Unpeering BGP on r11")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp 65500\nno neighbor 192.0.2.100\n",
        isjson=False,
    )

    def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
        # json_cmp returning None means the prefix is still there.
        output = json.loads(
            router.vtysh_cmd(
                "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
            )
        )
        if topotest.json_cmp(output, {"prefix": prefix}) is None:
            return "not good"
        return None

    # The r11 prefix must disappear from r1's VRF table.
    probe = functools.partial(
        _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv4", "172.31.0.11/32"
    )
    success, _ = topotest.run_and_expect(probe, None, count=10, wait=0.5)
    assert (
        success
    ), "r1, prefix 172.31.0.11/32 from r11 did not disappear. r11 still connected to rr ?"

    # No r11-originated prefix may remain in r2's VPNv4 table.
    logger.info("Checking BGP VPNv4 labels on r2")
    for entry in PREFIXES_R11:
        dump = tgen.gears["r2"].vtysh_cmd(
            "show bgp ipv4 vpn {} json".format(entry), isjson=True
        )
        for rd in dump:
            assert False, "r2, {}, route distinguisher {} present".format(entry, rd)

    # r1's MPLS table must no longer reference r11's nexthop.
    mpls_table_check(tgen.gears["r1"], blacklist=["192.0.2.11"])
def test_flapping_bgp_vrf_up():
    """
    Turn up a remote BGP session
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Peering BGP on r11")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp 65500\nneighbor 192.0.2.100 remote-as 65500\n",
        isjson=False,
    )

    # Check r2 re-learns prefix 172.31.0.11/32 over VPNv4.
    probe = functools.partial(
        check_show_bgp_vpn_prefix_found,
        tgen.gears["r2"],
        "ipv4",
        "172.31.0.11/32",
        "444:1",
    )
    success, _ = topotest.run_and_expect(probe, None, count=10, wait=0.5)
    assert (
        success
    ), "r2, prefix 172.31.0.11/32 from r11 not present. r11 still disconnected from rr ?"

    # Per-nexthop label assignment must be consistent again.
    bgp_vpnv4_table_check_all(tgen.gears["r2"])
def test_recursive_route():
    """
    Test static recursive route redistributed over BGP
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Enabling recursive static route")
    tgen.gears["r1"].vtysh_cmd(
        "configure terminal\nvrf vrf1\nip route 172.31.0.30/32 172.31.0.20\n",
        isjson=False,
    )
    logger.info("Checking BGP VPNv4 labels on r2")

    # The recursive prefix must be exported to r2 over VPNv4.
    probe = functools.partial(
        check_show_bgp_vpn_prefix_found,
        tgen.gears["r2"],
        "ipv4",
        "172.31.0.30/32",
        "444:1",
    )
    ok, _ = topotest.run_and_expect(probe, None, count=10, wait=0.5)
    assert ok, "r2, vpnv4 update 172.31.0.30 not found"

    # It must share its label with the other routes resolved via r11.
    bgp_vpnv4_table_check(tgen.gears["r2"], group=PREFIXES_R11 + ["172.31.0.30/32"])

    # diagnostic
    logger.info("Dumping label nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
    logger.info("Dumping nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)

    logger.info("Disabling recursive static route")
    tgen.gears["r1"].vtysh_cmd(
        "configure terminal\nvrf vrf1\nno ip route 172.31.0.30/32 172.31.0.20\n",
        isjson=False,
    )
    logger.info("Checking BGP VPNv4 labels on r2")

    # The prefix must be withdrawn once the static route goes away.
    probe = functools.partial(
        check_show_bgp_vpn_prefix_not_found,
        tgen.gears["r2"],
        "ipv4",
        "172.31.0.30/32",
        "444:1",
    )
    ok, _ = topotest.run_and_expect(probe, None, count=10, wait=0.5)
    assert ok, "r2, vpnv4 update 172.31.0.30 still present"
def test_prefix_changes_interface():
    """
    Test BGP update for a given prefix learnt on different interface
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Advertise a fresh prefix from r11 (reachable over r1-eth1 side).
    logger.info("Enabling a 172.31.0.50/32 prefix for r11")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
        isjson=False,
    )

    # Check r2 received vpnv4 update with 172.31.0.50
    test_func = functools.partial(
        check_show_bgp_vpn_prefix_found,
        tgen.gears["r2"],
        "ipv4",
        "172.31.0.50/32",
        "444:1",
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r2, vpnv4 update 172.31.0.50 not found"

    # diagnostic
    logger.info("Dumping label nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)

    # The new prefix must share r11's per-nexthop label (single value).
    label_list = set()
    bgp_vpnv4_table_check(
        tgen.gears["r2"],
        group=["172.31.0.11/32", "172.31.0.111/32", "172.31.0.50/32"],
        label_list=label_list,
    )
    assert (
        len(label_list) == 1
    ), "Multiple Label values found for updates from r11 found"

    # Remember which interface the current label resolves out of.
    oldlabel = label_list.pop()
    logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
    old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
    logger.info(
        "r1, outgoing interface used by label {} is {}".format(
            oldlabel, old_outgoing_interface
        )
    )

    # Move the prefix to r13, which sits behind a different r1 interface.
    logger.info("Moving the 172.31.0.50/32 prefix from r11 to r13")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
        isjson=False,
    )
    tgen.gears["r13"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nnetwork 172.31.0.50/32",
        isjson=False,
    )

    # Check r2 removed 172.31.0.50 vpnv4 update with old label
    test_func = functools.partial(
        check_show_bgp_vpn_prefix_not_found,
        tgen.gears["r2"],
        "ipv4",
        "172.31.0.50/32",
        "444:1",
        label=oldlabel,
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert (
        success
    ), "r2, vpnv4 update 172.31.0.50 with old label {0} still present".format(oldlabel)

    # diagnostic
    logger.info("Dumping label nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)

    # Check r2 received new 172.31.0.50 vpnv4 update
    test_func = functools.partial(
        check_show_bgp_vpn_prefix_found,
        tgen.gears["r2"],
        "ipv4",
        "172.31.0.50/32",
        "444:1",
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r2, vpnv4 update 172.31.0.50 not found"

    # The prefix must now share r13's per-nexthop label.
    label_list = set()
    bgp_vpnv4_table_check(
        tgen.gears["r2"],
        group=PREFIXES_R13 + ["172.31.0.50/32"],
        label_list=label_list,
    )
    assert (
        len(label_list) == 1
    ), "Multiple Label values found for updates from r13 found"

    newlabel = label_list.pop()
    logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
    new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
    logger.info(
        "r1, outgoing interface used by label {} is {}".format(
            newlabel, new_outgoing_interface
        )
    )
    # The label's outgoing interface must follow the BGP update's move.
    if old_outgoing_interface == new_outgoing_interface:
        assert 0, "r1, outgoing interface did not change whereas BGP update moved"

    logger.info("Restoring state by removing the 172.31.0.50/32 prefix from r13")
    tgen.gears["r13"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv4 unicast\nno network 172.31.0.50/32",
        isjson=False,
    )
def test_changing_default_label_value():
    """
    Change the MPLS default value
    Check that r1 VPNv4 entries have the 222 label value
    Check that MPLS entry with old label value is no more present
    Check that MPLS entry for local traffic has inLabel set to 222
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    router = tgen.gears["r1"]

    # counting the number of labels used in the VPNv4 table
    label_list = set()
    logger.info("r1, vpnv4 table, check the number of labels used before modification")
    bgp_vpnv4_table_check_all(router, label_list)
    old_len = len(label_list)
    # With per-nexthop allocation active there must be more than one label.
    assert (
        old_len != 1
    ), "r1, number of labels used should be greater than 1, oberved {} ".format(old_len)

    logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
    router.vtysh_cmd(
        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export 222\n",
        isjson=False,
    )

    # Check r1 updated the MPLS entry with the 222 label value
    logger.info(
        "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
    )
    test_func = functools.partial(
        check_show_mpls_table_entry_label_found, router, 222, "vrf1"
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r1, mpls entry with label 222 not found"

    # check label repartition is ok
    logger.info("r1, vpnv4 table, check the number of labels used after modification")
    label_list = set()
    bgp_vpnv4_table_check_all(router, label_list)
    new_len = len(label_list)
    # Changing only the default label must not change the label count.
    assert (
        old_len == new_len
    ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
        new_len, old_len
    )

    # Prefixes previously bound to the per-VRF label must now show 222.
    logger.info(
        "r1, vpnv4 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
    )
    bgp_vpnv4_table_check(
        router, group=["192.168.255.0/24", "192.0.2.0/24"], label_value_expected=222
    )
def test_unconfigure_allocation_mode_nexthop():
    """
    Test unconfiguring allocation mode per nexthop
    Check that show mpls table has no entry with label 17 (previously used)
    Check that all VPN updates on r1 should have label value moved to 222
    Check that show mpls table will only have 222 label value
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Unconfiguring allocation mode per nexthop")
    router = tgen.gears["r1"]
    router.vtysh_cmd(
        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nno label vpn export allocation-mode per-nexthop\n",
        isjson=False,
    )

    # Check r1 updated the MPLS entry with the 222 label value
    # NOTE(review): assumes label 17 was one of the per-nexthop labels
    # allocated earlier in the test run — confirm against prior tests.
    logger.info(
        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
    )
    test_func = functools.partial(
        check_show_mpls_table_entry_label_not_found, router, 17
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r1, mpls entry with label 17 still present"

    # Check vpnv4 routes from r1
    logger.info("Checking vpnv4 routes on r1")
    label_list = set()
    # same=True: back in per-VRF mode, every update must share one label.
    bgp_vpnv4_table_check_all(router, label_list=label_list, same=True)
    assert len(label_list) == 1, "r1, multiple Label values found for vpnv4 updates"
    new_label = label_list.pop()
    assert (
        new_label == 222
    ), "r1, wrong label value in VPNv4 table, expected 222, observed {}".format(
        new_label
    )

    # Check mpls table with 222 value
    logger.info("Checking MPLS values on show mpls table of r1")
    label_list = set()
    label_list.add(222)
    mpls_table_check(router, label_list=label_list)
def test_reconfigure_allocation_mode_nexthop():
    """
    Test re-configuring allocation mode per nexthop
    Check that show mpls table has no entry with label 17
    Check that all VPN updates on r1 should have multiple label values and not only 222
    Check that show mpls table will have multiple label values and not only 222
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Reconfiguring allocation mode per nexthop")
    router = tgen.gears["r1"]
    router.vtysh_cmd(
        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv4 unicast\nlabel vpn export allocation-mode per-nexthop\n",
        isjson=False,
    )

    # Check that show mpls table has no entry with label 17.
    # (log message fixed: it previously said "is present" while the code —
    # and the docstring — check that the entry is NOT present)
    logger.info(
        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
    )
    test_func = functools.partial(
        check_show_mpls_table_entry_label_not_found, router, 17
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r1, mpls entry with label 17 still present"

    # Check vpnv4 routes from r1
    logger.info("Checking vpnv4 routes on r1")
    label_list = set()
    bgp_vpnv4_table_check_all(router, label_list=label_list)
    # Per-nexthop mode must spread updates over several label values.
    assert len(label_list) != 1, "r1, only 1 label values found for vpnv4 updates"

    # Check mpls table with all values
    logger.info("Checking MPLS values on show mpls table of r1")
    mpls_table_check(router, label_list=label_list)
def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()
    if tgen.is_memleak_enabled():
        tgen.report_memory_leaks()
    else:
        pytest.skip("Memory leak test/report is disabled")
if __name__ == "__main__":
    # Allow running this test file directly, forwarding extra CLI arguments
    # to pytest with output capture disabled.
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))

View File

@ -1,183 +0,0 @@
{
"vrfName": "vrf1",
"localAS": 65500,
"routes":
{
"10:200::/64": [
{
"valid": true,
"bestpath": true,
"prefix": "10:200::",
"prefixLen": 64,
"network": "10:200::/64",
"nexthops": [
{
"ip": "192:168::2",
"afi": "ipv6",
"used": true
}
]
}
],
"172:31::11/128": [
{
"valid":true,
"bestpath":true,
"prefix":"172:31::11",
"prefixLen":128,
"network":"172:31::11/128",
"peerId":"192:2::100",
"nexthops":[
{
"ip":"192:2::11",
"afi":"ipv6",
"scope":"global"
}
]
}
],
"172:31::12/128": [
{
"valid":true,
"bestpath":true,
"prefix":"172:31::12",
"prefixLen":128,
"network":"172:31::12/128",
"peerId":"192:2::100",
"nexthops":[
{
"ip":"192:2::12",
"afi":"ipv6",
"scope":"global"
},
{
"scope": "link-local",
"used":true
}
]
}
],
"172:31::13/128": [
{
"valid":true,
"bestpath":true,
"prefix":"172:31::13",
"prefixLen":128,
"network":"172:31::13/128",
"peerId":"192:168::255:13",
"nexthops":[
{
"ip":"192:168::255:13",
"afi":"ipv6",
"scope": "global"
},
{
"scope": "link-local"
}
]
}
],
"172:31::14/128": [
{
"valid":true,
"bestpath":true,
"prefix":"172:31::14",
"prefixLen":128,
"network":"172:31::14/128",
"peerId":"(unspec)",
"nexthops":[
{
"ip":"192:2::14",
"afi":"ipv6",
"used":true
}
]
}
],
"172:31::15/128": [
{
"valid":true,
"bestpath":true,
"prefix":"172:31::15",
"prefixLen":128,
"network":"172:31::15/128",
"peerId":"(unspec)",
"nexthops":[
{
"ip":"192:2::12",
"afi":"ipv6",
"used":true
}
]
}
],
"172:31::20/128": [
{
"valid":true,
"bestpath":true,
"prefix":"172:31::20",
"prefixLen":128,
"network":"172:31::20/128",
"peerId":"192:2::100",
"nexthops":[
{
"ip":"192:2::11",
"afi":"ipv6",
"scope":"global"
}
]
}
],
"172:31::111/128": [
{
"valid":true,
"bestpath":true,
"prefix":"172:31::111",
"prefixLen":128,
"network":"172:31::111/128",
"peerId":"192:2::100",
"nexthops":[
{
"ip":"192:2::11",
"afi":"ipv6",
"scope":"global"
}
]
}
],
"192:2::/64": [
{
"valid":true,
"bestpath":true,
"prefix":"192:2::",
"prefixLen":64,
"network":"192:2::/64",
"peerId":"(unspec)",
"nexthops":[
{
"ip":"::",
"afi":"ipv6",
"used":true
}
]
}
],
"192:168::255:0/112": [
{
"valid":true,
"bestpath":true,
"prefix":"192:168::255:0",
"prefixLen":112,
"network":"192:168::255:0/112",
"peerId":"(unspec)",
"nexthops":[
{
"ip":"::",
"afi":"ipv6",
"used":true
}
]
}
]
}
}

View File

@ -1,46 +0,0 @@
debug bgp vpn leak-from-vrf
debug bgp vpn label
debug bgp nht
debug bgp updates out
router bgp 65500
bgp router-id 192.168.0.1
no bgp ebgp-requires-policy
neighbor 192:168::2 remote-as 65501
address-family ipv4 unicast
no neighbor 192:168::2 activate
exit-address-family
address-family ipv6 vpn
neighbor 192:168::2 activate
neighbor 192:168::2 soft-reconfiguration inbound
exit-address-family
!
router bgp 65500 vrf vrf1
bgp router-id 192.168.0.1
neighbor 192:2::100 remote-as 65500
neighbor 192:168::255:13 remote-as 65500
address-family ipv6 unicast
neighbor 192:2::100 activate
neighbor 192:2::100 route-map rmap in
neighbor 192:168::255:13 activate
neighbor 192:168::255:13 route-map rmap in
redistribute connected
redistribute static
label vpn export allocation-mode per-nexthop
label vpn export auto
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
!
interface r1-eth0
mpls bgp forwarding
!
bgp community-list 1 seq 5 permit 10:10
!
route-map rmap permit 1
match community 1
set ipv6 next-hop prefer-global
!
route-map rmap permit 2
!

View File

@ -1,18 +0,0 @@
log stdout
debug zebra nht
!debug zebra kernel msgdump recv
!debug zebra dplane detailed
!debug zebra packet recv
interface r1-eth1 vrf vrf1
ipv6 address 192:2::1/64
!
interface r1-eth2 vrf vrf1
ipv6 address 192:168::255:1/112
!
interface r1-eth0
ip address 192:168::1/112
!
vrf vrf1
ipv6 route 172:31::14/128 192:2::14
ipv6 route 172:31::15/128 192:2::12
exit-vrf

View File

@ -1,18 +0,0 @@
router bgp 65500
bgp router-id 11.11.11.11
no bgp network import-check
neighbor 192:2::100 remote-as 65500
address-family ipv4 unicast
no neighbor 192:2::100 activate
!
address-family ipv6 unicast
neighbor 192:2::100 activate
neighbor 192:2::100 route-map rmap out
network 172:31::11/128
network 172:31::111/128
network 172:31::20/128
exit-address-family
!
route-map rmap permit 1
set community 10:10
!

View File

@ -1,4 +0,0 @@
log stdout
interface r11-eth0
ipv6 address 192:2::11/64
!

View File

@ -1,13 +0,0 @@
router bgp 65500
bgp router-id 12.12.12.12
no bgp network import-check
neighbor 192:2::100 remote-as 65500
address-family ipv4 unicast
no neighbor 192:2::100 activate
!
address-family ipv6 unicast
neighbor 192:2::100 activate
network 172:31::12/128
exit-address-family
!

View File

@ -1,4 +0,0 @@
log stdout
interface r12-eth0
ipv6 address 192:2::12/64
!

View File

@ -1,16 +0,0 @@
router bgp 65500
bgp router-id 13.13.13.13
no bgp network import-check
neighbor 192:168::255:1 remote-as 65500
address-family ipv4 unicast
no neighbor 192:168::255:1 activate
exit-address-family
address-family ipv6 unicast
neighbor 192:168::255:1 activate
neighbor 192:168::255:1 route-map rmap out
network 172:31::0:13/128
exit-address-family
!
route-map rmap permit 1
set community 10:10
!

View File

@ -1,4 +0,0 @@
log stdout
interface r13-eth0
ipv6 address 192:168::255:13/112
!

View File

@ -1,187 +0,0 @@
{
"vrfName": "default",
"localAS": 65501,
"routes":
{
"routeDistinguishers":
{
"444:1":
{
"172:31::11/128": [
{
"valid": true,
"bestpath": true,
"prefix": "172:31::11",
"prefixLen": 128,
"network": "172:31::11/128",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"172:31::12/128": [
{
"valid": true,
"bestpath": true,
"prefix": "172:31::12",
"prefixLen": 128,
"network": "172:31::12/128",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"172:31::13/128": [
{
"valid": true,
"bestpath": true,
"prefix": "172:31::13",
"prefixLen": 128,
"network": "172:31::13/128",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"172:31::14/128": [
{
"valid": true,
"bestpath": true,
"prefix": "172:31::14",
"prefixLen": 128,
"network": "172:31::14/128",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"172:31::15/128": [
{
"valid": true,
"bestpath": true,
"prefix": "172:31::15",
"prefixLen": 128,
"network": "172:31::15/128",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"172:31::20/128": [
{
"valid": true,
"bestpath": true,
"prefix": "172:31::20",
"prefixLen": 128,
"network": "172:31::20/128",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"172:31::111/128": [
{
"valid": true,
"bestpath": true,
"prefix": "172:31::111",
"prefixLen": 128,
"network": "172:31::111/128",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"192:2::/64": [
{
"valid": true,
"bestpath": true,
"prefix": "192:2::",
"prefixLen": 64,
"network": "192:2::/64",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
],
"192:168::255:0/112": [
{
"valid": true,
"bestpath": true,
"prefix": "192:168::255:0",
"prefixLen": 112,
"network": "192:168::255:0/112",
"peerId": "192:168::1",
"nexthops": [
{
"ip": "192:168::1",
"afi": "ipv6",
"used": true
}
]
}
]
},
"444:2":
{
"10:200::/64": [
{
"valid": true,
"bestpath": true,
"prefix": "10:200::",
"prefixLen": 64,
"network": "10:200::/64",
"peerId": "(unspec)",
"nhVrfName": "vrf1",
"nexthops": [
{
"ip": "::",
"afi": "ipv6",
"used": true
}
]
}
]
}
}
}
}

View File

@ -1,25 +0,0 @@
router bgp 65501
bgp router-id 192.168.0.2
no bgp ebgp-requires-policy
neighbor 192:168::1 remote-as 65500
address-family ipv4 unicast
no neighbor 192:168::1 activate
exit-address-family
address-family ipv6 vpn
neighbor 192:168::1 activate
exit-address-family
!
router bgp 65501 vrf vrf1
bgp router-id 192.168.0.2
address-family ipv6 unicast
redistribute connected
label vpn export 102
rd vpn export 444:2
rt vpn both 52:100
export vpn
import vpn
exit-address-family
!
interface r2-eth0
mpls bgp forwarding
!

View File

@ -1,7 +0,0 @@
log stdout
interface r2-eth1 vrf vrf1
ipv6 address 10:200::2/64
!
interface r2-eth0
ipv6 address 192:168::2/112
!

View File

@ -1,24 +0,0 @@
router bgp 65500
bgp router-id 100.100.100.100
no bgp network import-check
neighbor 192:2::1 remote-as 65500
neighbor 192:2::11 remote-as 65500
neighbor 192:2::12 remote-as 65500
address-family ipv4 unicast
no neighbor 192:2::1 activate
no neighbor 192:2::11 activate
no neighbor 192:2::12 activate
!
address-family ipv6 unicast
neighbor 192:2::1 activate
neighbor 192:2::1 route-reflector-client
neighbor 192:2::1 nexthop-local unchanged
neighbor 192:2::11 activate
neighbor 192:2::11 route-reflector-client
neighbor 192:2::11 nexthop-local unchanged
neighbor 192:2::12 activate
neighbor 192:2::12 route-reflector-client
neighbor 192:2::12 nexthop-local unchanged
exit-address-family
!

View File

@ -1,4 +0,0 @@
log stdout
interface rr-eth0
ipv6 address 192:2::100/64
!

View File

@ -1,816 +0,0 @@
#!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# test_bgp_vpnv6_per_nexthop_label.py
#
# Copyright 2023 6WIND S.A.
#
"""
test_bgp_vpnv6_per_nexthop_label.py: Test the FRR BGP daemon using EBGP peering
Let us exchange VPNv6 updates between both devices
Updates from r1 will originate from the same RD, but will have separate
label values.
+----------+
| r11 |
|192::2:11 +---+
| | | +----+--------+ +----------+
+----------+ | 192::2::1 |vrf | r1 |192:168::/112 | r2 |
+-------------------+ | 1+--------------+ |
+----------+ | |VRF1|AS65500 | | AS65501 |
| r12 | | +--------------+ | VPNV4| |VPNV4 |
|192::2:12 +---+ |192:168::255:1+-+--+--------+ +----------+
| | |
+----------+ |
|
+----------+ |
| r13 | |
|192:168:: +--------+
| 255:13 |
+----------+
"""
import os
import sys
import json
from functools import partial
import pytest
import functools
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
# Run every test in this module with the bgpd topotest marker.
pytestmark = [pytest.mark.bgpd]

# Prefix groups used by the checks below; each group is expected to share a
# single MPLS label when per-nexthop allocation is active.
PREFIXES_R11 = ["172:31::11/128", "172:31::20/128", "172:31::111/128"]
PREFIXES_R12 = ["172:31::12/128"]
# static route on r1 whose nexthop (192:2::12) resolves via r12
PREFIXES_REDIST_R12 = ["172:31::15/128"]
PREFIXES_R13 = ["172:31::13/128"]
# static route on r1 whose nexthop (192:2::14) resolves via r14
PREFIXES_REDIST_R14 = ["172:31::14/128"]
PREFIXES_CONNECTED = ["192:168::255/112", "192:2::/64"]
def build_topo(tgen):
    """Create the routers and wire them together through switches."""
    # r1/r2 are the PE routers, rr is the route reflector, the rest are CEs.
    for rname in ("r1", "r2", "r11", "r12", "r13", "r14", "rr"):
        tgen.add_router(rname)

    # switch name -> routers attached to it, in link order
    wiring = (
        ("s1", ("r1", "r2")),
        ("s2", ("r1", "r11", "r12", "rr")),
        ("s3", ("r2",)),
        ("s4", ("r1", "r13")),
        ("s5", ("r1", "r14")),
    )
    for sname, members in wiring:
        switch = tgen.add_switch(sname)
        for rname in members:
            switch.add_link(tgen.gears[rname])
def _populate_iface():
    """Prepare kernel-side VRF/MPLS state on the hosts before FRR starts.

    r1 and r2 get a 'vrf1' VRF with MPLS input enabled on their eth0;
    r11 and r13 get link-local address generation disabled on eth0.
    """
    tgen = get_topogen()
    cmds_list = [
        "ip link add vrf1 type vrf table 10",
        "echo 100000 > /proc/sys/net/mpls/platform_labels",
        "ip link set dev vrf1 up",
        "ip link set dev {0}-eth1 master vrf1",
        "echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
    ]
    cmds_list_plus = [
        "ip link set dev {0}-eth2 master vrf1",
    ]
    cmd_no_ll = [
        "ip link set dev {0}-eth0 down",
        "echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/addr_gen_mode",
        "ip link set dev {0}-eth0 up",
    ]

    def _run_cmds(rtr, cmds):
        # Format, log and run each shell command on the given host.
        for cmd in cmds:
            formatted = cmd.format(rtr)
            # Fix: log the actual command executed; the old code bound an
            # unused 'input' variable (shadowing the builtin) and logged the
            # unformatted template instead.
            logger.info("input: " + formatted)
            output = tgen.net[rtr].cmd(formatted)
            logger.info("output: " + output)

    _run_cmds("r1", cmds_list)
    _run_cmds("r1", cmds_list_plus)
    _run_cmds("r2", cmds_list)
    for rtr in ("r11", "r13"):
        _run_cmds(rtr, cmd_no_ll)
def setup_module(mod):
    """Sets up the pytest environment: topology, kernel state, FRR configs."""
    tgen = Topogen(build_topo, mod.__name__)
    tgen.start_topology()

    _populate_iface()

    # Load per-router zebra and bgpd configuration files.
    for rname, router in tgen.routers().items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, rname, "zebra.conf")
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, rname, "bgpd.conf")
        )

    # Initialize all routers.
    tgen.start_router()
def teardown_module(_mod):
    """Teardown the pytest environment."""
    get_topogen().stop_topology()
def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=None):
    """
    Dump and check that vpnv6 entries have the same MPLS label value
    * 'router': the router to check
    * 'group': the list of prefixes to check. a single label value for the group has to be found
    * 'label_list': check that the label values are not present in the vpnv6 entries
    *               that list is updated with the present label value
    * 'label_value_expected': check that the mpls label read is the same as that value
    """
    stored_label_inited = False
    for prefix in group:
        dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True)
        for rd, pathes in dump.items():
            for path in pathes["paths"]:
                assert (
                    "remoteLabel" in path.keys()
                ), "{0}, {1}, remoteLabel not present".format(router.name, prefix)
                logger.info(
                    "{0}, {1}, label value is {2}".format(
                        router.name, prefix, path["remoteLabel"]
                    )
                )
                if stored_label_inited:
                    # every prefix of the group must carry the same label as
                    # the first one seen
                    # fix: closing parenthesis added to the assert message
                    assert (
                        path["remoteLabel"] == stored_label
                    ), "{0}, {1}, label value not expected one (expected {2}, observed {3})".format(
                        router.name, prefix, stored_label, path["remoteLabel"]
                    )
                else:
                    stored_label = path["remoteLabel"]
                    stored_label_inited = True
                    if label_list is not None:
                        # the group's label must be unique across groups
                        assert (
                            stored_label not in label_list
                        ), "{0}, {1}, label already detected in a previous prefix".format(
                            router.name, prefix
                        )
                        label_list.add(stored_label)

                if label_value_expected:
                    # fix: closing parenthesis added to the assert message
                    assert (
                        path["remoteLabel"] == label_value_expected
                    ), "{0}, {1}, label value not expected (expected {2}, observed {3})".format(
                        router.name, prefix, label_value_expected, path["remoteLabel"]
                    )
def bgp_vpnv6_table_check_all(router, label_list=None, same=False):
    """
    Dump and check that vpnv6 entries are correctly configured with specific label values
    * 'router': the router to check
    * 'label_list': check that the label values are not present in the vpnv6 entries
    *               that list is updated with the present label value found.
    * 'same': by default, set to False. Addresses groups are classified by addresses.
    *         if set to True, all entries of all groups should have a unique label value
    """
    groups = (
        PREFIXES_R11,
        PREFIXES_R12,
        PREFIXES_REDIST_R12,
        PREFIXES_R13,
        PREFIXES_REDIST_R14,
        PREFIXES_CONNECTED,
    )
    if same:
        # one single check over the concatenation of all groups
        merged = []
        for grp in groups:
            merged += grp
        bgp_vpnv6_table_check(router, group=merged, label_list=label_list)
    else:
        # one check per group: each group has its own label
        for grp in groups:
            bgp_vpnv6_table_check(router, group=grp, label_list=label_list)
def mpls_table_check(router, blacklist=None, label_list=None, whitelist=None):
    """
    Dump and check 'show mpls table json' output. An assert is triggered in case test fails
    * 'router': the router to check
    * 'blacklist': the list of nexthops (IP or interface) that should not be on output
    * 'label_list': the list of labels that should be in inLabel value
    * 'whitelist': the list of nexthops (IP or interface) that should be on output
    """
    nexthop_list = []
    if blacklist:
        # fix: extend() so each blacklisted nexthop becomes an element of the
        # list; append() inserted the whole list as one (never matching)
        # element, making the blacklist check a no-op
        nexthop_list.extend(blacklist)
    logger.info("Checking MPLS labels on {}".format(router.name))
    dump = router.vtysh_cmd("show mpls table json", isjson=True)
    for in_label, label_info in dump.items():
        if label_list is not None:
            label_list.add(in_label)
        for nh in label_info["nexthops"]:
            assert (
                nh["installed"] == True and nh["type"] == "BGP"
            ), "{}, show mpls table, nexthop is not installed".format(router.name)
            if "nexthop" in nh.keys():
                # a given nexthop address must appear at most once
                assert (
                    nh["nexthop"] not in nexthop_list
                ), "{}, show mpls table, duplicated or blacklisted nexthop address".format(
                    router.name
                )
                nexthop_list.append(nh["nexthop"])
            elif "interface" in nh.keys():
                # a given nexthop interface must appear at most once
                assert (
                    nh["interface"] not in nexthop_list
                ), "{}, show mpls table, duplicated or blacklisted nexthop interface".format(
                    router.name
                )
                nexthop_list.append(nh["interface"])
            else:
                assert (
                    0
                ), "{}, show mpls table, entry with neither nexthop nor interface".format(
                    router.name
                )
    if whitelist:
        for entry in whitelist:
            assert (
                entry in nexthop_list
            ), "{}, show mpls table, entry with nexthop {} not present in nexthop list".format(
                router.name, entry
            )
def check_show_bgp_vpn_prefix_not_found(router, ipversion, prefix, rd, label=None):
    """Return None once the prefix (optionally with the given remote label)
    is absent from the BGP VPN table; a failure string while still present.
    Suitable as a topotest.run_and_expect() probe."""
    cmd = "show bgp {} vpn {} json".format(ipversion, prefix)
    output = json.loads(router.vtysh_cmd(cmd))
    if label:
        expected = {rd: {"prefix": prefix, "paths": [{"remoteLabel": label}]}}
    else:
        expected = {rd: {"prefix": prefix}}
    # json_cmp() returning None means the entry is still there -> not good
    if topotest.json_cmp(output, expected) is None:
        return "not good"
    return None
def check_show_bgp_vpn_prefix_found(router, ipversion, prefix, rd):
    """Return None once the prefix is present under the given RD in the BGP
    VPN table; a mismatch description otherwise."""
    cmd = "show bgp {} vpn {} json".format(ipversion, prefix)
    dump = json.loads(router.vtysh_cmd(cmd))
    return topotest.json_cmp(dump, {rd: {"prefix": prefix}})
def check_show_mpls_table_entry_label_found(router, inlabel, interface):
    """Return None once an installed MPLS entry with the given inLabel and
    outgoing interface exists; a mismatch description otherwise."""
    dump = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
    wanted = {
        "inLabel": inlabel,
        "installed": True,
        "nexthops": [{"interface": interface}],
    }
    return topotest.json_cmp(dump, wanted)
def check_show_mpls_table_entry_label_not_found(router, inlabel):
    """Return None once no installed MPLS entry with the given inLabel
    exists; a failure string while one is still present.

    Fix: the expected template used the key "inlabel", which can never match
    the "inLabel" key of the JSON output, so the probe succeeded even while
    the entry was still installed.
    """
    output = json.loads(router.vtysh_cmd("show mpls table {} json".format(inlabel)))
    expected = {"inLabel": inlabel, "installed": True}
    ret = topotest.json_cmp(output, expected)
    if ret is None:
        # entry still present
        return "not good"
    return None
def mpls_entry_get_interface(router, label):
    """
    Assert that the label is in MPLS table
    Assert an outgoing interface is programmed
    return the outgoing interface
    """
    logger.info("Checking MPLS labels on {}".format(router.name))
    dump = router.vtysh_cmd("show mpls table {} json".format(label), isjson=True)
    assert dump, "{}, show mpls table, inLabel {} not found".format(router.name, label)

    outgoing_interface = None
    # keep the interface of the last nexthop, as the original did
    for nh in dump["nexthops"]:
        assert (
            "interface" in nh
        ), "{}, show mpls table, nexthop interface not present for MPLS entry {}".format(
            router.name, label
        )
        outgoing_interface = nh["interface"]
    return outgoing_interface
def test_protocols_convergence():
    """
    Assert that all protocols have converged
    statuses as they depend on it.
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Check BGP IPv6 routing tables on VRF1 of r1
    logger.info("Checking BGP IPv6 routes for convergence on r1 VRF1")
    router = tgen.gears["r1"]
    json_file = "{}/{}/bgp_ipv6_routes_vrf1.json".format(CWD, router.name)
    expected = json.loads(open(json_file).read())
    test_func = partial(
        topotest.router_json_cmp,
        router,
        "show bgp vrf vrf1 ipv6 json",
        expected,
    )
    _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
    assertmsg = '"{}" JSON output mismatches'.format(router.name)
    assert result is None, assertmsg

    # Check the VPNv6 table of r2 against its reference JSON dump
    logger.info("Checking BGP VPNv6 routes for convergence on r2")
    router = tgen.gears["r2"]
    json_file = "{}/{}/bgp_vpnv6_routes.json".format(CWD, router.name)
    expected = json.loads(open(json_file).read())
    test_func = partial(
        topotest.router_json_cmp,
        router,
        "show bgp ipv6 vpn json",
        expected,
    )
    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assertmsg = '"{}" JSON output mismatches'.format(router.name)
    assert result is None, assertmsg

    # Check BGP labels received on r2
    logger.info("Checking BGP VPNv6 labels on r2")
    label_list = set()
    bgp_vpnv6_table_check_all(tgen.gears["r2"], label_list)
    # Check MPLS labels received on r1
    mpls_table_check(tgen.gears["r1"], label_list)
def test_flapping_bgp_vrf_down():
    """
    Turn down a remote BGP session
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Unpeering BGP on r11")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp 65500\nno neighbor 192:2::100\n",
        isjson=False,
    )

    def _bgp_prefix_not_found(router, vrf, ipversion, prefix):
        # run_and_expect probe: returns None once the prefix has disappeared
        # from the VRF table, "not good" while it is still present.
        output = json.loads(
            router.vtysh_cmd(
                "show bgp vrf {} {} {} json".format(vrf, ipversion, prefix)
            )
        )
        expected = {"prefix": prefix}
        ret = topotest.json_cmp(output, expected)
        if ret is None:
            return "not good"
        return None

    # Check prefix from r11 is not present
    test_func = functools.partial(
        _bgp_prefix_not_found, tgen.gears["r1"], "vrf1", "ipv6", "172:31::11/128"
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert (
        success
    ), "r1, prefix 172:31::11/128 from r11 did not disappear. r11 still connected to rr ?"

    # Check BGP updated received on r2 are not from r11
    logger.info("Checking BGP VPNv6 labels on r2")
    for entry in PREFIXES_R11:
        dump = tgen.gears["r2"].vtysh_cmd(
            "show bgp ipv6 vpn {} json".format(entry), isjson=True
        )
        # any RD found for an r11 prefix means the withdraw was not propagated
        for rd in dump:
            assert False, "r2, {}, route distinguisher {} present".format(entry, rd)
    mpls_table_check(tgen.gears["r1"], blacklist=["192:2::11"])
def test_flapping_bgp_vrf_up():
    """
    Turn up a remote BGP session
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Re-create and re-activate the r11 -> rr peering removed by the
    # previous test.
    logger.info("Peering BGP on r11")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp 65500\nneighbor 192:2::100 remote-as 65500\n",
        isjson=False,
    )
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp 65500\naddress-family ipv6 unicast\nneighbor 192:2::100 activate\n",
        isjson=False,
    )

    # Check r2 gets prefix 172:31::11/128
    test_func = functools.partial(
        check_show_bgp_vpn_prefix_found,
        tgen.gears["r2"],
        "ipv6",
        "172:31::11/128",
        "444:1",
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert (
        success
    ), "r2, prefix 172:31::11/128 from r11 not present. r11 still disconnected from rr ?"
    bgp_vpnv6_table_check_all(tgen.gears["r2"])
def test_recursive_route():
    """
    Test static recursive route redistributed over BGP
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Enabling recursive static route")
    tgen.gears["r1"].vtysh_cmd(
        "configure terminal\nvrf vrf1\nipv6 route 172:31::30/128 172:31::20\n",
        isjson=False,
    )
    logger.info("Checking BGP VPNv6 labels on r2")

    # that route should be sent along with label for 192.0.2.11
    def _prefix30_not_found(router):
        # probe: None once 172:31::30/128 is gone, "not good" while present
        output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
        expected = {"444:1": {"prefix": "172:31::30/128"}}
        ret = topotest.json_cmp(output, expected)
        if ret is None:
            return "not good"
        return None

    def _prefix30_found(router):
        # probe: None once 172:31::30/128 is present under RD 444:1
        output = json.loads(router.vtysh_cmd("show bgp ipv6 vpn 172:31::30/128 json"))
        expected = {"444:1": {"prefix": "172:31::30/128"}}
        return topotest.json_cmp(output, expected)

    # Check r2 received vpnv6 update with 172:31::30
    test_func = functools.partial(_prefix30_found, tgen.gears["r2"])
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r2, VPNv6 update 172:31::30 not found"

    # that route should be sent along with label for 192::2:11
    bgp_vpnv6_table_check(
        tgen.gears["r2"],
        group=PREFIXES_R11 + ["172:31::30/128"],
    )

    # diagnostic
    logger.info("Dumping label nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)
    logger.info("Dumping nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 nexthop detail", isjson=False)

    logger.info("Disabling recursive static route")
    tgen.gears["r1"].vtysh_cmd(
        "configure terminal\nvrf vrf1\nno ipv6 route 172:31::30/128 172:31::20\n",
        isjson=False,
    )

    # Check r2 removed 172:31::30 vpnv6 update
    test_func = functools.partial(_prefix30_not_found, tgen.gears["r2"])
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r2, VPNv6 update 172:31::30 still present"
def test_prefix_changes_interface():
    """
    Test BGP update for a given prefix learnt on different interface
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Enabling a 172:31::50/128 prefix for r11")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
        isjson=False,
    )

    # Check r2 received vpnv6 update with 172:31::50
    test_func = functools.partial(
        check_show_bgp_vpn_prefix_found,
        tgen.gears["r2"],
        "ipv6",
        "172:31::50/128",
        "444:1",
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r2, VPNv6 update 172:31::50 not found"

    # diagnostic
    logger.info("Dumping label nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)

    # The new prefix must share the same label as the other r11 prefixes.
    label_list = set()
    bgp_vpnv6_table_check(
        tgen.gears["r2"],
        group=PREFIXES_R11 + ["172:31::50/128"],
        label_list=label_list,
    )
    assert (
        len(label_list) == 1
    ), "Multiple Label values found for updates from r11 found"

    oldlabel = label_list.pop()
    logger.info("r1, getting the outgoing interface used by label {}".format(oldlabel))
    old_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], oldlabel)
    logger.info(
        "r1, outgoing interface used by label {} is {}".format(
            oldlabel, old_outgoing_interface
        )
    )

    logger.info("Moving the 172:31::50/128 prefix from r11 to r13")
    tgen.gears["r11"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
        isjson=False,
    )
    tgen.gears["r13"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nnetwork 172:31::50/128",
        isjson=False,
    )

    # Check r2 removed 172:31::50 vpnv6 update with old label
    test_func = functools.partial(
        check_show_bgp_vpn_prefix_not_found,
        tgen.gears["r2"],
        "ipv6",
        "172:31::50/128",
        "444:1",
        label=oldlabel,
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert (
        success
    ), "r2, vpnv6 update 172:31::50 with old label {0} still present".format(oldlabel)

    # diagnostic
    logger.info("Dumping label nexthop table")
    tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 label-nexthop detail", isjson=False)

    # Check r2 received new 172:31::50 vpnv6 update
    test_func = functools.partial(
        check_show_bgp_vpn_prefix_found,
        tgen.gears["r2"],
        "ipv6",
        "172:31::50/128",
        "444:1",
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r2, vpnv6 update 172:31::50 not found"

    # After the move, the prefix must share r13's label instead.
    label_list = set()
    bgp_vpnv6_table_check(
        tgen.gears["r2"],
        group=["172:31::13/128", "172:31::50/128"],
        label_list=label_list,
    )
    assert (
        len(label_list) == 1
    ), "Multiple Label values found for updates from r13 found"

    newlabel = label_list.pop()
    logger.info("r1, getting the outgoing interface used by label {}".format(newlabel))
    new_outgoing_interface = mpls_entry_get_interface(tgen.gears["r1"], newlabel)
    logger.info(
        "r1, outgoing interface used by label {} is {}".format(
            newlabel, new_outgoing_interface
        )
    )
    # Moving the prefix between CEs must change the outgoing interface of
    # the label used for it.
    if old_outgoing_interface == new_outgoing_interface:
        assert 0, "r1, outgoing interface did not change whereas BGP update moved"

    logger.info("Restoring state by removing the 172:31::50/128 prefix from r13")
    tgen.gears["r13"].vtysh_cmd(
        "configure terminal\nrouter bgp\naddress-family ipv6 unicast\nno network 172:31::50/128",
        isjson=False,
    )
def test_changing_default_label_value():
    """
    Change the MPLS default value
    Check that r1 VPNv6 entries have the 222 label value
    Check that MPLS entry with old label value is no more present
    Check that MPLS entry for local traffic has inLabel set to 222
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    router = tgen.gears["r1"]

    # counting the number of labels used in the VPNv6 table
    label_list = set()
    logger.info("r1, VPNv6 table, check the number of labels used before modification")
    bgp_vpnv6_table_check_all(router, label_list)
    old_len = len(label_list)
    # per-nexthop allocation is active, so more than one label is expected
    assert (
        old_len != 1
    ), "r1, number of labels used should be greater than 1, oberved {} ".format(old_len)

    logger.info("r1, vrf1, changing the default MPLS label value to export to 222")
    router.vtysh_cmd(
        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export 222\n",
        isjson=False,
    )

    # Check r1 updated the MPLS entry with the 222 label value
    logger.info(
        "r1, mpls table, check that MPLS entry with inLabel set to 222 has vrf1 interface"
    )
    test_func = functools.partial(
        check_show_mpls_table_entry_label_found, router, 222, "vrf1"
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r1, mpls entry with label 222 not found"

    # check label repartition is ok
    logger.info("r1, VPNv6 table, check the number of labels used after modification")
    label_list = set()
    bgp_vpnv6_table_check_all(router, label_list)
    new_len = len(label_list)
    assert (
        old_len == new_len
    ), "r1, number of labels after modification differ from previous, observed {}, expected {} ".format(
        new_len, old_len
    )

    logger.info(
        "r1, VPNv6 table, check that prefixes that were using the vrf label have refreshed the label value to 222"
    )
    bgp_vpnv6_table_check(router, group=PREFIXES_CONNECTED, label_value_expected=222)
def test_unconfigure_allocation_mode_nexthop():
    """
    Test unconfiguring allocation mode per nexthop
    Check on r2 that new MPLS label values have been propagated
    Check that show mpls table has no entry with label 17 (previously used)
    Check that all VPN updates on r1 should have label value moved to 222
    Check that show mpls table will only have 222 label value
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Unconfiguring allocation mode per nexthop")
    router = tgen.gears["r1"]
    dump = router.vtysh_cmd(
        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nno label vpn export allocation-mode per-nexthop\n",
        isjson=False,
    )

    # Check r1 updated the MPLS entry with the 222 label value
    # NOTE(review): label 17 is presumably one of the per-nexthop labels
    # allocated earlier in the run — confirm against the label pool start.
    logger.info(
        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
    )
    test_func = functools.partial(
        check_show_mpls_table_entry_label_not_found, router, 17
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r1, mpls entry with label 17 still present"

    # Check vpnv6 routes from r1
    logger.info("Checking VPNv6 routes on r1")
    label_list = set()
    bgp_vpnv6_table_check_all(router, label_list=label_list, same=True)
    assert len(label_list) == 1, "r1, multiple Label values found for VPNv6 updates"
    new_label = label_list.pop()
    assert (
        new_label == 222
    ), "r1, wrong label value in VPNv6 table, expected 222, observed {}".format(
        new_label
    )

    # Check mpls table with 222 value
    logger.info("Checking MPLS values on show mpls table of r1")
    label_list = set()
    label_list.add(222)
    mpls_table_check(router, label_list=label_list)
def test_reconfigure_allocation_mode_nexthop():
    """
    Test re-configuring allocation mode per nexthop
    Check that show mpls table has no entry with label 17
    Check that all VPN updates on r1 should have multiple label values and not only 222
    Check that show mpls table will have multiple label values and not only 222
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("Reconfiguring allocation mode per nexthop")
    router = tgen.gears["r1"]
    # fix: drop the unused 'dump' binding; the command output is not needed
    router.vtysh_cmd(
        "configure terminal\nrouter bgp 65500 vrf vrf1\naddress-family ipv6 unicast\nlabel vpn export allocation-mode per-nexthop\n",
        isjson=False,
    )

    # Check that show mpls table has no entry with label 17
    # fix: the log message said "is present" while the probe below waits for
    # the entry to be absent
    logger.info(
        "r1, mpls table, check that MPLS entry with inLabel set to 17 is not present"
    )
    test_func = functools.partial(
        check_show_mpls_table_entry_label_not_found, router, 17
    )
    success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
    assert success, "r1, mpls entry with label 17 still present"

    # Check vpnv6 routes from r1
    logger.info("Checking VPNv6 routes on r1")
    label_list = set()
    bgp_vpnv6_table_check_all(router, label_list=label_list)
    assert len(label_list) != 1, "r1, only 1 label values found for VPNv6 updates"

    # Check mpls table with all values
    logger.info("Checking MPLS values on show mpls table of r1")
    mpls_table_check(router, label_list=label_list)
def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()
    if tgen.is_memleak_enabled():
        tgen.report_memory_leaks()
    else:
        pytest.skip("Memory leak test/report is disabled")
if __name__ == "__main__":
    # Forward any extra CLI arguments to pytest, always disabling capture.
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))

View File

@ -97,8 +97,8 @@ static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
static struct zebra_nhlfe *
nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype, const union g_addr *gate,
ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
const mpls_label_t *labels, bool is_backup);
ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels,
bool is_backup);
static int nhlfe_del(struct zebra_nhlfe *nhlfe);
static void nhlfe_free(struct zebra_nhlfe *nhlfe);
static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe,
@ -212,11 +212,11 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label,
changed++;
} else {
/* Add LSP entry to this nexthop */
nhlfe = nhlfe_add(
lsp, lsp_type, nexthop->type, &nexthop->gate,
nexthop->ifindex, nexthop->vrf_id,
nexthop->nh_label->num_labels,
nexthop->nh_label->label, false /*backup*/);
nhlfe = nhlfe_add(lsp, lsp_type, nexthop->type,
&nexthop->gate, nexthop->ifindex,
nexthop->nh_label->num_labels,
nexthop->nh_label->label,
false /*backup*/);
if (!nhlfe)
return -1;
@ -1236,7 +1236,6 @@ static int nhlfe_nhop_match(struct zebra_nhlfe *nhlfe,
/*
* Locate NHLFE that matches with passed info.
* TODO: handle vrf_id if vrf backend is netns based
*/
static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
enum lsp_types_t lsp_type,
@ -1262,8 +1261,7 @@ static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
static struct zebra_nhlfe *
nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype, const union g_addr *gate,
ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
const mpls_label_t *labels)
ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels)
{
struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
@ -1280,7 +1278,7 @@ nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
nexthop_add_labels(nexthop, lsp_type, num_labels, labels);
nexthop->vrf_id = vrf_id;
nexthop->vrf_id = VRF_DEFAULT;
nexthop->type = gtype;
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4:
@ -1315,20 +1313,29 @@ nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
* Add primary or backup NHLFE. Base entry must have been created and
* duplicate check done.
*/
static struct zebra_nhlfe *
nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype, const union g_addr *gate,
ifindex_t ifindex, vrf_id_t vrf_id, uint8_t num_labels,
const mpls_label_t *labels, bool is_backup)
static struct zebra_nhlfe *nhlfe_add(struct zebra_lsp *lsp,
enum lsp_types_t lsp_type,
enum nexthop_types_t gtype,
const union g_addr *gate,
ifindex_t ifindex, uint8_t num_labels,
const mpls_label_t *labels, bool is_backup)
{
struct zebra_nhlfe *nhlfe;
if (!lsp)
return NULL;
/* Must have labels */
if (num_labels == 0 || labels == NULL) {
if (IS_ZEBRA_DEBUG_MPLS)
zlog_debug("%s: invalid nexthop: no labels", __func__);
return NULL;
}
/* Allocate new object */
nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, vrf_id,
num_labels, labels);
nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels,
labels);
if (!nhlfe)
return NULL;
@ -1503,18 +1510,16 @@ static json_object *nhlfe_json(struct zebra_nhlfe *nhlfe)
json_nhlfe = json_object_new_object();
json_object_string_add(json_nhlfe, "type", nhlfe_type2str(nhlfe->type));
if (nexthop->nh_label) {
json_object_int_add(json_nhlfe, "outLabel",
nexthop->nh_label->label[0]);
json_label_stack = json_object_new_array();
json_object_object_add(json_nhlfe, "outLabelStack",
json_label_stack);
for (i = 0; i < nexthop->nh_label->num_labels; i++)
json_object_array_add(
json_label_stack,
json_object_new_int(
nexthop->nh_label->label[i]));
}
json_object_int_add(json_nhlfe, "outLabel",
nexthop->nh_label->label[0]);
json_label_stack = json_object_new_array();
json_object_object_add(json_nhlfe, "outLabelStack", json_label_stack);
for (i = 0; i < nexthop->nh_label->num_labels; i++)
json_object_array_add(
json_label_stack,
json_object_new_int(nexthop->nh_label->label[i]));
json_object_int_add(json_nhlfe, "distance", nhlfe->distance);
if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED))
@ -1525,10 +1530,6 @@ static json_object *nhlfe_json(struct zebra_nhlfe *nhlfe)
case NEXTHOP_TYPE_IPV4_IFINDEX:
json_object_string_addf(json_nhlfe, "nexthop", "%pI4",
&nexthop->gate.ipv4);
if (nexthop->ifindex)
json_object_string_add(json_nhlfe, "interface",
ifindex2ifname(nexthop->ifindex,
nexthop->vrf_id));
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
@ -2241,8 +2242,8 @@ zebra_mpls_lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
num_labels, out_labels, false /*backup*/);
return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
out_labels, false /*backup*/);
}
/*
@ -2256,8 +2257,8 @@ struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nhlfe(
uint8_t num_labels, const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, VRF_DEFAULT,
num_labels, out_labels, true);
return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
out_labels, true);
}
/*
@ -2269,10 +2270,12 @@ struct zebra_nhlfe *zebra_mpls_lsp_add_nh(struct zebra_lsp *lsp,
{
struct zebra_nhlfe *nhlfe;
nhlfe = nhlfe_add(
lsp, lsp_type, nh->type, &nh->gate, nh->ifindex, nh->vrf_id,
nh->nh_label ? nh->nh_label->num_labels : 0,
nh->nh_label ? nh->nh_label->label : NULL, false /*backup*/);
if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
return NULL;
nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
nh->nh_label->num_labels, nh->nh_label->label,
false /*backup*/);
return nhlfe;
}
@ -2287,10 +2290,12 @@ struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nh(struct zebra_lsp *lsp,
{
struct zebra_nhlfe *nhlfe;
nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
nh->vrf_id,
nh->nh_label ? nh->nh_label->num_labels : 0,
nh->nh_label ? nh->nh_label->label : NULL, true);
if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
return NULL;
nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate,
nh->ifindex, nh->nh_label->num_labels,
nh->nh_label->label, true);
return nhlfe;
}
@ -3108,7 +3113,7 @@ static struct zebra_nhlfe *
lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
uint8_t num_out_labels, const mpls_label_t *out_labels,
enum nexthop_types_t gtype, const union g_addr *gate,
ifindex_t ifindex, vrf_id_t vrf_id, bool is_backup)
ifindex_t ifindex, bool is_backup)
{
struct zebra_nhlfe *nhlfe;
char buf[MPLS_LABEL_STRLEN];
@ -3128,18 +3133,13 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
struct nexthop *nh = nhlfe->nexthop;
assert(nh);
assert(nh->nh_label);
/* Clear deleted flag (in case it was set) */
UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED);
if (!nh->nh_label || num_out_labels == 0)
/* No change */
return nhlfe;
if (nh->nh_label &&
nh->nh_label->num_labels == num_out_labels &&
!memcmp(nh->nh_label->label, out_labels,
sizeof(mpls_label_t) * num_out_labels))
if (nh->nh_label->num_labels == num_out_labels
&& !memcmp(nh->nh_label->label, out_labels,
sizeof(mpls_label_t) * num_out_labels))
/* No change */
return nhlfe;
@ -3160,7 +3160,7 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
}
/* Update out label(s), trigger processing. */
if (nh->nh_label && nh->nh_label->num_labels == num_out_labels)
if (nh->nh_label->num_labels == num_out_labels)
memcpy(nh->nh_label->label, out_labels,
sizeof(mpls_label_t) * num_out_labels);
else {
@ -3170,7 +3170,7 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
}
} else {
/* Add LSP entry to this nexthop */
nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex, vrf_id,
nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex,
num_out_labels, out_labels, is_backup);
if (!nhlfe)
return NULL;
@ -3179,11 +3179,8 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
char buf2[MPLS_LABEL_STRLEN];
nhlfe2str(nhlfe, buf, sizeof(buf));
if (num_out_labels)
mpls_label2str(num_out_labels, out_labels, buf2,
sizeof(buf2), 0, 0);
else
snprintf(buf2, sizeof(buf2), "-");
mpls_label2str(num_out_labels, out_labels, buf2,
sizeof(buf2), 0, 0);
zlog_debug("Add LSP in-label %u type %d %snexthop %s out-label(s) %s",
lsp->ile.in_label, type, backup_str, buf,
@ -3202,8 +3199,6 @@ lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
/*
* Install an LSP and forwarding entry; used primarily
* from vrf zapi message processing.
* TODO: handle vrf_id parameter when mpls API extends to interface or SRTE
* changes
*/
int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, uint8_t num_out_labels,
@ -3225,7 +3220,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
nhlfe = lsp_add_nhlfe(lsp, type, num_out_labels, out_labels, gtype,
gate, ifindex, VRF_DEFAULT, false /*backup*/);
gate, ifindex, false /*backup*/);
if (nhlfe == NULL)
return -1;
@ -3244,8 +3239,8 @@ static int lsp_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
{
struct zebra_nhlfe *nhlfe;
nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
&znh->gate, znh->ifindex, znh->vrf_id,
nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels,
znh->type, &znh->gate, znh->ifindex,
false /*backup*/);
if (nhlfe == NULL)
return -1;
@ -3282,9 +3277,9 @@ static int lsp_backup_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
{
struct zebra_nhlfe *nhlfe;
nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels, znh->type,
&znh->gate, znh->ifindex, znh->vrf_id,
true /*backup*/);
nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num,
znh->labels, znh->type, &znh->gate,
znh->ifindex, true /*backup*/);
if (nhlfe == NULL) {
if (IS_ZEBRA_DEBUG_MPLS)
zlog_debug("%s: unable to add backup nhlfe, label: %u",
@ -3615,8 +3610,8 @@ int zebra_mpls_static_lsp_add(struct zebra_vrf *zvrf, mpls_label_t in_label,
} else {
/* Add static LSP entry to this nexthop */
nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate, ifindex,
VRF_DEFAULT, 1, &out_label, false /*backup*/);
nhlfe = nhlfe_add(lsp, ZEBRA_LSP_STATIC, gtype, gate,
ifindex, 1, &out_label, false /*backup*/);
if (!nhlfe)
return -1;
@ -3825,8 +3820,7 @@ void zebra_mpls_print_lsp_table(struct vty *vty, struct zebra_vrf *zvrf,
break;
}
if (nexthop->type != NEXTHOP_TYPE_IFINDEX &&
nexthop->nh_label)
if (nexthop->type != NEXTHOP_TYPE_IFINDEX)
out_label_str = mpls_label2str(
nexthop->nh_label->num_labels,
&nexthop->nh_label->label[0],