diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c index 883338610c..b2bb49b943 100644 --- a/bgpd/bgp_labelpool.c +++ b/bgpd/bgp_labelpool.c @@ -448,7 +448,7 @@ void bgp_lp_get( if (lp_fifo_count(&lp->requests) > lp->pending_count) { if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY, - lp->next_chunksize)) + lp->next_chunksize, true)) return; lp->pending_count += lp->next_chunksize; @@ -494,8 +494,18 @@ void bgp_lp_release( bf_release_index(chunk->allocated_map, index); chunk->nfree += 1; deallocated = true; + break; } assert(deallocated); + if (deallocated && + chunk->nfree == chunk->last - chunk->first + 1 && + lp_fifo_count(&lp->requests) == 0) { + bgp_zebra_release_label_range(chunk->first, + chunk->last); + list_delete_node(lp->chunks, node); + lp_chunk_free(chunk); + lp->next_chunksize = LP_CHUNK_SIZE_MIN; + } } } } @@ -642,7 +652,11 @@ void bgp_lp_event_zebra_up(void) } /* round up */ - chunks_needed = (labels_needed / lp->next_chunksize) + 1; + if (((float)labels_needed / (float)lp->next_chunksize) > + (labels_needed / lp->next_chunksize)) + chunks_needed = (labels_needed / lp->next_chunksize) + 1; + else + chunks_needed = (labels_needed / lp->next_chunksize); labels_needed = chunks_needed * lp->next_chunksize; /* @@ -650,10 +664,10 @@ void bgp_lp_event_zebra_up(void) */ list_delete_all_node(lp->chunks); - if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY, labels_needed)) + if (labels_needed && !bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY, + labels_needed, true)) return; - - lp->pending_count = labels_needed; + lp->pending_count += labels_needed; /* * Invalidate any existing labels and requeue them as requests diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h index 19b6f4eb77..b2bdfcec00 100644 --- a/bgpd/bgp_mplsvpn.h +++ b/bgpd/bgp_mplsvpn.h @@ -13,6 +13,7 @@ #include "bgpd/bgp_rd.h" #include "bgpd/bgp_zebra.h" #include "bgpd/bgp_vty.h" +#include "bgpd/bgp_label.h" #define MPLS_LABEL_IS_SPECIAL(label) ((label) <= MPLS_LABEL_EXTENSION) #define MPLS_LABEL_IS_NULL(label) \ @@ -165,6 +166,25 @@ static inline int vpn_leak_to_vpn_active(struct bgp *bgp_vrf, afi_t afi, return 0; } + /* Is there a "manual" export label that isn't allocated yet? 
*/ + if (!CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO) && + bgp_vrf->vpn_policy[afi].tovpn_label != BGP_PREVENT_VRF_2_VRF_LEAK && + bgp_vrf->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE && + (bgp_vrf->vpn_policy[afi].tovpn_label >= MPLS_LABEL_UNRESERVED_MIN && + !CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG))) { + if (!bgp_zebra_request_label_range(bgp_vrf->vpn_policy[afi] + .tovpn_label, + 1, false)) { + if (pmsg) + *pmsg = "manual label could not be allocated"; + return 0; + } + SET_FLAG(bgp_vrf->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG); + } + return 1; } diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index 5d6ae589fa..cdcb9a1cf9 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -9461,7 +9461,7 @@ DEFPY (af_label_vpn_export, "Automatically assign a label\n") { VTY_DECLVAR_CONTEXT(bgp, bgp); - mpls_label_t label = MPLS_LABEL_NONE; + mpls_label_t label = (mpls_label_t)label_val; afi_t afi; int idx = 0; bool yes = true; @@ -9469,24 +9469,28 @@ DEFPY (af_label_vpn_export, if (argv_find(argv, argc, "no", &idx)) yes = false; - /* If "no ...", squash trailing parameter */ - if (!yes) - label_auto = NULL; - - if (yes) { - if (!label_auto) - label = label_val; /* parser should force unsigned */ - } - afi = vpn_policy_getafi(vty, bgp, false); if (afi == AFI_MAX) return CMD_WARNING_CONFIG_FAILED; - - if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) - /* no change */ - return CMD_SUCCESS; + if (yes) { + if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) + /* no change */ + return CMD_SUCCESS; + if (!label_auto && label == bgp->vpn_policy[afi].tovpn_label) + /* no change */ + return CMD_SUCCESS; + } else { + if (label_auto && !CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) + /* no match */ + return CMD_WARNING_CONFIG_FAILED; + if (!label_auto && label_val && + label != bgp->vpn_policy[afi].tovpn_label) + /* no change */ + return CMD_WARNING_CONFIG_FAILED; + } /* * pre-change: un-export vpn routes (vpn->vrf routes unaffected) @@ -9494,9 +9498,16 @@ DEFPY (af_label_vpn_export, vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(), bgp); - if (!label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) { + if (CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG)) { + bgp_zebra_release_label_range(bgp->vpn_policy[afi].tovpn_label, + bgp->vpn_policy[afi].tovpn_label); + UNSET_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG); + } else if (CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) { + /* release any previous auto label */ if (bgp->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE) { /* @@ -9513,16 +9524,32 @@ DEFPY (af_label_vpn_export, &bgp->vpn_policy[afi], bgp->vpn_policy[afi].tovpn_label); } - UNSET_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO); } - bgp->vpn_policy[afi].tovpn_label = label; - if (label_auto) { - SET_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO); - bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi], - vpn_leak_label_callback); + if (yes) { + if (label_auto) { + SET_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO); + /* fetch a label */ + bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE; + bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi], + vpn_leak_label_callback); + } else { + 
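+			/* manual label: record the configured value; it is
+			 * registered with the zebra label manager just below
+			 */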
bgp->vpn_policy[afi].tovpn_label = label;
+			UNSET_FLAG(bgp->vpn_policy[afi].flags,
+				   BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+			if (bgp->vpn_policy[afi].tovpn_label >=
+				    MPLS_LABEL_UNRESERVED_MIN &&
+			    bgp_zebra_request_label_range(bgp->vpn_policy[afi]
+								  .tovpn_label,
+							  1, false))
+				SET_FLAG(bgp->vpn_policy[afi].flags,
+					 BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG);
+		}
+	} else {
+		UNSET_FLAG(bgp->vpn_policy[afi].flags,
+			   BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+		bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE;
 	}
 
 	/* post-change: re-export vpn routes */
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 212b7f398b..e53416044e 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -3423,6 +3423,9 @@ static bool bgp_zebra_label_manager_connect(void)
 	/* tell label pool that zebra is connected */
 	bgp_lp_event_zebra_up();
 
+	/* tell BGP L3VPN that label manager is available */
+	if (bgp_get_default())
+		vpn_leak_postchange_all();
 	return true;
 }
 
@@ -3921,7 +3924,8 @@ void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
 	zebra_send_mpls_labels(zclient, cmd, &zl);
 }
 
-bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size)
+bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
+				   bool label_auto)
 {
 	int ret;
 	uint32_t start, end;
@@ -3943,7 +3947,13 @@ bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size)
 		return false;
 	}
 
-	bgp_lp_event_chunk(start, end);
+	if (label_auto)
+		/* automatic labels are serviced by the bgp label pool
+		 * manager, which allocates label chunks in pre-pools
+		 * and needs to be notified when new chunks become
+		 * available
+		 */
+		bgp_lp_event_chunk(start, end);
 
 	return true;
 }
diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h
index 0edae041d2..4696e4dc44 100644
--- a/bgpd/bgp_zebra.h
+++ b/bgpd/bgp_zebra.h
@@ -124,6 +124,7 @@ extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
 					 enum lsp_types_t ltype,
 					 struct prefix *p, uint32_t num_labels,
 					 mpls_label_t out_labels[]);
-extern bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size);
+extern bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
+					  bool label_auto);
 extern void bgp_zebra_release_label_range(uint32_t start, uint32_t end);
 
 #endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 42e4c167f6..65aed66b80 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -219,6 +219,8 @@ struct vpn_policy {
 #define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
 #define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
 #define BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP (1 << 4)
+/* Manual label is registered with the zebra label manager */
+#define BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG (1 << 5)
 
 /*
  * If we are importing another vrf into us keep a list of
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index ba6e3bf37f..7c50212d48 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -783,6 +783,22 @@ presence of the entry.
    21     Static      10.125.0.2        IPv4 Explicit Null
 
+MPLS label chunks
+-----------------
+
+MPLS label chunks are handled by the zebra label manager service,
+which ensures that the same label value or label chunk cannot be
+used by multiple control-plane (CP) routing daemons at the same time.
+
+Label requests originate from CP routing daemons, and are resolved
+over the default MPLS range (16-1048575). There are two kinds of
+requests:
+- Static label requests ask for an exact label value or range; for
+instance, the segment routing label block requests originating from
+IS-IS fall into this category.
+- Dynamic label requests only need a range of label values. The
+'label vpn export auto' command uses such requests.
+
 Allocated label chunks table can be dumped using the command
 
 .. clicmd:: show debugging label-table
@@ -796,6 +812,15 @@ Allocated label chunks table can be dumped using the command
       Proto ospf: [20000/21000]
       Proto isis: [22000/23000]
 
+.. clicmd:: mpls label dynamic-block (16-1048575) (16-1048575)
+
+   Define the range of labels from which dynamic label requests
+   allocate label chunks. This command guarantees that static label
+   values outside that range will not conflict with the dynamic
+   label requests. When the dynamic-block range is configured,
+   static label requests that fall within that range are not
+   accepted.
+
 .. _zebra-srv6:
 
 Segment-Routing IPv6
diff --git a/tests/topotests/bgp_accept_own/pe1/bgpd.conf b/tests/topotests/bgp_accept_own/pe1/bgpd.conf
index 15466b4259..1f7abac98f 100644
--- a/tests/topotests/bgp_accept_own/pe1/bgpd.conf
+++ b/tests/topotests/bgp_accept_own/pe1/bgpd.conf
@@ -25,7 +25,7 @@ router bgp 65001 vrf Customer
  neighbor 192.168.1.1 timers connect 1
  address-family ipv4 unicast
   redistribute connected
-  label vpn export 10
+  label vpn export 250
   rd vpn export 192.168.1.2:2
   rt vpn import 192.168.1.2:2
   rt vpn export 192.168.1.2:2
@@ -40,7 +40,7 @@ router bgp 65001 vrf Service
  neighbor 192.168.2.1 timers 1 3
  neighbor 192.168.2.1 timers connect 1
  address-family ipv4 unicast
-  label vpn export 20
+  label vpn export 350
   rd vpn export 192.168.2.2:2
   rt vpn import 192.168.2.2:2
   rt vpn export 192.168.2.2:2
diff --git a/tests/topotests/bgp_l3vpn_label_export/__init__.py b/tests/topotests/bgp_l3vpn_label_export/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf
new file mode 100644
index 0000000000..bb1ed4c1ea
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf
@@ -0,0 +1,22 @@
+router bgp 65001
+ bgp router-id 192.0.2.1
+ no bgp default ipv4-unicast
+ no bgp ebgp-requires-policy
+ neighbor 192.0.2.2 remote-as 65002
+ neighbor 192.0.2.2 timers 1 3
+ neighbor 192.0.2.2 timers connect 1
+ neighbor 192.0.2.2 ebgp-multihop 2
+ address-family ipv4 vpn
+  neighbor 192.0.2.2 activate
+ exit-address-family
+!
+router bgp 65001 vrf vrf1
+ address-family ipv4 unicast
+  redistribute connected
+  label vpn export 1111
+  rd vpn export 101:1
+  rt vpn both 52:100
+  import vpn
+  export vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf
new file mode 100644
index 0000000000..04ae06877a
--- /dev/null
+++ b/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf
@@ -0,0 +1,26 @@
+hostname r1
+log file ldpd.log
+password zebra
+!
+! debug mpls ldp zebra
+! debug mpls ldp event
+! debug mpls ldp errors
+! debug mpls ldp messages recv
+! debug mpls ldp messages sent
+! debug mpls ldp discovery hello recv
+! debug mpls ldp discovery hello sent
+!
+mpls ldp
+ router-id 192.0.2.1
+ !
+ address-family ipv4
+  discovery transport-address 192.0.2.1
+  !
+  interface r1-eth0
+  !
+  interface r1-eth1
+  !
+ !
+!
+line vty
+!
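Reviewer note on the request flow documented in the zebra.rst hunk above: a daemon obtains chunks from the zebra label manager over its synchronous zclient session. The sketch below is illustrative only; it assumes the lm_get_label_chunk() helper from lib/zclient.h and an already-connected session such as bgpd's zclient_sync, as used by bgp_zebra_request_label_range() in this patch. get_chunk_sketch() itself is a hypothetical helper, not part of the patch.

	#include "zclient.h" /* lm_get_label_chunk() */
	#include "mpls.h"    /* MPLS_LABEL_BASE_ANY */

	/* Sketch only. 'zc' is assumed to be a synchronous zclient
	 * session already connected to the zebra label manager.
	 * keep == 0 lets zebra reclaim the chunk when the session
	 * closes.
	 */
	static bool get_chunk_sketch(struct zclient *zc, uint32_t base,
				     uint32_t size, uint32_t *start,
				     uint32_t *end)
	{
		/* base == MPLS_LABEL_BASE_ANY -> dynamic request: any
		 * free range of 'size' labels, served from the
		 * dynamic-block when one is configured.
		 * A concrete base -> static request for exactly
		 * [base, base + size - 1], refused on conflict.
		 */
		if (lm_get_label_chunk(zc, 0, base, size, start, end) < 0)
			return false;
		return true;
	}

Releasing is symmetric via lm_release_label_chunk(zc, start, end); the new label_auto parameter in this patch only decides whether the returned chunk is additionally handed to bgpd's label pool through bgp_lp_event_chunk().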
diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf new file mode 100644 index 0000000000..7f2f057bfe --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf @@ -0,0 +1 @@ +ip route 192.0.2.2/32 192.168.1.2 diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf b/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf new file mode 100644 index 0000000000..7bdacb1ca3 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf @@ -0,0 +1,7 @@ +! +interface lo + ip address 192.0.2.1/32 +! +interface r1-eth0 + ip address 192.168.1.1/24 +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf new file mode 100644 index 0000000000..18a11cfb40 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf @@ -0,0 +1,23 @@ +router bgp 65002 + bgp router-id 192.0.2.2 + no bgp default ipv4-unicast + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as 65001 + neighbor 192.168.1.1 timers 1 3 + neighbor 192.168.1.1 timers connect 1 + neighbor 192.168.1.1 ebgp-multihop 2 + neighbor 192.168.1.1 update-source 192.0.2.2 + address-family ipv4 vpn + neighbor 192.168.1.1 activate + exit-address-family +! +router bgp 65002 vrf vrf1 + address-family ipv4 unicast + redistribute connected + label vpn export 2222 + rd vpn export 102:1 + rt vpn both 52:100 + import vpn + export vpn + exit-address-family +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf new file mode 100644 index 0000000000..f4307f1ab0 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf @@ -0,0 +1,24 @@ +hostname r2 +log file ldpd.log +password zebra +! +! debug mpls ldp zebra +! debug mpls ldp event +! debug mpls ldp errors +! debug mpls ldp messages recv +! debug mpls ldp messages sent +! debug mpls ldp discovery hello recv +! debug mpls ldp discovery hello sent +! +mpls ldp + router-id 192.0.2.2 + ! + address-family ipv4 + discovery transport-address 192.0.2.2 + ! + interface r2-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf new file mode 100644 index 0000000000..e3f5d7dba0 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf @@ -0,0 +1 @@ +ip route 192.0.2.1/32 192.168.1.1 diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf b/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf new file mode 100644 index 0000000000..40dfa9854c --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf @@ -0,0 +1,7 @@ +! +int lo + ip address 192.0.2.2/32 +! +interface r2-eth0 + ip address 192.168.1.2/24 +! diff --git a/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py b/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py new file mode 100644 index 0000000000..7c23a3e899 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py @@ -0,0 +1,587 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright (c) 2023 by Louis Scalbert +# Copyright 2023 6WIND S.A. 
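+# Checks the 'label vpn export' command (manual and auto label values)
+# against the zebra label manager.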
+# + +""" + +""" + +import os +import re +import sys +import json +import pytest +import functools + +from copy import deepcopy + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.common_config import kill_router_daemons, start_router_daemons, step + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + for rtr in [1, 2]: + tgen.add_router("r{}".format(rtr)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for rtr in [1, 2]: + tgen.gears["r{}".format(rtr)].cmd("ip link add vrf1 type vrf table 10") + tgen.gears["r{}".format(rtr)].cmd("ip link set vrf1 up") + tgen.gears["r{}".format(rtr)].cmd( + "ip address add dev vrf1 192.0.3.{}/32".format(rtr) + ) + tgen.gears["r{}".format(rtr)].run( + "sysctl -w net.mpls.conf.r{}-eth0.input=1".format(rtr) + ) + tgen.gears["r{}".format(rtr)].run("sysctl -w net.mpls.conf.vrf1.input=1") + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_STATIC, os.path.join(CWD, "{}/staticd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def check_bgp_vpn_prefix(label, rname="r1", rd=None): + tgen = get_topogen() + + if rd: + output = json.loads( + tgen.gears[rname].vtysh_cmd( + "show bgp ipv4 vpn rd {} 192.0.3.2/32 json".format(rd) + ) + ) + else: + output = json.loads( + tgen.gears[rname].vtysh_cmd( + "show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json" + ) + ) + + if label == "auto": + expected = { + "paths": [ + { + "valid": True, + "aspath": {"string": "65002"}, + "nexthops": [{"ip": "192.0.2.2"}], + }, + ] + } + elif label and not rd: + expected = { + "paths": [ + { + "valid": True, + "remoteLabel": label, + "aspath": {"string": "65002"}, + "nexthops": [{"ip": "192.0.2.2"}], + }, + ] + } + elif label and rd: + expected = { + "102:1": { + "prefix": "192.0.3.2/32", + "paths": [ + { + "valid": True, + "remoteLabel": label, + "nexthops": [{"ip": "0.0.0.0"}], + } + ], + } + } + else: + expected = {} + + return topotest.json_cmp(output, expected, exact=(label is None)) + + +def check_mpls_table(label, protocol): + tgen = get_topogen() + + if label == "auto": + cmd = "show mpls table json" + else: + cmd = "show mpls table {} json".format(label) + + output = json.loads(tgen.gears["r2"].vtysh_cmd(cmd)) + + if label == "auto" and protocol: + output_copy = deepcopy(output) + for key, data in output_copy.items(): + for nexthop in data.get("nexthops", []): + if nexthop.get("type", None) != protocol: + continue + output = data + break + + if protocol: + expected = { + "nexthops": [ + { + "type": protocol, + }, + ] + } + else: + expected = {} + + return topotest.json_cmp(output, expected, exact=(protocol is None)) + + +def check_mpls_ldp_binding(): + tgen = get_topogen() + + output = json.loads( + tgen.gears["r1"].vtysh_cmd("show mpls ldp binding 192.0.2.2/32 json") + ) + expected = { + "bindings": [ + { + 
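+                # LDP grabs the first dynamic chunk, so the local label
+                # for 192.0.2.2/32 is 16, the first unreserved value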
"prefix": "192.0.2.2/32", + "localLabel": "16", # first available label + "inUse": 1, + }, + ] + } + + return topotest.json_cmp(output, expected) + + +def test_convergence(): + "Test protocol convergence" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Check BGP and LDP convergence") + test_func = functools.partial(check_bgp_vpn_prefix, 2222) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R1" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, 2222, "BGP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: [2222/2222]" in output, "Failed to see BGP label chunk" + + +def test_vpn_label_export_16(): + "Test that assigning the label value of 16 is not possible because it used by LDP" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export 16" + ) + + step("Check that label vpn export 16 fails") + test_func = functools.partial(check_bgp_vpn_prefix, None) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP prefix on R1" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, 2222, None) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp" not in output, "Unexpected BGP label chunk" + + +def test_vpn_label_export_2222(): + "Test that setting back the label value of 2222 works" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export 2222" + ) + + step("Check that label vpn export 2222 is OK") + test_func = functools.partial(check_bgp_vpn_prefix, 2222) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R1" + + test_func = 
functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, "auto", "BGP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: [2222/2222]" in output, "Failed to see BGP label chunk" + + +def test_vpn_label_export_auto(): + "Test that setting label vpn export auto works" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export auto" + ) + + step("Check that label vpn export auto is OK") + test_func = functools.partial(check_bgp_vpn_prefix, "auto") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R1" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, "auto", "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Failed to see BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" + + +def test_vpn_label_export_no_auto(): + "Test that UNsetting label vpn export auto removes the prefix from R1 table and R2 LDP table" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + output = json.loads( + tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json") + ) + + auto_label = output.get("paths")[0].get("remoteLabel", None) + assert auto_label is not None, "Failed to fetch prefix label on R1" + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "no label vpn export auto" + ) + + step("Check that no label vpn export auto is OK") + test_func = functools.partial(check_bgp_vpn_prefix, 3, rname="r2", rd="102:1") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP prefix on R2" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = 
topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, auto_label, None) + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " not in output, "Unexpected BGP label chunk" + + +def test_vpn_label_export_auto_back(): + "Test that setting back label vpn export auto works" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + output = json.loads( + tgen.gears["r2"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json") + ) + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export auto" + ) + + step("Check that label vpn export auto is OK") + test_func = functools.partial(check_bgp_vpn_prefix, "auto") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R1" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, "auto", "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Failed to see BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" + + +def test_vpn_label_export_manual_from_auto(): + "Test that setting a manual label value from the BGP chunk range works" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + output = json.loads( + tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json") + ) + + auto_label = output.get("paths")[0].get("remoteLabel", None) + assert auto_label is not None, "Failed to fetch prefix label on R1" + + auto_label = auto_label + 1 + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export {}".format(auto_label) + ) + + step("Check that label vpn export {} is OK".format(auto_label)) + test_func = functools.partial( + check_bgp_vpn_prefix, auto_label, rname="r2", rd="102:1" + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R2" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, 
"Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, auto_label, "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Failed to see BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" + + +def test_vpn_label_configure_dynamic_range(): + "Test that if a dynamic range is configured, then the next dynamic allocations will be done in that block" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + tgen.gears["r2"].vtysh_cmd("conf\n" "mpls label dynamic-block 500 1000\n") + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export auto" + ) + step("Check that label vpn export auto starting at 500 is OK") + test_func = functools.partial(check_bgp_vpn_prefix, 500, rname="r2", rd="102:1") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP prefix on R2" + + test_func = functools.partial(check_mpls_table, 500, "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) + + +def test_vpn_label_restart_ldp(): + "Test that if a dynamic range is configured, then when LDP restarts, it follows the new dynamic range" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_list = tgen.routers() + + step("Kill LDP on R2") + kill_router_daemons(tgen, "r2", ["ldpd"]) + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto ldp: " not in output, "Unexpected LDP label chunk" + + step("Bring up LDP on R2") + + start_router_daemons(tgen, "r2", ["ldpd"]) + + test_func = functools.partial(check_mpls_table, 628, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto ldp: [628/691]" in output, "Failed to see LDP label chunk [628/691]" + assert "Proto ldp: [692/755]" in output, "Failed to see LDP label chunk [692/755]" + + +def test_vpn_label_unconfigure_dynamic_range(): + "Test that if the dynamic range is unconfigured, then the next dynamic allocations will be done at the first free place." 
+ + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + tgen.gears["r2"].vtysh_cmd("conf\n" "no mpls label dynamic-block 500 1000\n") + step("Check that unconfiguring label vpn export auto will remove BGP label chunk") + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "no label vpn export auto" + ) + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " not in output, "Unexpected BGP label chunk" + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export auto" + ) + step("Check that label vpn export auto starting at 16 is OK") + test_func = functools.partial(check_bgp_vpn_prefix, 16, rname="r2", rd="102:1") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP prefix on R2" + + test_func = functools.partial(check_mpls_table, 16, "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" diff --git a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json index d35e4ef463..17b9accb4a 100644 --- a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json +++ b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json @@ -2,5 +2,5 @@ "ledger":0, "inUse":0, "requests":0, - "labelChunks":1 + "labelChunks":0 } diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json index 3cc2fddcfa..7a4e0d7452 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json index 95570541c8..0dcdec678f 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json index 3cc2fddcfa..7a4e0d7452 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json index eb3433301b..205079574c 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git 
a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json index 5517fc738a..7a4e0d7452 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json index 25b7a8616f..0fdd3d6dc0 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json index a1f21585d7..e289df1d44 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json index 7eeccd1496..0fdd3d6dc0 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json index 95570541c8..0dcdec678f 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json index d801671fdc..a440ab4248 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json index 25da05b0d4..0dcdec678f 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git 
a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json index 2cd47b9ce5..03bbcc008d 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json index f390ef69b1..5c70cf6450 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json index 3353d75eda..03bbcc008d 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py index ce278ed7a7..d4c355a44a 100644 --- a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py +++ b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py @@ -151,6 +151,16 @@ def teardown_module(_mod): tgen.stop_topology() +def check_bgp_vpnv4_prefix_presence(router, prefix): + "Check the presence of a prefix" + tgen = get_topogen() + + dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True) + if not dump: + return "{}, prefix ipv4 vpn {} is not installed yet".format(router.name, prefix) + return None + + def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=None): """ Dump and check that vpnv4 entries have the same MPLS label value @@ -163,6 +173,12 @@ def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=N stored_label_inited = False for prefix in group: + test_func = functools.partial(check_bgp_vpnv4_prefix_presence, router, prefix) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert success, "{}, prefix ipv4 vpn {} is not installed yet".format( + router.name, prefix + ) + dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True) assert dump, "{0}, {1}, route distinguisher not present".format( router.name, prefix diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py b/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py index e936ccc1e4..3d5f8f643b 100644 --- a/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py +++ b/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py @@ -54,7 +54,7 @@ pytestmark = [pytest.mark.bgpd] PREFIXES_R11 = ["172:31::11/128", "172:31::20/128", "172:31::111/128"] PREFIXES_R12 = ["172:31::12/128", "172:31::15/128"] 
PREFIXES_REDIST_R14 = ["172:31::14/128"] -PREFIXES_CONNECTED = ["192:168::255/112", "192:2::/64"] +PREFIXES_CONNECTED = ["192:168::255:0/112", "192:2::/64"] def build_topo(tgen): @@ -150,6 +150,16 @@ def teardown_module(_mod): tgen.stop_topology() +def check_bgp_vpnv6_prefix_presence(router, prefix): + "Check the presence of a prefix" + tgen = get_topogen() + + dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True) + if not dump: + return "{}, prefix ipv6 vpn {} is not installed yet".format(router.name, prefix) + return None + + def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=None): """ Dump and check that vpnv6 entries have the same MPLS label value @@ -162,6 +172,12 @@ def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=N stored_label_inited = False for prefix in group: + test_func = functools.partial(check_bgp_vpnv6_prefix_presence, router, prefix) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert success, "{}, prefix ipv6 vpn {} is not installed yet".format( + router.name, prefix + ) + dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True) for rd, pathes in dump.items(): for path in pathes["paths"]: @@ -237,7 +253,9 @@ def check_show_mpls_table(router, blacklist=None, label_list=None, whitelist=Non label_list.add(in_label) for nh in label_info["nexthops"]: if "installed" not in nh.keys(): - return "{} {} is not installed yet on {}".format(in_label, label_info, router.name) + return "{} {} is not installed yet on {}".format( + in_label, label_info, router.name + ) if nh["installed"] != True or nh["type"] != "BGP": return "{}, show mpls table, nexthop is not installed".format( router.name diff --git a/tests/zebra/test_lm_plugin.c b/tests/zebra/test_lm_plugin.c index 9ad0bc4e17..9895c025f0 100644 --- a/tests/zebra/test_lm_plugin.c +++ b/tests/zebra/test_lm_plugin.c @@ -48,7 +48,7 @@ static int lm_get_chunk_pi(struct label_manager_chunk **lmc, uint32_t base, vrf_id_t vrf_id) { if (base == 0) - *lmc = create_label_chunk(10, 55, 0, 1, 50, 50 + size); + *lmc = create_label_chunk(10, 55, 0, 1, 50, 50 + size, true); else *lmc = assign_label_chunk(10, 55, 0, 1, size, base); diff --git a/zebra/label_manager.c b/zebra/label_manager.c index fa7dbb0a25..8ae6e0cc3f 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -51,10 +51,14 @@ DEFINE_HOOK(lm_get_chunk, DEFINE_HOOK(lm_release_chunk, (struct zserv *client, uint32_t start, uint32_t end), (client, start, end)); +/* show running-config needs an API for dynamic-block */ +DEFINE_HOOK(lm_write_label_block_config, + (struct vty *vty, struct zebra_vrf *zvrf), + (vty, zvrf)); DEFINE_HOOK(lm_cbs_inited, (), ()); -/* define wrappers to be called in zapi_msg.c (as hooks must be called in - * source file where they were defined) +/* define wrappers to be called in zapi_msg.c or zebra_mpls_vty.c (as hooks + * must be called in source file where they were defined) */ void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id) { @@ -71,6 +75,11 @@ void lm_release_chunk_call(struct zserv *client, uint32_t start, uint32_t end) hook_call(lm_release_chunk, client, start, end); } +int lm_write_label_block_config_call(struct vty *vty, struct zebra_vrf *zvrf) +{ + return hook_call(lm_write_label_block_config, vty, zvrf); +} + /* forward declarations of the static functions to be used for some hooks */ static int label_manager_connect(struct zserv *client, vrf_id_t vrf_id); static int 
label_manager_disconnect(struct zserv *client); @@ -80,6 +89,8 @@ static int label_manager_get_chunk(struct label_manager_chunk **lmc, vrf_id_t vrf_id); static int label_manager_release_label_chunk(struct zserv *client, uint32_t start, uint32_t end); +static int label_manager_write_label_block_config(struct vty *vty, + struct zebra_vrf *zvrf); void delete_label_chunk(void *val) { @@ -138,6 +149,8 @@ void lm_hooks_register(void) hook_register(lm_client_disconnect, label_manager_disconnect); hook_register(lm_get_chunk, label_manager_get_chunk); hook_register(lm_release_chunk, label_manager_release_label_chunk); + hook_register(lm_write_label_block_config, + label_manager_write_label_block_config); } void lm_hooks_unregister(void) { @@ -145,24 +158,129 @@ void lm_hooks_unregister(void) hook_unregister(lm_client_disconnect, label_manager_disconnect); hook_unregister(lm_get_chunk, label_manager_get_chunk); hook_unregister(lm_release_chunk, label_manager_release_label_chunk); + hook_unregister(lm_write_label_block_config, + label_manager_write_label_block_config); } -DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table", +static json_object *lmc_json(struct label_manager_chunk *lmc) +{ + json_object *json = json_object_new_object(); + + json_object_string_add(json, "protocol", zebra_route_string(lmc->proto)); + json_object_int_add(json, "instance", lmc->instance); + json_object_int_add(json, "sessionId", lmc->session_id); + json_object_int_add(json, "start", lmc->start); + json_object_int_add(json, "end", lmc->end); + json_object_boolean_add(json, "dynamic", lmc->is_dynamic); + return json; +} + +DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table [json$uj]", SHOW_STR DEBUG_STR - "Display allocated label chunks\n") + "Display allocated label chunks\n" + JSON_STR) { struct label_manager_chunk *lmc; struct listnode *node; + json_object *json_array = NULL, *json_global = NULL, *json_dyn_block; + + if (uj) { + json_array = json_object_new_array(); + json_global = json_object_new_object(); + json_dyn_block = json_object_new_object(); + json_object_int_add(json_dyn_block, "lowerBound", + lbl_mgr.dynamic_block_start); + json_object_int_add(json_dyn_block, "upperBound", + lbl_mgr.dynamic_block_end); + json_object_object_add(json_global, "dynamicBlock", + json_dyn_block); + } else + vty_out(vty, "Dynamic block: lower-bound %u, upper-bound %u\n", + lbl_mgr.dynamic_block_start, lbl_mgr.dynamic_block_end); for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { + if (uj) { + json_object_array_add(json_array, lmc_json(lmc)); + continue; + } vty_out(vty, "Proto %s: [%u/%u]\n", zebra_route_string(lmc->proto), lmc->start, lmc->end); } - + if (uj) { + json_object_object_add(json_global, "chunks", json_array); + vty_json(vty, json_global); + } return CMD_SUCCESS; } +DEFPY(mpls_label_dynamic_block, mpls_label_dynamic_block_cmd, + "[no$no] mpls label dynamic-block [(16-1048575)$start (16-1048575)$end]", + NO_STR + MPLS_STR + "Label configuration\n" + "Configure dynamic label block\n" + "Start label\n" + "End label\n") +{ + struct listnode *node; + struct label_manager_chunk *lmc; + + /* unset dynamic range */ + if (no || + (start == MPLS_LABEL_UNRESERVED_MIN && end == MPLS_LABEL_MAX)) { + lbl_mgr.dynamic_block_start = MPLS_LABEL_UNRESERVED_MIN; + lbl_mgr.dynamic_block_end = MPLS_LABEL_MAX; + return CMD_SUCCESS; + } + if (!start || !end) { + vty_out(vty, + "%% label dynamic-block, range missing, aborting\n"); + return CMD_WARNING_CONFIG_FAILED; + } + if (start > end) { + 
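/* DEFPY parses the range bounds as signed longs, hence %ld below */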
vty_out(vty, + "%% label dynamic-block, wrong range (%ld > %ld), aborting\n", + start, end); + return CMD_WARNING_CONFIG_FAILED; + } + + for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { + if (lmc->proto == NO_PROTO) + continue; + if (!lmc->is_dynamic && lmc->start >= (uint32_t)start && + lmc->end <= (uint32_t)end) { + vty_out(vty, + "%% Found a static label chunk [%u-%u] for %s in conflict with the dynamic label block\n", + lmc->start, lmc->end, + zebra_route_string(lmc->proto)); + return CMD_WARNING_CONFIG_FAILED; + } else if (lmc->is_dynamic && (lmc->end > (uint32_t)end || + lmc->start < (uint32_t)start)) { + vty_out(vty, + "%% Found a dynamic label chunk [%u-%u] for %s outside the new dynamic label block, consider restart the service\n", + lmc->start, lmc->end, + zebra_route_string(lmc->proto)); + } + } + lbl_mgr.dynamic_block_start = start; + lbl_mgr.dynamic_block_end = end; + return CMD_SUCCESS; +} + +static int label_manager_write_label_block_config(struct vty *vty, + struct zebra_vrf *zvrf) +{ + if (zvrf_id(zvrf) != VRF_DEFAULT) + return 0; + if (lbl_mgr.dynamic_block_start == MPLS_LABEL_UNRESERVED_MIN && + lbl_mgr.dynamic_block_end == MPLS_LABEL_MAX) + return 0; + vty_out(vty, "mpls label dynamic-block %u %u\n", + lbl_mgr.dynamic_block_start, lbl_mgr.dynamic_block_end); + return 1; +} + /** * Init label manager (or proxy to an external one) */ @@ -170,6 +288,8 @@ void label_manager_init(void) { lbl_mgr.lc_list = list_new(); lbl_mgr.lc_list->del = delete_label_chunk; + lbl_mgr.dynamic_block_start = MPLS_LABEL_UNRESERVED_MIN; + lbl_mgr.dynamic_block_end = MPLS_LABEL_MAX; hook_register(zserv_client_close, lm_client_disconnect_cb); /* register default hooks for the label manager actions */ @@ -179,12 +299,13 @@ void label_manager_init(void) hook_call(lm_cbs_inited); install_element(VIEW_NODE, &show_label_table_cmd); + install_element(CONFIG_NODE, &mpls_label_dynamic_block_cmd); } /* alloc and fill a label chunk */ struct label_manager_chunk * create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, - uint8_t keep, uint32_t start, uint32_t end) + uint8_t keep, uint32_t start, uint32_t end, bool is_dynamic) { /* alloc chunk, fill it and return it */ struct label_manager_chunk *lmc = @@ -196,6 +317,7 @@ create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, lmc->instance = instance; lmc->session_id = session_id; lmc->keep = keep; + lmc->is_dynamic = is_dynamic; return lmc; } @@ -223,6 +345,15 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, return NULL; } + if ((lbl_mgr.dynamic_block_start != MPLS_LABEL_UNRESERVED_MIN || + lbl_mgr.dynamic_block_end != MPLS_LABEL_MAX) && + base >= lbl_mgr.dynamic_block_start && + end <= lbl_mgr.dynamic_block_end) { + zlog_warn("Invalid LM request arguments: base: %u, size: %u for %s in conflict with the dynamic label block", + base, size, zebra_route_string(proto)); + return NULL; + } + /* Scan the existing chunks to see if the requested range of labels * falls inside any of such chunks */ for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { @@ -254,7 +385,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, /* insert chunk between existing chunks */ if (insert_node) { lmc = create_label_chunk(proto, instance, session_id, keep, - base, end); + base, end, false); listnode_add_before(lbl_mgr.lc_list, insert_node, lmc); return lmc; } @@ -277,7 +408,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, } lmc = create_label_chunk(proto, 
instance, session_id, keep, - base, end); + base, end, false); if (last_node) listnode_add_before(lbl_mgr.lc_list, last_node, lmc); else @@ -288,7 +419,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, /* create a new chunk past all the existing ones and link at * tail */ lmc = create_label_chunk(proto, instance, session_id, keep, - base, end); + base, end, false); listnode_add(lbl_mgr.lc_list, lmc); return lmc; } @@ -313,9 +444,13 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, { struct label_manager_chunk *lmc; struct listnode *node; - uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN; + uint32_t prev_end = lbl_mgr.dynamic_block_start - 1; + struct label_manager_chunk *lmc_block_last = NULL; - /* handle chunks request with a specific base label */ + /* handle chunks request with a specific base label + * - static label requests: BGP hardset value, Pathd + * - segment routing label requests + */ if (base != MPLS_LABEL_BASE_ANY) return assign_specific_label_chunk(proto, instance, session_id, keep, size, base); @@ -325,37 +460,44 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, /* first check if there's one available */ for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { - if (lmc->proto == NO_PROTO - && lmc->end - lmc->start + 1 == size) { + if (lmc->start <= prev_end) + continue; + if (lmc->proto == NO_PROTO && + lmc->end - lmc->start + 1 == size && + lmc->end <= lbl_mgr.dynamic_block_end) { lmc->proto = proto; lmc->instance = instance; lmc->session_id = session_id; lmc->keep = keep; + lmc->is_dynamic = true; return lmc; } /* check if we hadve a "hole" behind us that we can squeeze into */ - if ((lmc->start > prev_end) && (lmc->start - prev_end > size)) { + if (lmc->start - prev_end > size && + prev_end + 1 + size <= lbl_mgr.dynamic_block_end) { lmc = create_label_chunk(proto, instance, session_id, keep, prev_end + 1, - prev_end + size); + prev_end + size, true); listnode_add_before(lbl_mgr.lc_list, node, lmc); return lmc; } prev_end = lmc->end; + + /* check if we have a chunk that goes over the end block */ + if (lmc->end > lbl_mgr.dynamic_block_end) + continue; + lmc_block_last = lmc; } /* otherwise create a new one */ uint32_t start_free; - if (list_isempty(lbl_mgr.lc_list)) - start_free = MPLS_LABEL_UNRESERVED_MIN; + if (lmc_block_last == NULL) + start_free = lbl_mgr.dynamic_block_start; else - start_free = ((struct label_manager_chunk *)listgetdata( - listtail(lbl_mgr.lc_list))) - ->end - + 1; + start_free = lmc_block_last->end + 1; - if (start_free > MPLS_LABEL_UNRESERVED_MAX - size + 1) { + if (start_free > lbl_mgr.dynamic_block_end - size + 1) { flog_err(EC_ZEBRA_LM_EXHAUSTED_LABELS, "Reached max labels. 
Start: %u, size: %u", start_free, size); @@ -364,7 +506,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, /* create chunk and link at tail */ lmc = create_label_chunk(proto, instance, session_id, keep, start_free, - start_free + size - 1); + start_free + size - 1, true); listnode_add(lbl_mgr.lc_list, lmc); return lmc; } diff --git a/zebra/label_manager.h b/zebra/label_manager.h index 74f40fab23..ab6ad7f639 100644 --- a/zebra/label_manager.h +++ b/zebra/label_manager.h @@ -42,6 +42,7 @@ struct label_manager_chunk { unsigned short instance; uint32_t session_id; uint8_t keep; + uint8_t is_dynamic; /* Tell if chunk is dynamic or static */ uint32_t start; /* First label of the chunk */ uint32_t end; /* Last label of the chunk */ }; @@ -61,11 +62,14 @@ DECLARE_HOOK(lm_get_chunk, DECLARE_HOOK(lm_release_chunk, (struct zserv *client, uint32_t start, uint32_t end), (client, start, end)); +DECLARE_HOOK(lm_write_label_block_config, + (struct vty *vty, struct zebra_vrf *zvrf), + (vty, zvrf)); DECLARE_HOOK(lm_cbs_inited, (), ()); -/* declare wrappers to be called in zapi_msg.c (as hooks must be called in - * source file where they were defined) +/* declare wrappers to be called in zapi_msg.c or zebra_mpls_vty.c (as hooks + * must be called in source file where they were defined) */ void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id); void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client, @@ -73,6 +77,7 @@ void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client, vrf_id_t vrf_id); void lm_release_chunk_call(struct zserv *client, uint32_t start, uint32_t end); +int lm_write_label_block_config_call(struct vty *vty, struct zebra_vrf *zvrf); /* API for an external LM to return responses for requests */ int lm_client_connect_response(uint8_t proto, uint16_t instance, @@ -82,7 +87,7 @@ int lm_client_connect_response(uint8_t proto, uint16_t instance, /* convenience function to allocate an lmc to be consumed by the above API */ struct label_manager_chunk * create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, - uint8_t keep, uint32_t start, uint32_t end); + uint8_t keep, uint32_t start, uint32_t end, bool is_dynamic); void delete_label_chunk(void *val); /* register/unregister callbacks for hooks */ @@ -95,6 +100,8 @@ void lm_hooks_unregister(void); */ struct label_manager { struct list *lc_list; + uint32_t dynamic_block_start; + uint32_t dynamic_block_end; }; void label_manager_init(void); diff --git a/zebra/zebra_mpls_vty.c b/zebra/zebra_mpls_vty.c index e64e7009b4..fd09e6b444 100644 --- a/zebra/zebra_mpls_vty.c +++ b/zebra/zebra_mpls_vty.c @@ -22,6 +22,7 @@ #include "zebra/zebra_rnh.h" #include "zebra/redistribute.h" #include "zebra/zebra_routemap.h" +#include "zebra/label_manager.h" static int zebra_mpls_transit_lsp(struct vty *vty, int add_cmd, const char *inlabel_str, const char *gate_str, @@ -270,6 +271,8 @@ static int zebra_mpls_config(struct vty *vty) write += zebra_mpls_write_lsp_config(vty, zvrf); write += zebra_mpls_write_fec_config(vty, zvrf); write += zebra_mpls_write_label_block_config(vty, zvrf); + write += lm_write_label_block_config_call(vty, zvrf); + return write; }
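A closing note on the chunk arithmetic in bgp_lp_event_zebra_up() (bgpd/bgp_labelpool.c, first hunk of this patch): the float comparison there implements a ceiling division, rounding chunks_needed up only when labels_needed is not a multiple of next_chunksize. A minimal integer-only equivalent is shown below purely for illustration; chunks_needed_for() is a hypothetical helper, not part of the patch, and it assumes labels_needed + chunksize - 1 fits in a uint32_t.

	#include <assert.h>
	#include <stdint.h>

	/* Integer ceiling division: same result as the patch's
	 * float-comparison round-up, without floating point.
	 */
	static uint32_t chunks_needed_for(uint32_t labels_needed,
					  uint32_t chunksize)
	{
		if (labels_needed == 0)
			return 0;
		return (labels_needed + chunksize - 1) / chunksize;
	}

	int main(void)
	{
		/* with 128-label chunks: 100 -> 1, 256 -> 2, 300 -> 3 */
		assert(chunks_needed_for(100, 128) == 1);
		assert(chunks_needed_for(256, 128) == 2);
		assert(chunks_needed_for(300, 128) == 3);
		return 0;
	}

With next_chunksize 128, 100 requested labels still cost one full 128-label chunk, which is why the patch then recomputes labels_needed = chunks_needed * lp->next_chunksize before requesting the range.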