From 1c199f219de476e528a7649404267f33206a7e94 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Sun, 24 Sep 2023 22:16:36 +0200 Subject: [PATCH 01/18] bgpd: rewrite 'bgp label vpn export' command The original 'bgp label vpn export' code is confusing, the 'no form' actions are mixed with the positive form. Fix this by rewriting the code. Signed-off-by: Philippe Guibert --- bgpd/bgp_vty.c | 65 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index 8144d6e7b3..48eaf1ed77 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -9434,7 +9434,7 @@ DEFPY (af_label_vpn_export, "Automatically assign a label\n") { VTY_DECLVAR_CONTEXT(bgp, bgp); - mpls_label_t label = MPLS_LABEL_NONE; + mpls_label_t label = (mpls_label_t)label_val; afi_t afi; int idx = 0; bool yes = true; @@ -9442,24 +9442,28 @@ DEFPY (af_label_vpn_export, if (argv_find(argv, argc, "no", &idx)) yes = false; - /* If "no ...", squash trailing parameter */ - if (!yes) - label_auto = NULL; - - if (yes) { - if (!label_auto) - label = label_val; /* parser should force unsigned */ - } - afi = vpn_policy_getafi(vty, bgp, false); if (afi == AFI_MAX) return CMD_WARNING_CONFIG_FAILED; - - if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) - /* no change */ - return CMD_SUCCESS; + if (yes) { + if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) + /* no change */ + return CMD_SUCCESS; + if (!label_auto && label == bgp->vpn_policy[afi].tovpn_label) + /* no change */ + return CMD_SUCCESS; + } else { + if (label_auto && !CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) + /* no match */ + return CMD_WARNING_CONFIG_FAILED; + if (!label_auto && label_val && + label != bgp->vpn_policy[afi].tovpn_label) + /* no change */ + return CMD_WARNING_CONFIG_FAILED; + } /* * pre-change: un-export vpn routes (vpn->vrf routes unaffected) @@ -9467,9 +9471,9 @@ DEFPY (af_label_vpn_export, vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(), bgp); - if (!label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) { - + /* release any previous auto label */ + if (CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) { if (bgp->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE) { /* @@ -9486,16 +9490,25 @@ DEFPY (af_label_vpn_export, &bgp->vpn_policy[afi], bgp->vpn_policy[afi].tovpn_label); } - UNSET_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO); } - bgp->vpn_policy[afi].tovpn_label = label; - if (label_auto) { - SET_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO); - bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi], - vpn_leak_label_callback); + if (yes) { + if (label_auto) { + SET_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO); + /* fetch a label */ + bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE; + bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi], + vpn_leak_label_callback); + } else { + UNSET_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO); + bgp->vpn_policy[afi].tovpn_label = label; + } + } else { + UNSET_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO); + bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE; } /* post-change: re-export vpn routes */ From d162d5f6f538e60385290fddf8ed256d2e15f628 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Fri, 1 Sep 2023 17:14:06 +0200 Subject: [PATCH 02/18] 
bgpd: fix hardset l3vpn label available in mpls pool

Today, when configuring BGP L3VPN MPLS, the operator may use the
following command to hardset a label value:

> router bgp 65500 vrf vrf1
>  address-family ipv4 unicast
>   label vpn export <label_value>

Today, BGP uses this value without checks, leading to potential
conflicts with other control planes like LDP. For instance, if LDP
initiates with a label chunk of [16;72] and BGP also uses label value
50, a conflict arises.

The 'label manager' service in zebra oversees label allocations. While
all the other control plane daemons use it, BGP doesn't when a hardset
label is in place. This update fixes this problem. Now, when a hardset
label is set for l3vpn export, a request is made to the label manager
for approval, ensuring no conflicts with other daemons. However, this
means some existing BGP configurations might become non-operational if
they conflict with labels already allocated to another daemon, even if
those labels are unused.

Note: labels below 16 are reserved and won't be checked for
consistency by the label manager.

Fixes: ddb5b4880ba8 ("bgpd: vpn-vrf route leaking")
Signed-off-by: Philippe Guibert
---
 bgpd/bgp_labelpool.c |  5 +++--
 bgpd/bgp_mplsvpn.h   | 20 ++++++++++++++++++++
 bgpd/bgp_vty.c       | 20 +++++++++++++++++---
 bgpd/bgp_zebra.c     | 14 ++++++++++++--
 bgpd/bgp_zebra.h     |  3 ++-
 bgpd/bgpd.h          |  2 ++
 6 files changed, 56 insertions(+), 8 deletions(-)

diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 883338610c..d33f14ac41 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -448,7 +448,7 @@ void bgp_lp_get(
 	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
 		if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY,
-						   lp->next_chunksize))
+						   lp->next_chunksize, true))
 			return;
 
 		lp->pending_count += lp->next_chunksize;
@@ -650,7 +650,8 @@ void bgp_lp_event_zebra_up(void)
 	 */
 	list_delete_all_node(lp->chunks);
 
-	if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY, labels_needed))
+	if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY, labels_needed,
+					   true))
 		return;
 
 	lp->pending_count = labels_needed;

diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 19b6f4eb77..b2bdfcec00 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -13,6 +13,7 @@
 #include "bgpd/bgp_rd.h"
 #include "bgpd/bgp_zebra.h"
 #include "bgpd/bgp_vty.h"
+#include "bgpd/bgp_label.h"
 
 #define MPLS_LABEL_IS_SPECIAL(label) ((label) <= MPLS_LABEL_EXTENSION)
 #define MPLS_LABEL_IS_NULL(label) \
@@ -165,6 +166,25 @@ static inline int vpn_leak_to_vpn_active(struct bgp *bgp_vrf, afi_t afi,
 			return 0;
 		}
 
+	/* Is there a "manual" export label that isn't allocated yet?
*/ + if (!CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO) && + bgp_vrf->vpn_policy[afi].tovpn_label != BGP_PREVENT_VRF_2_VRF_LEAK && + bgp_vrf->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE && + (bgp_vrf->vpn_policy[afi].tovpn_label >= MPLS_LABEL_UNRESERVED_MIN && + !CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG))) { + if (!bgp_zebra_request_label_range(bgp_vrf->vpn_policy[afi] + .tovpn_label, + 1, false)) { + if (pmsg) + *pmsg = "manual label could not be allocated"; + return 0; + } + SET_FLAG(bgp_vrf->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG); + } + return 1; } diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index 48eaf1ed77..1ec66824c3 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -9471,9 +9471,16 @@ DEFPY (af_label_vpn_export, vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(), bgp); - /* release any previous auto label */ if (CHECK_FLAG(bgp->vpn_policy[afi].flags, - BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) { + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG)) { + bgp_zebra_release_label_range(bgp->vpn_policy[afi].tovpn_label, + bgp->vpn_policy[afi].tovpn_label); + UNSET_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG); + + } else if (CHECK_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) { + /* release any previous auto label */ if (bgp->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE) { /* @@ -9501,9 +9508,16 @@ DEFPY (af_label_vpn_export, bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi], vpn_leak_label_callback); } else { + bgp->vpn_policy[afi].tovpn_label = label; UNSET_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_AUTO); - bgp->vpn_policy[afi].tovpn_label = label; + if (bgp->vpn_policy[afi].tovpn_label >= + MPLS_LABEL_UNRESERVED_MIN && + bgp_zebra_request_label_range(bgp->vpn_policy[afi] + .tovpn_label, + 1, false)) + SET_FLAG(bgp->vpn_policy[afi].flags, + BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG); } } else { UNSET_FLAG(bgp->vpn_policy[afi].flags, diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 212b7f398b..e53416044e 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -3423,6 +3423,9 @@ static bool bgp_zebra_label_manager_connect(void) /* tell label pool that zebra is connected */ bgp_lp_event_zebra_up(); + /* tell BGP L3VPN that label manager is available */ + if (bgp_get_default()) + vpn_leak_postchange_all(); return true; } @@ -3921,7 +3924,8 @@ void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, zebra_send_mpls_labels(zclient, cmd, &zl); } -bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size) +bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size, + bool label_auto) { int ret; uint32_t start, end; @@ -3943,7 +3947,13 @@ bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size) return false; } - bgp_lp_event_chunk(start, end); + if (label_auto) + /* label automatic is serviced by the bgp label pool + * manager, which allocates label chunks in + * pre-pools, and which needs to be notified about + * new chunks availability + */ + bgp_lp_event_chunk(start, end); return true; } diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h index 0edae041d2..4696e4dc44 100644 --- a/bgpd/bgp_zebra.h +++ b/bgpd/bgp_zebra.h @@ -124,6 +124,7 @@ extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, enum lsp_types_t ltype, struct prefix *p, uint32_t num_labels, mpls_label_t out_labels[]); -extern bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size); +extern 
bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
+					  bool label_auto);
 extern void bgp_zebra_release_label_range(uint32_t start, uint32_t end);
 
 #endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index bc2008b78b..0dd421b462 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -217,6 +217,8 @@ struct vpn_policy {
 #define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
 #define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
 #define BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP (1 << 4)
+/* Manual label is registered with zebra label manager */
+#define BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG (1 << 5)
 
 	/*
 	 * If we are importing another vrf into us keep a list of

From cb86d8e3a4515291f8bbd34a0a32e23f2495a315 Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Fri, 1 Sep 2023 17:51:54 +0200
Subject: [PATCH 03/18] bgpd: fix label allocation should not be allocated at
 startup

BGP always asks zebra for a chunk of MPLS labels, even if it does not
need one. Fix this by correcting the "labels_needed" round-up formula.

Fixes: 80853c2ec7f8 ("bgpd: improve labelpool performance at scale")
Signed-off-by: Philippe Guibert
---
 bgpd/bgp_labelpool.c                                | 10 +++++++---
 tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json |  2 +-
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index d33f14ac41..03987b6bbf 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -642,7 +642,11 @@ void bgp_lp_event_zebra_up(void)
 	}
 
 	/* round up */
-	chunks_needed = (labels_needed / lp->next_chunksize) + 1;
+	if (((float)labels_needed / (float)lp->next_chunksize) >
+	    (labels_needed / lp->next_chunksize))
+		chunks_needed = (labels_needed / lp->next_chunksize) + 1;
+	else
+		chunks_needed = (labels_needed / lp->next_chunksize);
 	labels_needed = chunks_needed * lp->next_chunksize;
 
 	/*
@@ -650,8 +654,8 @@ void bgp_lp_event_zebra_up(void)
 	 */
 	list_delete_all_node(lp->chunks);
 
-	if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY, labels_needed,
-					   true))
+	if (labels_needed && !bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY,
+							    labels_needed, true))
 		return;
 
 	lp->pending_count = labels_needed;
diff --git a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
index d35e4ef463..17b9accb4a 100644
--- a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
+++ b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
@@ -2,5 +2,5 @@
 	"ledger":0,
 	"inUse":0,
 	"requests":0,
-	"labelChunks":1
+	"labelChunks":0
 }

From 4a81210169022e58695b3de5a2683d2f563eb74d Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Thu, 7 Sep 2023 18:31:47 +0200
Subject: [PATCH 04/18] topotests: fix accept_own test, bgp label value
 conflict with ldp

When configuring a manual label value in BGP L3VPN, the label
allocation conflicts with the LDP label pool which is in use. Choose
BGP label values different from the ones used by LDP.
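The conflict this avoids boils down to a range-membership test between
a hardset BGP label and the chunks the label manager has already handed
out (such as LDP's initial chunk). A minimal standalone sketch, using
hypothetical names that are not part of this series:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct range { uint32_t start, end; };

    /* Hypothetical helper: true when a hardset label falls inside a
     * chunk already owned by another daemon. */
    static bool label_conflicts(uint32_t label, const struct range *chunks,
                                size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (label >= chunks[i].start && label <= chunks[i].end)
                return true;
        return false;
    }

With LDP holding e.g. [16;72] as in the earlier commit message, a
hardset value of 20 conflicts while 250 and 350 do not, which is why
this test now picks the latter values.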
Signed-off-by: Philippe Guibert
---
 tests/topotests/bgp_accept_own/pe1/bgpd.conf | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/topotests/bgp_accept_own/pe1/bgpd.conf b/tests/topotests/bgp_accept_own/pe1/bgpd.conf
index 15466b4259..1f7abac98f 100644
--- a/tests/topotests/bgp_accept_own/pe1/bgpd.conf
+++ b/tests/topotests/bgp_accept_own/pe1/bgpd.conf
@@ -25,7 +25,7 @@ router bgp 65001 vrf Customer
  neighbor 192.168.1.1 timers connect 1
  address-family ipv4 unicast
   redistribute connected
-  label vpn export 10
+  label vpn export 250
   rd vpn export 192.168.1.2:2
   rt vpn import 192.168.1.2:2
   rt vpn export 192.168.1.2:2
@@ -40,7 +40,7 @@ router bgp 65001 vrf Service
  neighbor 192.168.2.1 timers 1 3
  neighbor 192.168.2.1 timers connect 1
  address-family ipv4 unicast
-  label vpn export 20
+  label vpn export 350
   rd vpn export 192.168.2.2:2
   rt vpn import 192.168.2.2:2
   rt vpn export 192.168.2.2:2

From 0177a0ded1e23c3bf74b984574624feeaac48e8c Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Wed, 27 Sep 2023 07:58:22 +0200
Subject: [PATCH 05/18] bgpd: fix release label chunk when label pool unused

A label chunk is used by BGP for L3VPN or LU purposes, by picking up
labels from that chunk; but when those labels are released, the label
chunks themselves are never released.

The configuration sequence below shows that the label chunks are not
released.

> router bgp 65500
>  bgp router-id 1.1.1.1
>  !
>  address-family ipv4 unicast
>   label vpn export auto
>   rd vpn export 55:1
>   rt vpn both 55:1
>   export vpn
>   import vpn
> [..]
> no label vpn export auto
> [..]
> # show bgp labelpool summary
> [..]
> LabelChunks:   1
> Pending:       128
> [..]

The '128' value stands for the default label chunk size, and that chunk
is not released after unconfiguration.

Fix this by checking, after each label release, whether the label chunk
is still used. If not, release it, and reset the 'next_chunksize' value
to its default.

Fixes: 955bfd984ffd ("bgpd: dynamic mpls label pool")
Signed-off-by: Philippe Guibert
---
 bgpd/bgp_labelpool.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 03987b6bbf..46443d9d18 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -494,8 +494,18 @@ void bgp_lp_release(
 				bf_release_index(chunk->allocated_map, index);
 				chunk->nfree += 1;
 				deallocated = true;
+				break;
 			}
 			assert(deallocated);
+			if (deallocated &&
+			    chunk->nfree == chunk->last - chunk->first + 1 &&
+			    lp_fifo_count(&lp->requests) == 0) {
+				bgp_zebra_release_label_range(chunk->first,
+							      chunk->last);
+				list_delete_node(lp->chunks, node);
+				lp_chunk_free(chunk);
+				lp->next_chunksize = LP_CHUNK_SIZE_MIN;
+			}
 		}
 	}
 }

From b5808ecc892cf4ddf78cd8a340555dea691fbd2c Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Fri, 29 Sep 2023 17:35:54 +0200
Subject: [PATCH 06/18] bgpd: fix wrong 'pending' labelpool counter value at
 startup

If BGP starts with an l3vpn configuration, the 'pending' value of the
'show bgp labelpool summary' command is set to 128, whereas the
'pending' value is 0 if the l3vpn configuration is applied afterwards.
with no config at startup:

> show bgp labelpool summary
> Labelpool Summary
> -----------------
> Ledger:       1
> InUse:        1
> Requests:     0
> LabelChunks:  1
> Pending:      0
> Reconnects:   1

with config at startup:

> show bgp labelpool summary
> Labelpool Summary
> -----------------
> Ledger:       1
> InUse:        1
> Requests:     0
> LabelChunks:  1
> Pending:      128
> Reconnects:   1

When the BGP configuration is applied at startup, the label request
fails, because the zapi connection with zebra is not yet up. At the
zebra up event, the label request is done again and succeeds;
'bgp_lp_event_chunk()' decrements the 'pending_count' value, and
'pending_count' is then set to the 'labels_needed' value.

This method was correct when label requests were asynchronous: the
'pending_count' value was first set, then decremented. With synchronous
label requests, the operations are swapped. Fix this by incrementing
'pending_count' by the expected 'labels_needed' value instead.

Fixes: 0043ebab996e ("bgpd: Use synchronous way to get labels from Zebra")
Signed-off-by: Philippe Guibert
---
 bgpd/bgp_labelpool.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 46443d9d18..b2bb49b943 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -667,8 +667,7 @@ void bgp_lp_event_zebra_up(void)
 	if (labels_needed && !bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY,
 							    labels_needed, true))
 		return;
-
-	lp->pending_count = labels_needed;
+	lp->pending_count += labels_needed;
 
 	/*
 	 * Invalidate any existing labels and requeue them as requests

From 9d32589b58af66621f00e907523d2c52a7c27fcf Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Wed, 4 Oct 2023 14:57:27 +0200
Subject: [PATCH 07/18] zebra, test: mark mpls label chunks as dynamic or
 static

The zebra label manager stores the mpls label chunks, but does not
record whether the label request was for a dynamic or a static chunk.

For every accepted label request, mark the label chunk as dynamic if
the 'base' parameter is set to MPLS_LABEL_BASE_ANY, and as static
otherwise.
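The marking rule itself reduces to a one-liner. As an illustrative
standalone sketch (simplified, not the patch code), a chunk is dynamic
exactly when the label manager picked the range itself:

    #include <stdbool.h>
    #include <stdint.h>

    /* zebra uses base 0 (MPLS_LABEL_BASE_ANY) to mean "any base",
     * as the 'base == 0' check in the test plugin below also assumes. */
    #define MPLS_LABEL_BASE_ANY 0

    /* Dynamic chunk: the client let the label manager choose the range.
     * Static chunk: the client pinned an explicit base label. */
    static bool chunk_is_dynamic(uint32_t requested_base)
    {
        return requested_base == MPLS_LABEL_BASE_ANY;
    }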
Signed-off-by: Philippe Guibert --- tests/zebra/test_lm_plugin.c | 2 +- zebra/label_manager.c | 19 ++++++++++++------- zebra/label_manager.h | 3 ++- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/tests/zebra/test_lm_plugin.c b/tests/zebra/test_lm_plugin.c index 9ad0bc4e17..9895c025f0 100644 --- a/tests/zebra/test_lm_plugin.c +++ b/tests/zebra/test_lm_plugin.c @@ -48,7 +48,7 @@ static int lm_get_chunk_pi(struct label_manager_chunk **lmc, uint32_t base, vrf_id_t vrf_id) { if (base == 0) - *lmc = create_label_chunk(10, 55, 0, 1, 50, 50 + size); + *lmc = create_label_chunk(10, 55, 0, 1, 50, 50 + size, true); else *lmc = assign_label_chunk(10, 55, 0, 1, size, base); diff --git a/zebra/label_manager.c b/zebra/label_manager.c index fa7dbb0a25..b2926da15d 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -184,7 +184,7 @@ void label_manager_init(void) /* alloc and fill a label chunk */ struct label_manager_chunk * create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, - uint8_t keep, uint32_t start, uint32_t end) + uint8_t keep, uint32_t start, uint32_t end, bool is_dynamic) { /* alloc chunk, fill it and return it */ struct label_manager_chunk *lmc = @@ -196,6 +196,7 @@ create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, lmc->instance = instance; lmc->session_id = session_id; lmc->keep = keep; + lmc->is_dynamic = is_dynamic; return lmc; } @@ -254,7 +255,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, /* insert chunk between existing chunks */ if (insert_node) { lmc = create_label_chunk(proto, instance, session_id, keep, - base, end); + base, end, false); listnode_add_before(lbl_mgr.lc_list, insert_node, lmc); return lmc; } @@ -277,7 +278,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, } lmc = create_label_chunk(proto, instance, session_id, keep, - base, end); + base, end, false); if (last_node) listnode_add_before(lbl_mgr.lc_list, last_node, lmc); else @@ -288,7 +289,7 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, /* create a new chunk past all the existing ones and link at * tail */ lmc = create_label_chunk(proto, instance, session_id, keep, - base, end); + base, end, false); listnode_add(lbl_mgr.lc_list, lmc); return lmc; } @@ -315,7 +316,10 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, struct listnode *node; uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN; - /* handle chunks request with a specific base label */ + /* handle chunks request with a specific base label + * - static label requests: BGP hardset value, Pathd + * - segment routing label requests + */ if (base != MPLS_LABEL_BASE_ANY) return assign_specific_label_chunk(proto, instance, session_id, keep, size, base); @@ -331,6 +335,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, lmc->instance = instance; lmc->session_id = session_id; lmc->keep = keep; + lmc->is_dynamic = true; return lmc; } /* check if we hadve a "hole" behind us that we can squeeze into @@ -338,7 +343,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, if ((lmc->start > prev_end) && (lmc->start - prev_end > size)) { lmc = create_label_chunk(proto, instance, session_id, keep, prev_end + 1, - prev_end + size); + prev_end + size, true); listnode_add_before(lbl_mgr.lc_list, node, lmc); return lmc; } @@ -364,7 +369,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, /* create chunk 
and link at tail */
 	lmc = create_label_chunk(proto, instance, session_id, keep, start_free,
-				 start_free + size - 1);
+				 start_free + size - 1, true);
 	listnode_add(lbl_mgr.lc_list, lmc);
 	return lmc;
 }
diff --git a/zebra/label_manager.h b/zebra/label_manager.h
index 74f40fab23..df9513281f 100644
--- a/zebra/label_manager.h
+++ b/zebra/label_manager.h
@@ -42,6 +42,7 @@ struct label_manager_chunk {
 	unsigned short instance;
 	uint32_t session_id;
 	uint8_t keep;
+	uint8_t is_dynamic; /* Tell if chunk is dynamic or static */
 	uint32_t start; /* First label of the chunk */
 	uint32_t end;   /* Last label of the chunk */
 };
@@ -82,7 +83,7 @@ int lm_client_connect_response(uint8_t proto, uint16_t instance,
 /* convenience function to allocate an lmc to be consumed by the above API */
 struct label_manager_chunk *
 create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
-		   uint8_t keep, uint32_t start, uint32_t end);
+		   uint8_t keep, uint32_t start, uint32_t end, bool is_dynamic);
 void delete_label_chunk(void *val);
 
 /* register/unregister callbacks for hooks */

From b71370e83fe999b089ac7a0eecf206ab60bcd55e Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Tue, 3 Oct 2023 17:27:42 +0200
Subject: [PATCH 08/18] zebra: fix label allocation when room space before
 first chunk
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After ISIS first allocates a label chunk at [1000;2000], the '16' label
value is not used when BGP tries to allocate a label chunk in auto
mode. This does not happen when BGP is the only daemon doing label
allocation.

Once a label chunk has been accepted, the next label request checks
whether there is room before the existing label chunk, but it sets the
lower label value to 17, and not 16.

Fix this by changing the previous range end 'prev_end' label value to
15, which is the end of the reserved MPLS label range.

Fixes: 3c8449794318 ("zebra: label manager should never return a reserved block")
Signed-off-by: Philippe Guibert
---
 zebra/label_manager.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/zebra/label_manager.c b/zebra/label_manager.c
index b2926da15d..32dc2cd250 100644
--- a/zebra/label_manager.c
+++ b/zebra/label_manager.c
@@ -314,7 +314,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
 {
 	struct label_manager_chunk *lmc;
 	struct listnode *node;
-	uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN;
+	uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN - 1;
 
 	/* handle chunks request with a specific base label
 	 * - static label requests: BGP hardset value, Pathd
 	 * - segment routing label requests
 	 */

From 0832a2be53eadcf4677c317573d5ac878301a984 Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Tue, 3 Oct 2023 10:22:05 +0200
Subject: [PATCH 09/18] zebra: add 'mpls label dynamic-block' command

Hardset label values (e.g. ISIS Segment-Routing label blocks, a hardset
BGP L3VPN service label) may conflict with label chunks dynamically
allocated by zebra.

Add an optional 'mpls label dynamic-block' command to let the user
define a range that is not in conflict with the hardset values.
Restarting the control planes is recommended when dynamic label chunks
are already allocated. The command is aborted when any hardset label
chunk conflicts with the dynamic block.
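An illustrative configuration, in the same spirit as the examples
earlier in this series (the bounds are arbitrary):

> configure terminal
> mpls label dynamic-block 1000 2000

Dynamic requests such as 'label vpn export auto' are then served from
[1000;2000] only, leaving the rest of the unreserved MPLS range to
hardset values; 'no mpls label dynamic-block' restores the default
(16-1048575) range.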
Signed-off-by: Philippe Guibert --- zebra/label_manager.c | 89 +++++++++++++++++++++++++++++++++++++++++- zebra/label_manager.h | 10 ++++- zebra/zebra_mpls_vty.c | 3 ++ 3 files changed, 98 insertions(+), 4 deletions(-) diff --git a/zebra/label_manager.c b/zebra/label_manager.c index 32dc2cd250..3fc1ee5bae 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -51,10 +51,14 @@ DEFINE_HOOK(lm_get_chunk, DEFINE_HOOK(lm_release_chunk, (struct zserv *client, uint32_t start, uint32_t end), (client, start, end)); +/* show running-config needs an API for dynamic-block */ +DEFINE_HOOK(lm_write_label_block_config, + (struct vty *vty, struct zebra_vrf *zvrf), + (vty, zvrf)); DEFINE_HOOK(lm_cbs_inited, (), ()); -/* define wrappers to be called in zapi_msg.c (as hooks must be called in - * source file where they were defined) +/* define wrappers to be called in zapi_msg.c or zebra_mpls_vty.c (as hooks + * must be called in source file where they were defined) */ void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id) { @@ -71,6 +75,11 @@ void lm_release_chunk_call(struct zserv *client, uint32_t start, uint32_t end) hook_call(lm_release_chunk, client, start, end); } +int lm_write_label_block_config_call(struct vty *vty, struct zebra_vrf *zvrf) +{ + return hook_call(lm_write_label_block_config, vty, zvrf); +} + /* forward declarations of the static functions to be used for some hooks */ static int label_manager_connect(struct zserv *client, vrf_id_t vrf_id); static int label_manager_disconnect(struct zserv *client); @@ -80,6 +89,8 @@ static int label_manager_get_chunk(struct label_manager_chunk **lmc, vrf_id_t vrf_id); static int label_manager_release_label_chunk(struct zserv *client, uint32_t start, uint32_t end); +static int label_manager_write_label_block_config(struct vty *vty, + struct zebra_vrf *zvrf); void delete_label_chunk(void *val) { @@ -138,6 +149,8 @@ void lm_hooks_register(void) hook_register(lm_client_disconnect, label_manager_disconnect); hook_register(lm_get_chunk, label_manager_get_chunk); hook_register(lm_release_chunk, label_manager_release_label_chunk); + hook_register(lm_write_label_block_config, + label_manager_write_label_block_config); } void lm_hooks_unregister(void) { @@ -145,6 +158,8 @@ void lm_hooks_unregister(void) hook_unregister(lm_client_disconnect, label_manager_disconnect); hook_unregister(lm_get_chunk, label_manager_get_chunk); hook_unregister(lm_release_chunk, label_manager_release_label_chunk); + hook_unregister(lm_write_label_block_config, + label_manager_write_label_block_config); } DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table", @@ -163,6 +178,73 @@ DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table", return CMD_SUCCESS; } +DEFPY(mpls_label_dynamic_block, mpls_label_dynamic_block_cmd, + "[no$no] mpls label dynamic-block [(16-1048575)$start (16-1048575)$end]", + NO_STR + MPLS_STR + "Label configuration\n" + "Configure dynamic label block\n" + "Start label\n" + "End label\n") +{ + struct listnode *node; + struct label_manager_chunk *lmc; + + /* unset dynamic range */ + if (no || + (start == MPLS_LABEL_UNRESERVED_MIN && end == MPLS_LABEL_MAX)) { + lbl_mgr.dynamic_block_start = MPLS_LABEL_UNRESERVED_MIN; + lbl_mgr.dynamic_block_end = MPLS_LABEL_MAX; + return CMD_SUCCESS; + } + if (!start || !end) { + vty_out(vty, + "%% label dynamic-block, range missing, aborting\n"); + return CMD_WARNING_CONFIG_FAILED; + } + if (start > end) { + vty_out(vty, + "%% label dynamic-block, wrong range (%ld > %ld), 
aborting\n", + start, end); + return CMD_WARNING_CONFIG_FAILED; + } + + for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { + if (lmc->proto == NO_PROTO) + continue; + if (!lmc->is_dynamic && lmc->start >= (uint32_t)start && + lmc->end <= (uint32_t)end) { + vty_out(vty, + "%% Found a static label chunk [%u-%u] for %s in conflict with the dynamic label block\n", + lmc->start, lmc->end, + zebra_route_string(lmc->proto)); + return CMD_WARNING_CONFIG_FAILED; + } else if (lmc->is_dynamic && (lmc->end > (uint32_t)end || + lmc->start < (uint32_t)start)) { + vty_out(vty, + "%% Found a dynamic label chunk [%u-%u] for %s outside the new dynamic label block, consider restart the service\n", + lmc->start, lmc->end, + zebra_route_string(lmc->proto)); + } + } + lbl_mgr.dynamic_block_start = start; + lbl_mgr.dynamic_block_end = end; + return CMD_SUCCESS; +} + +static int label_manager_write_label_block_config(struct vty *vty, + struct zebra_vrf *zvrf) +{ + if (zvrf_id(zvrf) != VRF_DEFAULT) + return 0; + if (lbl_mgr.dynamic_block_start == MPLS_LABEL_UNRESERVED_MIN && + lbl_mgr.dynamic_block_end == MPLS_LABEL_MAX) + return 0; + vty_out(vty, "mpls label dynamic-block %u %u\n", + lbl_mgr.dynamic_block_start, lbl_mgr.dynamic_block_end); + return 1; +} + /** * Init label manager (or proxy to an external one) */ @@ -170,6 +252,8 @@ void label_manager_init(void) { lbl_mgr.lc_list = list_new(); lbl_mgr.lc_list->del = delete_label_chunk; + lbl_mgr.dynamic_block_start = MPLS_LABEL_UNRESERVED_MIN; + lbl_mgr.dynamic_block_end = MPLS_LABEL_MAX; hook_register(zserv_client_close, lm_client_disconnect_cb); /* register default hooks for the label manager actions */ @@ -179,6 +263,7 @@ void label_manager_init(void) hook_call(lm_cbs_inited); install_element(VIEW_NODE, &show_label_table_cmd); + install_element(CONFIG_NODE, &mpls_label_dynamic_block_cmd); } /* alloc and fill a label chunk */ diff --git a/zebra/label_manager.h b/zebra/label_manager.h index df9513281f..ab6ad7f639 100644 --- a/zebra/label_manager.h +++ b/zebra/label_manager.h @@ -62,11 +62,14 @@ DECLARE_HOOK(lm_get_chunk, DECLARE_HOOK(lm_release_chunk, (struct zserv *client, uint32_t start, uint32_t end), (client, start, end)); +DECLARE_HOOK(lm_write_label_block_config, + (struct vty *vty, struct zebra_vrf *zvrf), + (vty, zvrf)); DECLARE_HOOK(lm_cbs_inited, (), ()); -/* declare wrappers to be called in zapi_msg.c (as hooks must be called in - * source file where they were defined) +/* declare wrappers to be called in zapi_msg.c or zebra_mpls_vty.c (as hooks + * must be called in source file where they were defined) */ void lm_client_connect_call(struct zserv *client, vrf_id_t vrf_id); void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client, @@ -74,6 +77,7 @@ void lm_get_chunk_call(struct label_manager_chunk **lmc, struct zserv *client, vrf_id_t vrf_id); void lm_release_chunk_call(struct zserv *client, uint32_t start, uint32_t end); +int lm_write_label_block_config_call(struct vty *vty, struct zebra_vrf *zvrf); /* API for an external LM to return responses for requests */ int lm_client_connect_response(uint8_t proto, uint16_t instance, @@ -96,6 +100,8 @@ void lm_hooks_unregister(void); */ struct label_manager { struct list *lc_list; + uint32_t dynamic_block_start; + uint32_t dynamic_block_end; }; void label_manager_init(void); diff --git a/zebra/zebra_mpls_vty.c b/zebra/zebra_mpls_vty.c index e64e7009b4..fd09e6b444 100644 --- a/zebra/zebra_mpls_vty.c +++ b/zebra/zebra_mpls_vty.c @@ -22,6 +22,7 @@ #include "zebra/zebra_rnh.h" #include 
"zebra/redistribute.h" #include "zebra/zebra_routemap.h" +#include "zebra/label_manager.h" static int zebra_mpls_transit_lsp(struct vty *vty, int add_cmd, const char *inlabel_str, const char *gate_str, @@ -270,6 +271,8 @@ static int zebra_mpls_config(struct vty *vty) write += zebra_mpls_write_lsp_config(vty, zvrf); write += zebra_mpls_write_fec_config(vty, zvrf); write += zebra_mpls_write_label_block_config(vty, zvrf); + write += lm_write_label_block_config_call(vty, zvrf); + return write; } From 7a7c4bc80ac413d4841c31abb3e22c590ad4cdb2 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Thu, 5 Oct 2023 15:49:10 +0200 Subject: [PATCH 10/18] zebra: rework dynamic label request algorithm The label chunk algorithm needs to be revisited to support a configured dynamic-block or the default one. Reuse the 'lbl_mgr.dynamic_block_[start/end]' variables, whereever needed, and simplify the algorithm. Signed-off-by: Philippe Guibert --- zebra/label_manager.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/zebra/label_manager.c b/zebra/label_manager.c index 3fc1ee5bae..bb722f4313 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -399,7 +399,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, { struct label_manager_chunk *lmc; struct listnode *node; - uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN - 1; + uint32_t prev_end = lbl_mgr.dynamic_block_start - 1; /* handle chunks request with a specific base label * - static label requests: BGP hardset value, Pathd @@ -414,8 +414,9 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, /* first check if there's one available */ for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { - if (lmc->proto == NO_PROTO - && lmc->end - lmc->start + 1 == size) { + if (lmc->start <= prev_end) + continue; + if (lmc->proto == NO_PROTO && lmc->end - lmc->start + 1 == size) { lmc->proto = proto; lmc->instance = instance; lmc->session_id = session_id; @@ -425,7 +426,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, } /* check if we hadve a "hole" behind us that we can squeeze into */ - if ((lmc->start > prev_end) && (lmc->start - prev_end > size)) { + if (lmc->start - prev_end > size) { lmc = create_label_chunk(proto, instance, session_id, keep, prev_end + 1, prev_end + size, true); @@ -438,14 +439,14 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, uint32_t start_free; if (list_isempty(lbl_mgr.lc_list)) - start_free = MPLS_LABEL_UNRESERVED_MIN; + start_free = lbl_mgr.dynamic_block_start; else start_free = ((struct label_manager_chunk *)listgetdata( listtail(lbl_mgr.lc_list))) ->end + 1; - if (start_free > MPLS_LABEL_UNRESERVED_MAX - size + 1) { + if (start_free > lbl_mgr.dynamic_block_end - size + 1) { flog_err(EC_ZEBRA_LM_EXHAUSTED_LABELS, "Reached max labels. Start: %u, size: %u", start_free, size); From fccda55eacd7f1d9b6745038a2f11b1ba9354b4c Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Tue, 3 Oct 2023 10:22:05 +0200 Subject: [PATCH 11/18] zebra: add label chunk allocation in the dynamic block range This commit adds support for the label chunk allocation in the configured dynamic block range. An additional check ensures the upper bound does not go over the upper bound of the dynamic-block. Otherwise, a chunk is created with the lower bound set to the first label element available in the defined range. 
Signed-off-by: Philippe Guibert --- zebra/label_manager.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/zebra/label_manager.c b/zebra/label_manager.c index bb722f4313..24968f6509 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -400,6 +400,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, struct label_manager_chunk *lmc; struct listnode *node; uint32_t prev_end = lbl_mgr.dynamic_block_start - 1; + struct label_manager_chunk *lmc_block_last = NULL; /* handle chunks request with a specific base label * - static label requests: BGP hardset value, Pathd @@ -416,7 +417,9 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { if (lmc->start <= prev_end) continue; - if (lmc->proto == NO_PROTO && lmc->end - lmc->start + 1 == size) { + if (lmc->proto == NO_PROTO && + lmc->end - lmc->start + 1 == size && + lmc->end <= lbl_mgr.dynamic_block_end) { lmc->proto = proto; lmc->instance = instance; lmc->session_id = session_id; @@ -426,7 +429,8 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, } /* check if we hadve a "hole" behind us that we can squeeze into */ - if (lmc->start - prev_end > size) { + if (lmc->start - prev_end > size && + prev_end + 1 + size <= lbl_mgr.dynamic_block_end) { lmc = create_label_chunk(proto, instance, session_id, keep, prev_end + 1, prev_end + size, true); @@ -434,17 +438,19 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, return lmc; } prev_end = lmc->end; + + /* check if we have a chunk that goes over the end block */ + if (lmc->end > lbl_mgr.dynamic_block_end) + continue; + lmc_block_last = lmc; } /* otherwise create a new one */ uint32_t start_free; - if (list_isempty(lbl_mgr.lc_list)) + if (lmc_block_last == NULL) start_free = lbl_mgr.dynamic_block_start; else - start_free = ((struct label_manager_chunk *)listgetdata( - listtail(lbl_mgr.lc_list))) - ->end - + 1; + start_free = lmc_block_last->end + 1; if (start_free > lbl_mgr.dynamic_block_end - size + 1) { flog_err(EC_ZEBRA_LM_EXHAUSTED_LABELS, From dfb56806afa7732b7e8474cef351cb155b7cc675 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Mon, 2 Oct 2023 21:17:20 +0200 Subject: [PATCH 12/18] topotests: fix bgp_vpnv[4,6]_per_nexthop prefix not updated The bgp_vpnv[4,6]_table_check() functions analyze the expected label value of VPN prefixes present in the BGP table. However, it doesn't verify if the prefixes exist before doing this. Consequently, the tests will fail if the prefixes do not show up immediately. Ensure that all expected VPN prefixes are present before executing the function. 
Fixes: ae5a6bc1f6ba ("topotests: add bgp mpls allocation per next-hop test") Fixes: 37a02a8dcb0d ("topotests: add bgp_vpnv6 test allocation") Signed-off-by: Philippe Guibert --- .../test_bgp_vpnv4_per_nexthop_label.py | 16 ++++++++++++++ .../test_bgp_vpnv6_per_nexthop_label.py | 22 +++++++++++++++++-- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py index ce278ed7a7..d4c355a44a 100644 --- a/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py +++ b/tests/topotests/bgp_vpnv4_per_nexthop_label/test_bgp_vpnv4_per_nexthop_label.py @@ -151,6 +151,16 @@ def teardown_module(_mod): tgen.stop_topology() +def check_bgp_vpnv4_prefix_presence(router, prefix): + "Check the presence of a prefix" + tgen = get_topogen() + + dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True) + if not dump: + return "{}, prefix ipv4 vpn {} is not installed yet".format(router.name, prefix) + return None + + def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=None): """ Dump and check that vpnv4 entries have the same MPLS label value @@ -163,6 +173,12 @@ def bgp_vpnv4_table_check(router, group, label_list=None, label_value_expected=N stored_label_inited = False for prefix in group: + test_func = functools.partial(check_bgp_vpnv4_prefix_presence, router, prefix) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert success, "{}, prefix ipv4 vpn {} is not installed yet".format( + router.name, prefix + ) + dump = router.vtysh_cmd("show bgp ipv4 vpn {} json".format(prefix), isjson=True) assert dump, "{0}, {1}, route distinguisher not present".format( router.name, prefix diff --git a/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py b/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py index e936ccc1e4..3d5f8f643b 100644 --- a/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py +++ b/tests/topotests/bgp_vpnv6_per_nexthop_label/test_bgp_vpnv6_per_nexthop_label.py @@ -54,7 +54,7 @@ pytestmark = [pytest.mark.bgpd] PREFIXES_R11 = ["172:31::11/128", "172:31::20/128", "172:31::111/128"] PREFIXES_R12 = ["172:31::12/128", "172:31::15/128"] PREFIXES_REDIST_R14 = ["172:31::14/128"] -PREFIXES_CONNECTED = ["192:168::255/112", "192:2::/64"] +PREFIXES_CONNECTED = ["192:168::255:0/112", "192:2::/64"] def build_topo(tgen): @@ -150,6 +150,16 @@ def teardown_module(_mod): tgen.stop_topology() +def check_bgp_vpnv6_prefix_presence(router, prefix): + "Check the presence of a prefix" + tgen = get_topogen() + + dump = router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True) + if not dump: + return "{}, prefix ipv6 vpn {} is not installed yet".format(router.name, prefix) + return None + + def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=None): """ Dump and check that vpnv6 entries have the same MPLS label value @@ -162,6 +172,12 @@ def bgp_vpnv6_table_check(router, group, label_list=None, label_value_expected=N stored_label_inited = False for prefix in group: + test_func = functools.partial(check_bgp_vpnv6_prefix_presence, router, prefix) + success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert success, "{}, prefix ipv6 vpn {} is not installed yet".format( + router.name, prefix + ) + dump = 
router.vtysh_cmd("show bgp ipv6 vpn {} json".format(prefix), isjson=True) for rd, pathes in dump.items(): for path in pathes["paths"]: @@ -237,7 +253,9 @@ def check_show_mpls_table(router, blacklist=None, label_list=None, whitelist=Non label_list.add(in_label) for nh in label_info["nexthops"]: if "installed" not in nh.keys(): - return "{} {} is not installed yet on {}".format(in_label, label_info, router.name) + return "{} {} is not installed yet on {}".format( + in_label, label_info, router.name + ) if nh["installed"] != True or nh["type"] != "BGP": return "{}, show mpls table, nexthop is not installed".format( router.name From 66c85fde7ed143cf1285b453853c85cfb61a5678 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Tue, 3 Oct 2023 10:58:25 +0200 Subject: [PATCH 13/18] doc: add 'mpls label dynamic-block' information Add information on the 'mpls label dynamic-block' command. Signed-off-by: Philippe Guibert --- doc/user/zebra.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst index ba6e3bf37f..7c50212d48 100644 --- a/doc/user/zebra.rst +++ b/doc/user/zebra.rst @@ -783,6 +783,22 @@ presence of the entry. 21 Static 10.125.0.2 IPv4 Explicit Null +MPLS label chunks +----------------- + +MPLS label chunks are handled in the zebra label manager service, +which ensures a same label value or label chunk can not be used by +multiple CP routing daemons at the same time. + +Label requests originate from CP routing daemons, and are resolved +over the default MPLS range (16-1048575). There are two kind of +requests: +- Static label requests request an exact label value or range. For +instance, segment routing label blocks requests originating from +IS-IS are part of it. +- Dynamic label requests only need a range of label values. The +'bgp l3vpn export auto' command uses such requests. + Allocated label chunks table can be dumped using the command .. clicmd:: show debugging label-table @@ -796,6 +812,15 @@ Allocated label chunks table can be dumped using the command Proto ospf: [20000/21000] Proto isis: [22000/23000] +.. clicmd:: mpls label dynamic-block (16-1048575) (16-1048575) + + Define a range of labels where dynamic label requests will + allocate label chunks from. This command guarantees that + static label values outside that range will not conflict + with the dynamic label requests. When the dynamic-block + range is configured, static label requests that match that + range are not accepted. + .. _zebra-srv6: Segment-Routing IPv6 From 8a400bb70adddd6b701ec50c10d186322e7ae45d Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Wed, 4 Oct 2023 09:30:22 +0200 Subject: [PATCH 14/18] topotests: bgp_srv6l3vpn_to_bgp_vrf[2,3], ignore tableVersion The expected tableVersion is wrong, when checking r1 table. The tableVersion value increments at each route updates. The previous commit brought an additional route update with the 'vpn_leak_postchange_all()' call. Keep the function call, and do not check the table version in bgp_srv6l3vpn_to_bgp_vrf[2,3] tests. 
Fixes: 205b62ffae2c ("bgpd: fix hardset l3vpn label available in mpls pool") Signed-off-by: Philippe Guibert --- tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json | 1 - tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json | 1 - tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json | 1 - .../r1/vpnv4_rib_sid_vpn_export_disabled.json | 1 - .../r1/vpnv4_rib_sid_vpn_export_reenabled.json | 1 - tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json | 1 - .../r1/vpnv6_rib_sid_vpn_export_disabled.json | 1 - .../r1/vpnv6_rib_sid_vpn_export_reenabled.json | 1 - tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json | 1 - .../r2/vpnv4_rib_sid_vpn_export_disabled.json | 1 - .../r2/vpnv4_rib_sid_vpn_export_reenabled.json | 1 - tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json | 1 - .../r2/vpnv6_rib_sid_vpn_export_disabled.json | 1 - .../r2/vpnv6_rib_sid_vpn_export_reenabled.json | 1 - 14 files changed, 14 deletions(-) diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json index 3cc2fddcfa..7a4e0d7452 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r1/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json index 95570541c8..0dcdec678f 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf2/r2/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json index 3cc2fddcfa..7a4e0d7452 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json index eb3433301b..205079574c 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json index 5517fc738a..7a4e0d7452 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv4_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json index 25b7a8616f..0fdd3d6dc0 100644 --- 
a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json index a1f21585d7..e289df1d44 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json index 7eeccd1496..0fdd3d6dc0 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r1/vpnv6_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "1.1.1.1", "defaultLocPrf": 100, "localAS": 1, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json index 95570541c8..0dcdec678f 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json index d801671fdc..a440ab4248 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json index 25da05b0d4..0dcdec678f 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv4_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json index 2cd47b9ce5..03bbcc008d 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 2, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json index f390ef69b1..5c70cf6450 100644 --- 
a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_disabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 4, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json index 3353d75eda..03bbcc008d 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/r2/vpnv6_rib_sid_vpn_export_reenabled.json @@ -1,7 +1,6 @@ { "vrfId": 0, "vrfName": "default", - "tableVersion": 6, "routerId": "2.2.2.2", "defaultLocPrf": 100, "localAS": 2, From 0bd8a160822bf7fe4aafab6eede73b83552f32bc Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Thu, 5 Oct 2023 09:14:45 +0200 Subject: [PATCH 15/18] zebra: add json support to 'show debugging label-table' Add the json keyword to dump the label chunks of the zebra label manager in json format. >dut# show debugging label-table json > { > "chunks":[ > { > "protocol":"bgp", > "instance":0, > "sessionId":1, > "start":16, > "end":16, > "dynamic":true > }, > { > "protocol":"ldp", > "instance":0, > "sessionId":1, > "start":17, > "end":80, > "dynamic":true > } > ] > } Signed-off-by: Philippe Guibert --- zebra/label_manager.c | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/zebra/label_manager.c b/zebra/label_manager.c index 24968f6509..c0564d6540 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -162,19 +162,42 @@ void lm_hooks_unregister(void) label_manager_write_label_block_config); } -DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table", +static json_object *lmc_json(struct label_manager_chunk *lmc) +{ + json_object *json = json_object_new_object(); + + json_object_string_add(json, "protocol", zebra_route_string(lmc->proto)); + json_object_int_add(json, "instance", lmc->instance); + json_object_int_add(json, "sessionId", lmc->session_id); + json_object_int_add(json, "start", lmc->start); + json_object_int_add(json, "end", lmc->end); + json_object_boolean_add(json, "dynamic", lmc->is_dynamic); + return json; +} + +DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table [json$uj]", SHOW_STR DEBUG_STR - "Display allocated label chunks\n") + "Display allocated label chunks\n" + JSON_STR) { struct label_manager_chunk *lmc; struct listnode *node; + json_object *json_array = NULL; + + if (uj) + json_array = json_object_new_array(); for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) { + if (uj) { + json_object_array_add(json_array, lmc_json(lmc)); + continue; + } vty_out(vty, "Proto %s: [%u/%u]\n", zebra_route_string(lmc->proto), lmc->start, lmc->end); } - + if (uj) + vty_json(vty, json_array); return CMD_SUCCESS; } From c6498ace442c63c970f3f5b3db3d08abfab6446d Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Thu, 5 Oct 2023 16:08:32 +0200 Subject: [PATCH 16/18] zebra: dump the dynamic-block bounds on vty command The 'show debugging label-table' needs to dump dynamic block information. Display the lower and upper values for the dynamic block. > # show debugging label-table json > { > "dynamicBlock":{ > "lowerBound":16, > "upperBound":1048575 > }, > [..] > # show debugging label-table > Dynamic block: lower-bound 16, upper-bound 1048575 > [..] 
Signed-off-by: Philippe Guibert
---
 zebra/label_manager.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/zebra/label_manager.c b/zebra/label_manager.c
index c0564d6540..57a58af6bc 100644
--- a/zebra/label_manager.c
+++ b/zebra/label_manager.c
@@ -183,10 +183,21 @@ DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table [json$
 {
 	struct label_manager_chunk *lmc;
 	struct listnode *node;
-	json_object *json_array = NULL;
+	json_object *json_array = NULL, *json_global = NULL, *json_dyn_block;
 
-	if (uj)
+	if (uj) {
 		json_array = json_object_new_array();
+		json_global = json_object_new_object();
+		json_dyn_block = json_object_new_object();
+		json_object_int_add(json_dyn_block, "lowerBound",
+				    lbl_mgr.dynamic_block_start);
+		json_object_int_add(json_dyn_block, "upperBound",
+				    lbl_mgr.dynamic_block_end);
+		json_object_object_add(json_global, "dynamicBlock",
+				       json_dyn_block);
+	} else
+		vty_out(vty, "Dynamic block: lower-bound %u, upper-bound %u\n",
+			lbl_mgr.dynamic_block_start, lbl_mgr.dynamic_block_end);
 
 	for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {
 		if (uj) {
@@ -196,8 +207,10 @@ DEFPY(show_label_table, show_label_table_cmd, "show debugging label-table [json$
 		vty_out(vty, "Proto %s: [%u/%u]\n",
 			zebra_route_string(lmc->proto), lmc->start, lmc->end);
 	}
-	if (uj)
-		vty_json(vty, json_array);
+	if (uj) {
+		json_object_object_add(json_global, "chunks", json_array);
+		vty_json(vty, json_global);
+	}
 	return CMD_SUCCESS;
 }

From 3cae02642844eed25cd5f165774c19cbd54e8ca2 Mon Sep 17 00:00:00 2001
From: Louis Scalbert
Date: Tue, 26 Sep 2023 14:29:42 +0200
Subject: [PATCH 17/18] topotests: add bgp_l3vpn_label_export test

There is no test that checks the label allocation mechanisms involved
when using BGP and/or LDP:

- Some configuration changes are applied in the BGP configuration, and
  the impact is checked on the BGP contexts and on the label manager.
- The label manager dynamic range is reconfigured; BGP auto mode is
  checked against the new range, and LDP is checked after a restart
  (illustrated below).
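As an illustration of the kind of checks involved, using commands added
or discussed earlier in this series (the exact assertions live in the
python file below):

> show bgp labelpool summary json
> show debugging label-table json

The first shows the chunks BGP believes it owns; the second shows what
the zebra label manager actually allocated, including whether each
chunk is dynamic and the configured dynamic-block bounds.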
Signed-off-by: Louis Scalbert Signed-off-by: Philippe Guibert --- .../bgp_l3vpn_label_export/__init__.py | 0 .../bgp_l3vpn_label_export/r1/bgpd.conf | 22 + .../bgp_l3vpn_label_export/r1/ldpd.conf | 26 + .../bgp_l3vpn_label_export/r1/staticd.conf | 1 + .../bgp_l3vpn_label_export/r1/zebra.conf | 7 + .../bgp_l3vpn_label_export/r2/bgpd.conf | 23 + .../bgp_l3vpn_label_export/r2/ldpd.conf | 24 + .../bgp_l3vpn_label_export/r2/staticd.conf | 1 + .../bgp_l3vpn_label_export/r2/zebra.conf | 7 + .../test_bgp_l3vpn_label_export.py | 587 ++++++++++++++++++ 10 files changed, 698 insertions(+) create mode 100644 tests/topotests/bgp_l3vpn_label_export/__init__.py create mode 100644 tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf create mode 100644 tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py diff --git a/tests/topotests/bgp_l3vpn_label_export/__init__.py b/tests/topotests/bgp_l3vpn_label_export/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf new file mode 100644 index 0000000000..bb1ed4c1ea --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r1/bgpd.conf @@ -0,0 +1,22 @@ +router bgp 65001 + bgp router-id 192.0.2.1 + no bgp default ipv4-unicast + no bgp ebgp-requires-policy + neighbor 192.0.2.2 remote-as 65002 + neighbor 192.0.2.2 timers 1 3 + neighbor 192.0.2.2 timers connect 1 + neighbor 192.0.2.2 ebgp-multihop 2 + address-family ipv4 vpn + neighbor 192.0.2.2 activate + exit-address-family +! +router bgp 65001 vrf vrf1 + address-family ipv4 unicast + redistribute connected + label vpn export 1111 + rd vpn export 101:1 + rt vpn both 52:100 + import vpn + export vpn + exit-address-family +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf new file mode 100644 index 0000000000..04ae06877a --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r1/ldpd.conf @@ -0,0 +1,26 @@ +hostname r1 +log file ldpd.log +password zebra +! +! debug mpls ldp zebra +! debug mpls ldp event +! debug mpls ldp errors +! debug mpls ldp messages recv +! debug mpls ldp messages sent +! debug mpls ldp discovery hello recv +! debug mpls ldp discovery hello sent +! +mpls ldp + router-id 192.0.2.1 + ! + address-family ipv4 + discovery transport-address 192.0.2.1 + ! + interface r1-eth0 + ! + interface r1-eth1 + ! + ! +! +line vty +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf b/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf new file mode 100644 index 0000000000..7f2f057bfe --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r1/staticd.conf @@ -0,0 +1 @@ +ip route 192.0.2.2/32 192.168.1.2 diff --git a/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf b/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf new file mode 100644 index 0000000000..7bdacb1ca3 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r1/zebra.conf @@ -0,0 +1,7 @@ +! 
+interface lo + ip address 192.0.2.1/32 +! +interface r1-eth0 + ip address 192.168.1.1/24 +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf new file mode 100644 index 0000000000..18a11cfb40 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/bgpd.conf @@ -0,0 +1,23 @@ +router bgp 65002 + bgp router-id 192.0.2.2 + no bgp default ipv4-unicast + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as 65001 + neighbor 192.168.1.1 timers 1 3 + neighbor 192.168.1.1 timers connect 1 + neighbor 192.168.1.1 ebgp-multihop 2 + neighbor 192.168.1.1 update-source 192.0.2.2 + address-family ipv4 vpn + neighbor 192.168.1.1 activate + exit-address-family +! +router bgp 65002 vrf vrf1 + address-family ipv4 unicast + redistribute connected + label vpn export 2222 + rd vpn export 102:1 + rt vpn both 52:100 + import vpn + export vpn + exit-address-family +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf new file mode 100644 index 0000000000..f4307f1ab0 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/ldpd.conf @@ -0,0 +1,24 @@ +hostname r2 +log file ldpd.log +password zebra +! +! debug mpls ldp zebra +! debug mpls ldp event +! debug mpls ldp errors +! debug mpls ldp messages recv +! debug mpls ldp messages sent +! debug mpls ldp discovery hello recv +! debug mpls ldp discovery hello sent +! +mpls ldp + router-id 192.0.2.2 + ! + address-family ipv4 + discovery transport-address 192.0.2.2 + ! + interface r2-eth0 + ! + ! +! +line vty +! diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf b/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf new file mode 100644 index 0000000000..e3f5d7dba0 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/staticd.conf @@ -0,0 +1 @@ +ip route 192.0.2.1/32 192.168.1.1 diff --git a/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf b/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf new file mode 100644 index 0000000000..40dfa9854c --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/r2/zebra.conf @@ -0,0 +1,7 @@ +! +int lo + ip address 192.0.2.2/32 +! +interface r2-eth0 + ip address 192.168.1.2/24 +! diff --git a/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py b/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py new file mode 100644 index 0000000000..7c23a3e899 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_label_export/test_bgp_l3vpn_label_export.py @@ -0,0 +1,587 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright (c) 2023 by Louis Scalbert +# Copyright 2023 6WIND S.A. 
+#
+
+"""
+Test the BGP L3VPN label export modes against the zebra label manager and LDP.
+"""
+
+import os
+import re
+import sys
+import json
+import pytest
+import functools
+
+from copy import deepcopy
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import kill_router_daemons, start_router_daemons, step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    for rtr in [1, 2]:
+        tgen.add_router("r{}".format(rtr))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for rtr in [1, 2]:
+        tgen.gears["r{}".format(rtr)].cmd("ip link add vrf1 type vrf table 10")
+        tgen.gears["r{}".format(rtr)].cmd("ip link set vrf1 up")
+        tgen.gears["r{}".format(rtr)].cmd(
+            "ip address add dev vrf1 192.0.3.{}/32".format(rtr)
+        )
+        tgen.gears["r{}".format(rtr)].run(
+            "sysctl -w net.mpls.conf.r{}-eth0.input=1".format(rtr)
+        )
+        tgen.gears["r{}".format(rtr)].run("sysctl -w net.mpls.conf.vrf1.input=1")
+
+    for rname, router in router_list.items():
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_STATIC, os.path.join(CWD, "{}/staticd.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
+        )
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def check_bgp_vpn_prefix(label, rname="r1", rd=None):
+    tgen = get_topogen()
+
+    if rd:
+        output = json.loads(
+            tgen.gears[rname].vtysh_cmd(
+                "show bgp ipv4 vpn rd {} 192.0.3.2/32 json".format(rd)
+            )
+        )
+    else:
+        output = json.loads(
+            tgen.gears[rname].vtysh_cmd(
+                "show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json"
+            )
+        )
+
+    if label == "auto":
+        expected = {
+            "paths": [
+                {
+                    "valid": True,
+                    "aspath": {"string": "65002"},
+                    "nexthops": [{"ip": "192.0.2.2"}],
+                },
+            ]
+        }
+    elif label and not rd:
+        expected = {
+            "paths": [
+                {
+                    "valid": True,
+                    "remoteLabel": label,
+                    "aspath": {"string": "65002"},
+                    "nexthops": [{"ip": "192.0.2.2"}],
+                },
+            ]
+        }
+    elif label and rd:
+        expected = {
+            "102:1": {
+                "prefix": "192.0.3.2/32",
+                "paths": [
+                    {
+                        "valid": True,
+                        "remoteLabel": label,
+                        "nexthops": [{"ip": "0.0.0.0"}],
+                    }
+                ],
+            }
+        }
+    else:
+        expected = {}
+
+    return topotest.json_cmp(output, expected, exact=(label is None))
+
+
+def check_mpls_table(label, protocol):
+    tgen = get_topogen()
+
+    if label == "auto":
+        cmd = "show mpls table json"
+    else:
+        cmd = "show mpls table {} json".format(label)
+
+    output = json.loads(tgen.gears["r2"].vtysh_cmd(cmd))
+
+    if label == "auto" and protocol:
+        output_copy = deepcopy(output)
+        for data in output_copy.values():
+            for nexthop in data.get("nexthops", []):
+                if nexthop.get("type", None) != protocol:
+                    continue
+                output = data
+                break
+
+    if protocol:
+        expected = {
+            "nexthops": [
+                {
+                    "type": protocol,
+                },
+            ]
+        }
+    else:
+        expected = {}
+
+    return topotest.json_cmp(output, expected, exact=(protocol is None))
+
+
+def check_mpls_ldp_binding():
+    tgen = get_topogen()
+
+    output = json.loads(
+        tgen.gears["r1"].vtysh_cmd("show mpls ldp binding 192.0.2.2/32 json")
+    )
+    expected = {
+        "bindings": [
+            {
"prefix": "192.0.2.2/32", + "localLabel": "16", # first available label + "inUse": 1, + }, + ] + } + + return topotest.json_cmp(output, expected) + + +def test_convergence(): + "Test protocol convergence" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Check BGP and LDP convergence") + test_func = functools.partial(check_bgp_vpn_prefix, 2222) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R1" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, 2222, "BGP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: [2222/2222]" in output, "Failed to see BGP label chunk" + + +def test_vpn_label_export_16(): + "Test that assigning the label value of 16 is not possible because it used by LDP" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export 16" + ) + + step("Check that label vpn export 16 fails") + test_func = functools.partial(check_bgp_vpn_prefix, None) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP prefix on R1" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, 2222, None) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp" not in output, "Unexpected BGP label chunk" + + +def test_vpn_label_export_2222(): + "Test that setting back the label value of 2222 works" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export 2222" + ) + + step("Check that label vpn export 2222 is OK") + test_func = functools.partial(check_bgp_vpn_prefix, 2222) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R1" + + test_func = 
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see LDP binding on R2"
+
+    test_func = functools.partial(check_mpls_table, 16, "LDP")
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see LDP label on R2"
+
+    test_func = functools.partial(check_mpls_table, 2222, "BGP")
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP label on R2"
+
+    output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+    assert re.match(
+        r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+    ), "Failed to see LDP label chunk"
+
+    output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+    assert "Proto bgp: [2222/2222]" in output, "Failed to see BGP label chunk"
+
+
+def test_vpn_label_export_auto():
+    "Test that setting label vpn export auto works"
+
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    tgen.gears["r2"].vtysh_cmd(
+        "conf\n"
+        "router bgp 65002 vrf vrf1\n"
+        "address-family ipv4 unicast\n"
+        "label vpn export auto"
+    )
+
+    step("Check that label vpn export auto is OK")
+    test_func = functools.partial(check_bgp_vpn_prefix, "auto")
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefix on R1"
+
+    test_func = functools.partial(check_mpls_ldp_binding)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see LDP binding on R2"
+
+    test_func = functools.partial(check_mpls_table, 16, "LDP")
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see LDP label on R2"
+
+    test_func = functools.partial(check_mpls_table, "auto", "BGP")
+    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert result is None, "Failed to see BGP label on R2"
+
+    output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto")
+    assert re.match(
+        r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output
+    ), "Failed to see LDP label chunk"
+
+    output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+    assert "Proto bgp: " in output, "Failed to see BGP label chunk"
+
+
+def test_vpn_label_export_no_auto():
+    "Test that unsetting label vpn export auto removes the prefix from the R1 table and the R2 LDP table"
+
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    output = json.loads(
+        tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json")
+    )
+
+    auto_label = output.get("paths")[0].get("remoteLabel", None)
+    assert auto_label is not None, "Failed to fetch prefix label on R1"
+
+    tgen.gears["r2"].vtysh_cmd(
+        "conf\n"
+        "router bgp 65002 vrf vrf1\n"
+        "address-family ipv4 unicast\n"
+        "no label vpn export auto"
+    )
+
+    step("Check that no label vpn export auto is OK")
+    test_func = functools.partial(check_bgp_vpn_prefix, 3, rname="r2", rd="102:1")
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefix with label 3 on R2"
+
+    test_func = functools.partial(check_mpls_ldp_binding)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see LDP binding on R2"
+
+    test_func = functools.partial(check_mpls_table, 16, "LDP")
+    _, result = 
topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, auto_label, None) + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " not in output, "Unexpected BGP label chunk" + + +def test_vpn_label_export_auto_back(): + "Test that setting back label vpn export auto works" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + output = json.loads( + tgen.gears["r2"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json") + ) + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export auto" + ) + + step("Check that label vpn export auto is OK") + test_func = functools.partial(check_bgp_vpn_prefix, "auto") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R1" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, "auto", "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Failed to see BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" + + +def test_vpn_label_export_manual_from_auto(): + "Test that setting a manual label value from the BGP chunk range works" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + output = json.loads( + tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv4 unicast 192.0.3.2/32 json") + ) + + auto_label = output.get("paths")[0].get("remoteLabel", None) + assert auto_label is not None, "Failed to fetch prefix label on R1" + + auto_label = auto_label + 1 + + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export {}".format(auto_label) + ) + + step("Check that label vpn export {} is OK".format(auto_label)) + test_func = functools.partial( + check_bgp_vpn_prefix, auto_label, rname="r2", rd="102:1" + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see BGP prefix on R2" + + test_func = functools.partial(check_mpls_ldp_binding) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP binding on R2" + + test_func = functools.partial(check_mpls_table, 16, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, 
"Failed to see LDP label on R2" + + test_func = functools.partial(check_mpls_table, auto_label, "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Failed to see BGP label on R2" + + output = tgen.net["r2"].cmd("vtysh -c 'show debugging label-table' | grep Proto") + assert re.match( + r"Proto ldp: \[16/(1[7-9]|[2-9]\d+|\d{3,})\]", output + ), "Failed to see LDP label chunk" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" + + +def test_vpn_label_configure_dynamic_range(): + "Test that if a dynamic range is configured, then the next dynamic allocations will be done in that block" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + tgen.gears["r2"].vtysh_cmd("conf\n" "mpls label dynamic-block 500 1000\n") + tgen.gears["r2"].vtysh_cmd( + "conf\n" + "router bgp 65002 vrf vrf1\n" + "address-family ipv4 unicast\n" + "label vpn export auto" + ) + step("Check that label vpn export auto starting at 500 is OK") + test_func = functools.partial(check_bgp_vpn_prefix, 500, rname="r2", rd="102:1") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Unexpected BGP prefix on R2" + + test_func = functools.partial(check_mpls_table, 500, "BGP") + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assert result is None, "Unexpected BGP label on R2" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto bgp: " in output, "Failed to see BGP label chunk" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) + + +def test_vpn_label_restart_ldp(): + "Test that if a dynamic range is configured, then when LDP restarts, it follows the new dynamic range" + + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_list = tgen.routers() + + step("Kill LDP on R2") + kill_router_daemons(tgen, "r2", ["ldpd"]) + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto ldp: " not in output, "Unexpected LDP label chunk" + + step("Bring up LDP on R2") + + start_router_daemons(tgen, "r2", ["ldpd"]) + + test_func = functools.partial(check_mpls_table, 628, "LDP") + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see LDP label on R2" + + output = tgen.gears["r2"].vtysh_cmd("show debugging label-table") + assert "Proto ldp: [628/691]" in output, "Failed to see LDP label chunk [628/691]" + assert "Proto ldp: [692/755]" in output, "Failed to see LDP label chunk [692/755]" + + +def test_vpn_label_unconfigure_dynamic_range(): + "Test that if the dynamic range is unconfigured, then the next dynamic allocations will be done at the first free place." 
+
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    tgen.gears["r2"].vtysh_cmd("conf\n" "no mpls label dynamic-block 500 1000\n")
+    step("Check that unconfiguring label vpn export auto will remove BGP label chunk")
+    tgen.gears["r2"].vtysh_cmd(
+        "conf\n"
+        "router bgp 65002 vrf vrf1\n"
+        "address-family ipv4 unicast\n"
+        "no label vpn export auto"
+    )
+
+    output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+    assert "Proto bgp: " not in output, "Unexpected BGP label chunk"
+
+    tgen.gears["r2"].vtysh_cmd(
+        "conf\n"
+        "router bgp 65002 vrf vrf1\n"
+        "address-family ipv4 unicast\n"
+        "label vpn export auto"
+    )
+    step("Check that label vpn export auto starting at 16 is OK")
+    test_func = functools.partial(check_bgp_vpn_prefix, 16, rname="r2", rd="102:1")
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP prefix with label 16 on R2"
+
+    test_func = functools.partial(check_mpls_table, 16, "BGP")
+    _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+    assert result is None, "Failed to see BGP label 16 on R2"
+
+    output = tgen.gears["r2"].vtysh_cmd("show debugging label-table")
+    assert "Proto bgp: " in output, "Failed to see BGP label chunk"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))

From d3f686d163cbbf9cf2d82701098818a6b150f1ef Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Fri, 6 Oct 2023 14:53:31 +0200
Subject: [PATCH 18/18] zebra: do not accept static label requests conflicting
 with dynamic-block

A static label allocation should not be accepted if the requested range
conflicts with the configured dynamic-block range. Reject such label
requests, but only when a dynamic block is actually configured.

Signed-off-by: Philippe Guibert
---
 zebra/label_manager.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/zebra/label_manager.c b/zebra/label_manager.c
index 57a58af6bc..8ae6e0cc3f 100644
--- a/zebra/label_manager.c
+++ b/zebra/label_manager.c
@@ -345,6 +345,15 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance,
 		return NULL;
 	}
 
+	if ((lbl_mgr.dynamic_block_start != MPLS_LABEL_UNRESERVED_MIN ||
+	     lbl_mgr.dynamic_block_end != MPLS_LABEL_MAX) &&
+	    base >= lbl_mgr.dynamic_block_start &&
+	    end <= lbl_mgr.dynamic_block_end) {
+		zlog_warn("Invalid LM request arguments: base: %u, size: %u for %s in conflict with the dynamic label block",
+			  base, size, zebra_route_string(proto));
+		return NULL;
+	}
+
 	/* Scan the existing chunks to see if the requested range of labels
 	 * falls inside any of such chunks */
 	for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {