Merge pull request #5865 from slankdev/slankdev-zebra-srv6-manager

zebra: srv6 manager
Mark Stapp 2021-06-04 13:41:55 -04:00 committed by GitHub
commit e4768d32b8
107 changed files with 6049 additions and 22 deletions


@ -676,6 +676,10 @@ unsigned int attrhash_key_make(const void *p)
MIX(transit_hash_key_make(bgp_attr_get_transit(attr)));
if (attr->encap_subtlvs)
MIX(encap_hash_key_make(attr->encap_subtlvs));
if (attr->srv6_l3vpn)
MIX(srv6_l3vpn_hash_key_make(attr->srv6_l3vpn));
if (attr->srv6_vpn)
MIX(srv6_vpn_hash_key_make(attr->srv6_vpn));
#ifdef ENABLE_BGP_VNC
struct bgp_attr_encap_subtlv *vnc_subtlvs =
bgp_attr_get_vnc_subtlvs(attr);
@ -1141,6 +1145,16 @@ void bgp_attr_undup(struct attr *new, struct attr *old)
if (new->lcommunity != old->lcommunity)
lcommunity_free(&new->lcommunity);
if (new->srv6_l3vpn != old->srv6_l3vpn) {
srv6_l3vpn_free(new->srv6_l3vpn);
new->srv6_l3vpn = NULL;
}
if (new->srv6_vpn != old->srv6_vpn) {
srv6_vpn_free(new->srv6_vpn);
new->srv6_vpn = NULL;
}
}
/* Free bgp attribute and aspath. */
@ -1202,6 +1216,14 @@ void bgp_attr_flush(struct attr *attr)
encap_free(attr->encap_subtlvs);
attr->encap_subtlvs = NULL;
}
if (attr->srv6_l3vpn && !attr->srv6_l3vpn->refcnt) {
srv6_l3vpn_free(attr->srv6_l3vpn);
attr->srv6_l3vpn = NULL;
}
if (attr->srv6_vpn && !attr->srv6_vpn->refcnt) {
srv6_vpn_free(attr->srv6_vpn);
attr->srv6_vpn = NULL;
}
#ifdef ENABLE_BGP_VNC
struct bgp_attr_encap_subtlv *vnc_subtlvs =
bgp_attr_get_vnc_subtlvs(attr);
@ -2676,6 +2698,7 @@ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length,
sizeof(struct bgp_attr_srv6_vpn));
attr->srv6_vpn->sid_flags = sid_flags;
sid_copy(&attr->srv6_vpn->sid, &ipv6_sid);
attr->srv6_vpn = srv6_vpn_intern(attr->srv6_vpn);
}
/* Placeholder code for the SRv6 L3 Service type */
@ -2718,6 +2741,7 @@ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length,
attr->srv6_l3vpn->sid_flags = sid_flags;
attr->srv6_l3vpn->endpoint_behavior = endpoint_behavior;
sid_copy(&attr->srv6_l3vpn->sid, &ipv6_sid);
attr->srv6_l3vpn = srv6_l3vpn_intern(attr->srv6_l3vpn);
}
/* Placeholder code for Unsupported TLV */
@ -4098,7 +4122,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
}
/* SRv6 Service Information Attribute. */
if (afi == AFI_IP && safi == SAFI_MPLS_VPN) {
if ((afi == AFI_IP || afi == AFI_IP6) && safi == SAFI_MPLS_VPN) {
if (attr->srv6_l3vpn) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);


@ -317,6 +317,8 @@ static int bgp_vrf_enable(struct vrf *vrf)
bgp_instance_up(bgp);
vpn_leak_zebra_vrf_label_update(bgp, AFI_IP);
vpn_leak_zebra_vrf_label_update(bgp, AFI_IP6);
vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP);
vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP6);
vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP,
bgp_get_default(), bgp);
vpn_leak_postchange(BGP_VPN_POLICY_DIR_FROMVPN, AFI_IP,


@ -142,3 +142,5 @@ DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_INDEX, "BGP flowspec index");
DEFINE_MTYPE(BGPD, BGP_SRV6_L3VPN, "BGP prefix-sid srv6 l3vpn service");
DEFINE_MTYPE(BGPD, BGP_SRV6_VPN, "BGP prefix-sid srv6 vpn service");
DEFINE_MTYPE(BGPD, BGP_SRV6_SID, "BGP srv6 segment-id");
DEFINE_MTYPE(BGPD, BGP_SRV6_FUNCTION, "BGP srv6 function");


@ -139,5 +139,7 @@ DECLARE_MTYPE(BGP_FLOWSPEC_INDEX);
DECLARE_MTYPE(BGP_SRV6_L3VPN);
DECLARE_MTYPE(BGP_SRV6_VPN);
DECLARE_MTYPE(BGP_SRV6_SID);
DECLARE_MTYPE(BGP_SRV6_FUNCTION);
#endif /* _QUAGGA_BGP_MEMORY_H */


@ -47,6 +47,7 @@
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_nht.h"
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_memory.h"
#ifdef ENABLE_BGP_VNC
#include "bgpd/rfapi/rfapi_backend.h"
@ -356,6 +357,83 @@ void vpn_leak_zebra_vrf_label_withdraw(struct bgp *bgp, afi_t afi)
bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = label;
}
/*
* This function informs zebra of the srv6-function this vrf sets on routes
* leaked to VPN. Zebra should install this srv6-function in the kernel with
* an action of "End.DT4/6's IP FIB to route the PDU."
*/
void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi)
{
int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
enum seg6local_action_t act;
struct seg6local_context ctx = {};
struct in6_addr *tovpn_sid = NULL;
struct in6_addr *tovpn_sid_ls = NULL;
struct vrf *vrf;
char buf[256] = {0};
if (bgp->vrf_id == VRF_UNKNOWN) {
if (debug)
zlog_debug("%s: vrf %s: afi %s: vrf_id not set, can't set zebra vrf label",
__func__, bgp->name_pretty, afi2str(afi));
return;
}
tovpn_sid = bgp->vpn_policy[afi].tovpn_sid;
if (!tovpn_sid) {
if (debug)
zlog_debug("%s: vrf %s: afi %s: sid not set", __func__,
bgp->name_pretty, afi2str(afi));
return;
}
if (debug) {
inet_ntop(AF_INET6, tovpn_sid, buf, sizeof(buf));
zlog_debug("%s: vrf %s: afi %s: setting sid %s for vrf id %d",
__func__, bgp->name_pretty, afi2str(afi), buf,
bgp->vrf_id);
}
vrf = vrf_lookup_by_id(bgp->vrf_id);
if (!vrf)
return;
ctx.table = vrf->data.l.table_id;
act = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4
: ZEBRA_SEG6_LOCAL_ACTION_END_DT6;
zclient_send_localsid(zclient, tovpn_sid, bgp->vrf_id, act, &ctx);
tovpn_sid_ls = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
*tovpn_sid_ls = *tovpn_sid;
bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent = tovpn_sid_ls;
}
/*
* If zebra tells us vrf has become unconfigured, tell zebra not to
* use this srv6-function to forward to the vrf anymore
*/
void vpn_leak_zebra_vrf_sid_withdraw(struct bgp *bgp, afi_t afi)
{
int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
if (bgp->vrf_id == VRF_UNKNOWN) {
if (debug)
zlog_debug("%s: vrf %s: afi %s: vrf_id not set, can't set zebra vrf label",
__func__, bgp->name_pretty, afi2str(afi));
return;
}
if (debug)
zlog_debug("%s: deleting sid for vrf %s afi (id=%d)", __func__,
bgp->name_pretty, bgp->vrf_id);
zclient_send_localsid(zclient,
bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent,
bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, NULL);
XFREE(MTYPE_BGP_SRV6_SID,
bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent);
}
int vpn_leak_label_callback(
mpls_label_t label,
void *labelid,
@ -417,6 +495,129 @@ int vpn_leak_label_callback(
return 0;
}
static void sid_register(struct bgp *bgp, const struct in6_addr *sid,
const char *locator_name)
{
struct bgp_srv6_function *func;
func = XCALLOC(MTYPE_BGP_SRV6_FUNCTION,
sizeof(struct bgp_srv6_function));
func->sid = *sid;
snprintf(func->locator_name, sizeof(func->locator_name),
"%s", locator_name);
listnode_add(bgp->srv6_functions, func);
}
static bool sid_exist(struct bgp *bgp, const struct in6_addr *sid)
{
struct listnode *node;
struct bgp_srv6_function *func;
for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func))
if (sid_same(&func->sid, sid))
return true;
return false;
}
/*
* if index != 0: try to allocate as index-mode
* else: try to allocate as auto-mode
*/
static bool alloc_new_sid(struct bgp *bgp, uint32_t index,
struct in6_addr *sid)
{
struct listnode *node;
struct prefix_ipv6 *chunk;
struct in6_addr sid_buf;
bool alloced = false;
if (!bgp || !sid)
return false;
for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
sid_buf = chunk->prefix;
if (index != 0) {
sid_buf.s6_addr[15] = index;
if (sid_exist(bgp, &sid_buf))
return false;
alloced = true;
break;
}
for (size_t i = 1; i < 255; i++) {
sid_buf.s6_addr[15] = (i & 0x00ff);
sid_buf.s6_addr[14] = (i & 0xff00) >> 8;
if (sid_exist(bgp, &sid_buf))
continue;
alloced = true;
break;
}
}
if (!alloced)
return false;
sid_register(bgp, &sid_buf, bgp->srv6_locator_name);
*sid = sid_buf;
return true;
}
void ensure_vrf_tovpn_sid(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi)
{
int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
bool alloced = false;
char buf[256];
struct in6_addr *sid;
uint32_t tovpn_sid_index = 0;
bool tovpn_sid_auto = false;
if (debug)
zlog_debug("%s: try to allocate new SID for vrf %s: afi %s",
__func__, bgp_vrf->name_pretty, afi2str(afi));
/* skip when tovpn sid is already allocated on vrf instance */
if (bgp_vrf->vpn_policy[afi].tovpn_sid)
return;
/*
* skip when bgp vpn instance isn't allocated
* or srv6 locator chunk isn't allocated
*/
if (!bgp_vpn || !bgp_vpn->srv6_locator_chunks || !bgp_vrf)
return;
tovpn_sid_index = bgp_vrf->vpn_policy[afi].tovpn_sid_index;
tovpn_sid_auto = CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_SID_AUTO);
/* skip when VPN isn't configured on vrf-instance */
if (tovpn_sid_index == 0 && !tovpn_sid_auto)
return;
/* check invalid case both configured index and auto */
if (tovpn_sid_index != 0 && tovpn_sid_auto) {
zlog_err("%s: index-mode and auto-mode both selected. ignored.",
__func__);
return;
}
sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
alloced = alloc_new_sid(bgp_vpn, tovpn_sid_index, sid);
if (!alloced) {
zlog_debug("%s: not allocated new sid for vrf %s: afi %s",
__func__, bgp_vrf->name_pretty, afi2str(afi));
return;
}
if (debug) {
inet_ntop(AF_INET6, sid, buf, sizeof(buf));
zlog_debug("%s: new sid %s allocated for vrf %s: afi %s",
__func__, buf, bgp_vrf->name_pretty,
afi2str(afi));
}
bgp_vrf->vpn_policy[afi].tovpn_sid = sid;
}
static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2)
{
uint32_t i, j;
@ -487,6 +688,32 @@ static void setlabels(struct bgp_path_info *bpi,
extra->num_labels = num_labels;
}
/*
* make encoded route SIDs match specified encoded sid set
*/
static void setsids(struct bgp_path_info *bpi,
struct in6_addr *sid,
uint32_t num_sids)
{
uint32_t i;
struct bgp_path_info_extra *extra;
if (num_sids)
assert(sid);
assert(num_sids <= BGP_MAX_SIDS);
if (!num_sids) {
if (bpi->extra)
bpi->extra->num_sids = 0;
return;
}
extra = bgp_path_info_extra_get(bpi);
for (i = 0; i < num_sids; i++)
memcpy(&extra->sid[i], &sid[i], sizeof(struct in6_addr));
extra->num_sids = num_sids;
}
/*
* returns pointer to new bgp_path_info upon success
*/
@ -502,6 +729,10 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
struct bgp_path_info *bpi;
struct bgp_path_info *bpi_ultimate;
struct bgp_path_info *new;
uint32_t num_sids = 0;
if (new_attr->srv6_l3vpn || new_attr->srv6_vpn)
num_sids = 1;
if (debug)
zlog_debug(
@ -580,6 +811,18 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (!labelssame)
setlabels(bpi, label, num_labels);
/*
* rewrite sid
*/
if (num_sids) {
if (new_attr->srv6_l3vpn)
setsids(bpi, &new_attr->srv6_l3vpn->sid,
num_sids);
else if (new_attr->srv6_vpn)
setsids(bpi, &new_attr->srv6_vpn->sid,
num_sids);
}
if (nexthop_self_flag)
bgp_path_info_set_flag(bn, bpi, BGP_PATH_ANNC_NH_SELF);
@ -642,6 +885,16 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
bgp_path_info_extra_get(new);
/*
* rewrite sid
*/
if (num_sids) {
if (new_attr->srv6_l3vpn)
setsids(new, &new_attr->srv6_l3vpn->sid, num_sids);
else if (new_attr->srv6_vpn)
setsids(new, &new_attr->srv6_vpn->sid, num_sids);
}
if (num_labels)
setlabels(new, label, num_labels);
@ -898,6 +1151,17 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
static_attr.originator_id = bgp_vpn->router_id;
/* Set SID for SRv6 VPN */
if (bgp_vrf->vpn_policy[afi].tovpn_sid) {
static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN,
sizeof(struct bgp_attr_srv6_l3vpn));
static_attr.srv6_l3vpn->sid_flags = 0x00;
static_attr.srv6_l3vpn->endpoint_behavior = 0xffff;
memcpy(&static_attr.srv6_l3vpn->sid,
bgp_vrf->vpn_policy[afi].tovpn_sid,
sizeof(static_attr.srv6_l3vpn->sid));
}
new_attr = bgp_attr_intern(
&static_attr); /* hashed refcounted everything */


@ -77,7 +77,10 @@ extern void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn,
extern void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi);
extern void vpn_leak_zebra_vrf_label_withdraw(struct bgp *bgp, afi_t afi);
extern void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi);
extern void vpn_leak_zebra_vrf_sid_withdraw(struct bgp *bgp, afi_t afi);
extern int vpn_leak_label_callback(mpls_label_t label, void *lblid, bool alloc);
extern void ensure_vrf_tovpn_sid(struct bgp *vpn, struct bgp *vrf, afi_t afi);
extern void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi, safi_t safi);
void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
@ -237,6 +240,15 @@ static inline void vpn_leak_postchange(vpn_policy_direction_t direction,
vpn_leak_zebra_vrf_label_update(bgp_vrf, afi);
}
if (!bgp_vrf->vpn_policy[afi].tovpn_sid)
ensure_vrf_tovpn_sid(bgp_vpn, bgp_vrf, afi);
if (sid_diff(bgp_vrf->vpn_policy[afi].tovpn_sid,
bgp_vrf->vpn_policy[afi]
.tovpn_zebra_vrf_sid_last_sent)) {
vpn_leak_zebra_vrf_sid_update(bgp_vrf, afi);
}
vpn_leak_from_vrf_update_all(bgp_vpn, bgp_vrf, afi);
}
}


@ -11442,12 +11442,33 @@ int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_nexthop_destroy(
int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_import_vpn_modify(
struct nb_cb_modify_args *args)
{
bool is_enable = false;
struct bgp *bgp;
switch (args->event) {
case NB_EV_VALIDATE:
bgp = nb_running_get_entry(args->dnode, NULL, false);
if (!bgp)
return NB_OK;
if (bgp->inst_type != BGP_INSTANCE_TYPE_VRF
&& bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) {
snprintf(
args->errmsg, args->errmsg_len,
"import|export vpn valid only for bgp vrf or default instance");
return NB_ERR_VALIDATION;
}
break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
return NB_OK;
case NB_EV_APPLY:
/* TODO: implement me. */
if (yang_dnode_get_bool(args->dnode, NULL))
is_enable = true;
return bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify(
args, "import", is_enable);
break;
}
@ -11461,12 +11482,32 @@ int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_import_vpn_modify(
int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_export_vpn_modify(
struct nb_cb_modify_args *args)
{
bool is_enable = false;
struct bgp *bgp;
switch (args->event) {
case NB_EV_VALIDATE:
bgp = nb_running_get_entry(args->dnode, NULL, false);
if (!bgp)
return NB_OK;
if (bgp->inst_type != BGP_INSTANCE_TYPE_VRF
&& bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) {
snprintf(
args->errmsg, args->errmsg_len,
"import|export vpn valid only for bgp vrf or default instance");
return NB_ERR_VALIDATION;
}
break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
return NB_OK;
case NB_EV_APPLY:
/* TODO: implement me. */
if (yang_dnode_get_bool(args->dnode, NULL))
is_enable = true;
return bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify(
args, "export", is_enable);
break;
}


@ -66,9 +66,20 @@ static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc)
static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc)
{
/*
* In the case of MPLS-VPN, the label is learned from LDP or other
* protocols, and nexthop tracking is enabled for the label.
* The value is recorded as BGP_NEXTHOP_LABELED_VALID.
* In the case of SRv6-VPN, we need to track the reachability to the
* SID (in other words, IPv6 address). As in MPLS, we need to record
the value as BGP_NEXTHOP_SID_VALID. However, this tracking is not yet
implemented, so this function assumes that all transit routes for
SRv6-VPN are valid.
*/
return (bgp_zebra_num_connects() == 0
|| (bnc && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
&& bnc->nexthop_num > 0));
|| (bnc && bnc->nexthop_num > 0
&& (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
|| bnc->bgp->srv6_enabled)));
}
static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)


@ -9252,6 +9252,78 @@ DEFPY (af_label_vpn_export,
return CMD_SUCCESS;
}
DEFPY (af_sid_vpn_export,
af_sid_vpn_export_cmd,
"[no] sid vpn export <(1-255)$sid_idx|auto$sid_auto>",
NO_STR
"sid value for VRF\n"
"Between current address-family and vpn\n"
"For routes leaked from current address-family to vpn\n"
"Sid allocation index\n"
"Automatically assign a label\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
afi_t afi;
int debug = 0;
int idx = 0;
bool yes = true;
if (argv_find(argv, argc, "no", &idx))
yes = false;
debug = (BGP_DEBUG(vpn, VPN_LEAK_TO_VRF) |
BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF));
afi = vpn_policy_getafi(vty, bgp, false);
if (afi == AFI_MAX)
return CMD_WARNING_CONFIG_FAILED;
if (!yes) {
/* implement me */
vty_out(vty, "It's not implemented");
return CMD_WARNING_CONFIG_FAILED;
}
/* skip when it's already configured */
if ((sid_idx != 0 && bgp->vpn_policy[afi].tovpn_sid_index != 0)
|| (sid_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_SID_AUTO)))
return CMD_SUCCESS;
/*
* mode change between sid_idx and sid_auto isn't supported.
* user must negate sid vpn export when they want to change the mode
*/
if ((sid_auto && bgp->vpn_policy[afi].tovpn_sid_index != 0)
|| (sid_idx != 0 && CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_SID_AUTO))) {
vty_out(vty, "it's already configured as %s.\n",
sid_auto ? "auto-mode" : "idx-mode");
return CMD_WARNING_CONFIG_FAILED;
}
/* pre-change */
vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
bgp_get_default(), bgp);
if (sid_auto) {
/* SID allocation auto-mode */
if (debug)
zlog_debug("%s: auto sid alloc.", __func__);
SET_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_SID_AUTO);
} else {
/* SID allocation index-mode */
if (debug)
zlog_debug("%s: idx %ld sid alloc.", __func__, sid_idx);
bgp->vpn_policy[afi].tovpn_sid_index = sid_idx;
}
/* post-change */
vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
bgp_get_default(), bgp);
return CMD_SUCCESS;
}
ALIAS (af_label_vpn_export,
af_no_label_vpn_export_cmd,
"no label vpn export",
@ -9878,6 +9950,102 @@ DEFUN_NOSH (address_family_evpn,
return CMD_SUCCESS;
}
DEFUN_NOSH (bgp_segment_routing_srv6,
bgp_segment_routing_srv6_cmd,
"segment-routing srv6",
"Segment-Routing configuration\n"
"Segment-Routing SRv6 configuration\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
bgp->srv6_enabled = true;
vty->node = BGP_SRV6_NODE;
return CMD_SUCCESS;
}
DEFPY (bgp_srv6_locator,
bgp_srv6_locator_cmd,
"locator NAME$name",
"Specify SRv6 locator\n"
"Specify SRv6 locator\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
int ret;
if (strlen(bgp->srv6_locator_name) > 0
&& strcmp(name, bgp->srv6_locator_name) != 0) {
vty_out(vty, "srv6 locator is already configured\n");
return CMD_WARNING_CONFIG_FAILED;
}
snprintf(bgp->srv6_locator_name,
sizeof(bgp->srv6_locator_name), "%s", name);
ret = bgp_zebra_srv6_manager_get_locator_chunk(name);
if (ret < 0)
return CMD_WARNING_CONFIG_FAILED;
return CMD_SUCCESS;
}
DEFPY (show_bgp_srv6,
show_bgp_srv6_cmd,
"show bgp segment-routing srv6",
SHOW_STR
BGP_STR
"BGP Segment Routing\n"
"BGP Segment Routing SRv6\n")
{
struct bgp *bgp;
struct listnode *node;
struct prefix_ipv6 *chunk;
struct bgp_srv6_function *func;
struct in6_addr *tovpn4_sid;
struct in6_addr *tovpn6_sid;
char buf[256];
char buf_tovpn4_sid[256];
char buf_tovpn6_sid[256];
bgp = bgp_get_default();
if (!bgp)
return CMD_SUCCESS;
vty_out(vty, "locator_name: %s\n", bgp->srv6_locator_name);
vty_out(vty, "locator_chunks:\n");
for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
prefix2str(chunk, buf, sizeof(buf));
vty_out(vty, "- %s\n", buf);
}
vty_out(vty, "functions:\n");
for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func)) {
inet_ntop(AF_INET6, &func->sid, buf, sizeof(buf));
vty_out(vty, "- sid: %s\n", buf);
vty_out(vty, " locator: %s\n", func->locator_name);
}
vty_out(vty, "bgps:\n");
for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
vty_out(vty, "- name: %s\n",
bgp->name ? bgp->name : "default");
tovpn4_sid = bgp->vpn_policy[AFI_IP].tovpn_sid;
tovpn6_sid = bgp->vpn_policy[AFI_IP6].tovpn_sid;
if (tovpn4_sid)
inet_ntop(AF_INET6, tovpn4_sid, buf_tovpn4_sid,
sizeof(buf_tovpn4_sid));
if (tovpn6_sid)
inet_ntop(AF_INET6, tovpn6_sid, buf_tovpn6_sid,
sizeof(buf_tovpn6_sid));
vty_out(vty, " vpn_policy[AFI_IP].tovpn_sid: %s\n",
tovpn4_sid ? buf_tovpn4_sid : "none");
vty_out(vty, " vpn_policy[AFI_IP6].tovpn_sid: %s\n",
tovpn6_sid ? buf_tovpn6_sid : "none");
}
return CMD_SUCCESS;
}
DEFUN_NOSH (exit_address_family,
exit_address_family_cmd,
"exit-address-family",
@ -17895,6 +18063,14 @@ int bgp_config_write(struct vty *vty)
if (CHECK_FLAG(bgp->flags, BGP_FLAG_SHUTDOWN))
vty_out(vty, " bgp shutdown\n");
if (bgp->srv6_enabled) {
vty_frame(vty, " !\n segment-routing srv6\n");
if (strlen(bgp->srv6_locator_name))
vty_out(vty, " locator %s\n",
bgp->srv6_locator_name);
}
/* IPv4 unicast configuration. */
bgp_config_write_family(vty, bgp, AFI_IP, SAFI_UNICAST);
@ -18040,6 +18216,13 @@ static struct cmd_node bgp_flowspecv6_node = {
.prompt = "%s(config-router-af-vpnv6)# ",
};
static struct cmd_node bgp_srv6_node = {
.name = "bgp srv6",
.node = BGP_SRV6_NODE,
.parent_node = BGP_NODE,
.prompt = "%s(config-router-srv6)# ",
};
static void community_list_vty(void);
static void bgp_ac_neighbor(vector comps, struct cmd_token *token)
@ -18114,6 +18297,7 @@ void bgp_vty_init(void)
install_node(&bgp_evpn_vni_node);
install_node(&bgp_flowspecv4_node);
install_node(&bgp_flowspecv6_node);
install_node(&bgp_srv6_node);
/* Install default VTY commands to new nodes. */
install_default(BGP_NODE);
@ -18129,6 +18313,7 @@ void bgp_vty_init(void)
install_default(BGP_FLOWSPECV6_NODE);
install_default(BGP_EVPN_NODE);
install_default(BGP_EVPN_VNI_NODE);
install_default(BGP_SRV6_NODE);
/* "bgp local-mac" hidden commands. */
install_element(CONFIG_NODE, &bgp_local_mac_cmd);
@ -19457,6 +19642,13 @@ void bgp_vty_init(void)
/* tcp-mss command */
install_element(BGP_NODE, &neighbor_tcp_mss_cmd);
install_element(BGP_NODE, &no_neighbor_tcp_mss_cmd);
/* srv6 commands */
install_element(VIEW_NODE, &show_bgp_srv6_cmd);
install_element(BGP_NODE, &bgp_segment_routing_srv6_cmd);
install_element(BGP_SRV6_NODE, &bgp_srv6_locator_cmd);
install_element(BGP_IPV4_NODE, &af_sid_vpn_export_cmd);
install_element(BGP_IPV6_NODE, &af_sid_vpn_export_cmd);
}
#include "memory.h"


@ -1171,6 +1171,7 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
unsigned int valid_nh_count = 0;
int has_valid_label = 0;
bool allow_recursion = false;
int has_valid_sid = 0;
uint8_t distance;
struct peer *peer;
struct bgp_path_info *mpinfo;
@ -1395,9 +1396,20 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
sizeof(struct ethaddr));
api_nh->weight = nh_weight;
if (mpinfo->extra
&& !sid_zero(&mpinfo->extra->sid[0])
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
has_valid_sid = 1;
memcpy(&api_nh->seg6_segs, &mpinfo->extra->sid[0],
sizeof(api_nh->seg6_segs));
}
valid_nh_count++;
}
if (has_valid_sid && !(CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)))
SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6);
is_add = (valid_nh_count || nhg_id) ? true : false;
if (is_add && CHECK_FLAG(bm->flags, BM_FLAG_SEND_EXTRA_DATA_TO_ZEBRA)) {
@ -1453,6 +1465,8 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
char eth_buf[ETHER_ADDR_STRLEN + 7] = {'\0'};
char buf1[ETHER_ADDR_STRLEN];
char label_buf[20];
char sid_buf[20];
char segs_buf[256];
int i;
zlog_debug(
@ -1495,15 +1509,22 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
snprintf(label_buf, sizeof(label_buf),
"label %u", api_nh->labels[0]);
if (has_valid_sid
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
inet_ntop(AF_INET6, &api_nh->seg6_segs,
sid_buf, sizeof(sid_buf));
snprintf(segs_buf, sizeof(segs_buf), "segs %s",
sid_buf);
}
if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)
&& !is_zero_mac(&api_nh->rmac))
snprintf(eth_buf, sizeof(eth_buf), " RMAC %s",
prefix_mac2str(&api_nh->rmac,
buf1, sizeof(buf1)));
zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s",
zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s %s",
i + 1, nh_buf, api_nh->ifindex,
api_nh->vrf_id, api_nh->weight,
label_buf, eth_buf);
label_buf, segs_buf, eth_buf);
}
int recursion_flag = 0;
@ -2976,6 +2997,35 @@ static int bgp_ifp_create(struct interface *ifp)
return 0;
}
static void bgp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)
{
struct stream *s = NULL;
struct bgp *bgp = bgp_get_default();
struct listnode *node;
struct prefix_ipv6 *c;
struct srv6_locator_chunk s6c = {};
struct prefix_ipv6 *chunk = NULL;
s = zclient->ibuf;
zapi_srv6_locator_chunk_decode(s, &s6c);
if (strcmp(bgp->srv6_locator_name, s6c.locator_name) != 0) {
zlog_err("%s: Locator name unmatch %s:%s", __func__,
bgp->srv6_locator_name, s6c.locator_name);
return;
}
for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, c)) {
if (!prefix_cmp(c, &s6c.prefix))
return;
}
chunk = prefix_ipv6_new();
*chunk = s6c.prefix;
listnode_add(bgp->srv6_locator_chunks, chunk);
vpn_leak_postchange_all();
}
void bgp_zebra_init(struct thread_master *master, unsigned short instance)
{
zclient_num_connects = 0;
@ -3018,6 +3068,8 @@ void bgp_zebra_init(struct thread_master *master, unsigned short instance)
zclient->iptable_notify_owner = iptable_notify_owner;
zclient->route_notify_owner = bgp_zebra_route_notify_owner;
zclient->instance = instance;
zclient->process_srv6_locator_chunk =
bgp_zebra_process_srv6_locator_chunk;
}
void bgp_zebra_destroy(void)
@ -3415,3 +3467,8 @@ int bgp_zebra_stale_timer_update(struct bgp *bgp)
zlog_debug("send capabilty success");
return BGP_GR_SUCCESS;
}
int bgp_zebra_srv6_manager_get_locator_chunk(const char *name)
{
return srv6_manager_get_locator_chunk(zclient, name);
}


@ -113,4 +113,5 @@ extern void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
extern int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable);
extern int bgp_zebra_update(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type);
extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
#endif /* _QUAGGA_BGP_ZEBRA_H */


@ -1327,6 +1327,22 @@ int bgp_peer_gr_init(struct peer *peer)
return BGP_GR_SUCCESS;
}
static void bgp_srv6_init(struct bgp *bgp)
{
bgp->srv6_enabled = false;
memset(bgp->srv6_locator_name, 0, sizeof(bgp->srv6_locator_name));
bgp->srv6_locator_chunks = list_new();
bgp->srv6_functions = list_new();
}
static void bgp_srv6_cleanup(struct bgp *bgp)
{
if (bgp->srv6_locator_chunks)
list_delete(&bgp->srv6_locator_chunks);
if (bgp->srv6_functions)
list_delete(&bgp->srv6_functions);
}
/* Allocate new peer object, implicitely locked. */
struct peer *peer_new(struct bgp *bgp)
{
@ -3238,6 +3254,7 @@ static struct bgp *bgp_create(as_t *as, const char *name,
bgp_evpn_init(bgp);
bgp_evpn_vrf_es_init(bgp);
bgp_pbr_init(bgp);
bgp_srv6_init(bgp);
/*initilize global GR FSM */
bgp_global_gr_init(bgp);
@ -3754,6 +3771,7 @@ void bgp_free(struct bgp *bgp)
bgp_evpn_cleanup(bgp);
bgp_pbr_cleanup(bgp);
bgp_srv6_cleanup(bgp);
XFREE(MTYPE_BGP_EVPN_INFO, bgp->evpn_info);
for (afi = AFI_IP; afi < AFI_MAX; afi++) {


@ -29,6 +29,7 @@
#include "lib/json.h"
#include "vrf.h"
#include "vty.h"
#include "srv6.h"
#include "iana_afi.h"
/* For union sockunion. */
@ -222,6 +223,7 @@ struct vpn_policy {
#define BGP_VPN_POLICY_TOVPN_LABEL_AUTO (1 << 0)
#define BGP_VPN_POLICY_TOVPN_RD_SET (1 << 1)
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
#define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
/*
* If we are importing another vrf into us keep a list of
@ -234,6 +236,13 @@ struct vpn_policy {
* vrf names that we are being exported to.
*/
struct list *export_vrf;
/*
* Segment-Routing SRv6 Mode
*/
uint32_t tovpn_sid_index; /* unset => set to 0 */
struct in6_addr *tovpn_sid;
struct in6_addr *tovpn_zebra_vrf_sid_last_sent;
};
/*
@ -322,6 +331,11 @@ struct bgp_snmp_stats {
uint32_t routes_deleted;
};
struct bgp_srv6_function {
struct in6_addr sid;
char locator_name[SRV6_LOCNAME_SIZE];
};
/* BGP instance structure. */
struct bgp {
/* AS number of this BGP instance. */
@ -718,6 +732,12 @@ struct bgp {
/* BGP route flap dampening configuration */
struct bgp_damp_config damp[AFI_MAX][SAFI_MAX];
/* BGP VPN SRv6 backend */
bool srv6_enabled;
char srv6_locator_name[SRV6_LOCNAME_SIZE];
struct list *srv6_locator_chunks;
struct list *srv6_functions;
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp);


@ -2597,6 +2597,19 @@ address-family:
The CLI will disallow attempts to configure incompatible leaking
modes.
.. _bgp-l3vpn-srv6:
L3VPN SRv6
----------
.. clicmd:: segment-routing srv6
Enable the SRv6 backend for BGP L3VPN and enter its configuration node.
.. clicmd:: locator NAME
Specify the SRv6 locator to be used for SRv6 L3VPN. The locator name must
also be configured in zebra, but the two may be configured in either order.
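A minimal configuration sketch is shown below; the AS number, the VRF name
``vrf10``, and the locator name ``loc1`` are only examples. The
``sid vpn export`` command added alongside this node selects the SID that is
advertised with routes leaked to the VPN table, and the usual RD/RT and
import/export leaking configuration still applies.
::
   router bgp 65001
    segment-routing srv6
     locator loc1
    !
   router bgp 65001 vrf vrf10
    address-family ipv6 unicast
     sid vpn export auto
     rd vpn export 65001:10
     rt vpn both 65001:10
     import vpn
     export vpn
    exit-address-family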
.. _bgp-evpn:
@ -3528,6 +3541,40 @@ Displaying Update Group Information
Display Information about update-group events in FRR.
Segment-Routing IPv6
--------------------
.. clicmd:: show bgp segment-routing srv6
This command displays information about SRv6 L3VPN in bgpd: which locator is
in use, its locator chunk information, and the SIDs of the SRv6 functions
that bgpd currently manages.
In the following example, bgpd is using a locator named loc1 and manages two
SRv6 functions that perform VPNv6 VRF redirection for vrf10 and vrf20.
::
router# show bgp segment-routing srv6
locator_name: loc1
locator_chunks:
- 2001:db8:1:1::/64
functions:
- sid: 2001:db8:1:1::100
locator: loc1
- sid: 2001:db8:1:1::200
locator: loc1
bgps:
- name: default
vpn_policy[AFI_IP].tovpn_sid: none
vpn_policy[AFI_IP6].tovpn_sid: none
- name: vrf10
vpn_policy[AFI_IP].tovpn_sid: none
vpn_policy[AFI_IP6].tovpn_sid: 2001:db8:1:1::100
- name: vrf20
vpn_policy[AFI_IP].tovpn_sid: none
vpn_policy[AFI_IP6].tovpn_sid: 2001:db8:1:1::200
.. _bgp-route-reflector:
Route Reflector


@ -147,4 +147,143 @@ keyword. At present, no sharp commands will be preserved in the config.
Show imported Traffic Engineering Data Base
.. clicmd:: sharp install seg6-routes [vrf NAME] <A.B.C.D|X:X::X:X> nexthop-seg6 X:X::X:X encap X:X::X:X (1-1000000)
This command installs a route for SRv6 Transit behavior (on Linux it is
known as a seg6 route). The count, destination, vrf, etc. have the same
meaning as in the ``sharp install routes`` command. With this command,
sharpd asks zebra to install a seg6 route via the ZEBRA_ROUTE_ADD ZAPI
message, as in the following example.
::
router# sharp install seg6-routes 1::A nexthop-seg6 2001::2 encap A:: 1
router# sharp install seg6-routes 1::B nexthop-seg6 2001::2 encap B:: 1
router# show ipv6 route
D>* 1::A/128 [150/0] via 2001::2, dum0, seg6 a::, weight 1, 00:00:01
D>* 1::B/128 [150/0] via 2001::2, dum0, seg6 b::, weight 1, 00:00:01
bash# ip -6 route list
1::A encap seg6 mode encap segs 1 [ a:: ] via 2001::2 dev dum0 proto 194 metric 20 pref medium
1::B encap seg6 mode encap segs 1 [ b:: ] via 2001::2 dev dum0 proto 194 metric 20 pref medium
.. clicmd:: sharp install seg6local-routes [vrf NAME] X:X::X:X nexthop-seg6local NAME ACTION ARGS.. (1-1000000)
This command installs a route for SRv6 Endpoint behavior (on Linux it is
known as a seg6local route). The count, destination, vrf, etc. have the same
meaning as in the ``sharp install routes`` command. With this command,
sharpd asks zebra to install a seg6local route via the ZEBRA_ROUTE_ADD ZAPI
message, as in the following example.
There are many End Functions defined in SRv6, which have been standardized
in RFC 8986. The current implementation supports End, End.X, End.T, End.DX4,
and End.DT6, which can be configured as follows.
::
router# sharp install seg6local-routes 1::1 nexthop-seg6local dum0 End 1
router# sharp install seg6local-routes 1::2 nexthop-seg6local dum0 End_X 2001::1 1
router# sharp install seg6local-routes 1::3 nexthop-seg6local dum0 End_T 10 1
router# sharp install seg6local-routes 1::4 nexthop-seg6local dum0 End_DX4 10.0.0.1 1
router# sharp install seg6local-routes 1::5 nexthop-seg6local dum0 End_DT6 10 1
router# show ipv6 route
D>* 1::1/128 [150/0] is directly connected, dum0, seg6local End USP, weight 1, 00:00:05
D>* 1::2/128 [150/0] is directly connected, dum0, seg6local End.X nh6 2001::1, weight 1, 00:00:05
D>* 1::3/128 [150/0] is directly connected, dum0, seg6local End.T table 10, weight 1, 00:00:05
D>* 1::4/128 [150/0] is directly connected, dum0, seg6local End.DX4 nh4 10.0.0.1, weight 1, 00:00:05
D>* 1::5/128 [150/0] is directly connected, dum0, seg6local End.DT6 table 10, weight 1, 00:00:05
bash# ip -6 route
1::1 encap seg6local action End dev dum0 proto 194 metric 20 pref medium
1::2 encap seg6local action End.X nh6 2001::1 dev dum0 proto 194 metric 20 pref medium
1::3 encap seg6local action End.T table 10 dev dum0 proto 194 metric 20 pref medium
1::4 encap seg6local action End.DX4 nh4 10.0.0.1 dev dum0 proto 194 metric 20 pref medium
1::5 encap seg6local action End.DT6 table 10 dev dum0 proto 194 metric 20 pref medium
.. clicmd:: show sharp segment-routing srv6
This command shows the SRv6 locator chunks that sharpd is holding as a
zclient.
An SRv6 locator is defined for each SRv6 router, and a single locator may
be shared by multiple protocols.
In the FRRouting implementation, a locator chunk is requested by a routing
protocol daemon such as sharpd or bgpd; zebra then allocates a locator
chunk, which is a subset of the locator prefix, and notifies the requesting
daemon.
The following example shows the locator chunk allocated to sharpd itself.
::
router# show segment-routing srv6 locator
Locator:
Name                 ID      Prefix                   Status
-------------------- ------- ------------------------ -------
loc2                 2       2001:db8:2:2::/64        Up
router# show sharp segment-routing srv6
Locator loc1 has 1 prefix chunks
2001:db8:1:1::/64
.. clicmd:: sharp srv6-manager get-locator-chunk
This command asks the SRv6 manager to allocate a locator chunk via ZAPI.
The chunk is then owned by the requesting protocol daemon, so a chunk
obtained by sharpd will not be used by the SRv6 machinery of any other
routing protocol.
Since the request is asynchronous, it can be issued before the SRv6 locator
is configured on the zebra side; as soon as the locator is ready there,
sharpd is notified of the allocated chunk via ZAPI.
::
router# show segment-routing srv6 locator loc1 detail
Name: loc1
Prefix: 2001:db8:1:1::/64
Chunks:
- prefix: 2001:db8:1:1::/64, owner: system
router# show sharp segment-routing srv6
(nothing)
router# sharp srv6-manager get-locator-chunk loc1
router# show segment-routing srv6 locator loc1 detail
Name: loc1
Prefix: 2001:db8:1:1::/64
Chunks:
- prefix: 2001:db8:1:1::/64, owner: sharp
router# show sharp segment-routing srv6
Locator loc1 has 1 prefix chunks
2001:db8:1:1::/64
.. clicmd:: sharp srv6-manager release-locator-chunk
This command releases a locator chunk that was previously allocated via
ZAPI. The freed chunk's owner reverts to the system, and the chunk becomes
available to other protocol daemons.
::
router# show segment-routing srv6 locator loc1 detail
Name: loc1
Prefix: 2001:db8:1:1::/64
Chunks:
- prefix: 2001:db8:1:1::/64, owner: sharp
router# show sharp segment-routing srv6
Locator loc1 has 1 prefix chunks
2001:db8:1:1::/64
router# sharp srv6-manager release-locator-chunk loc1
router# show segment-routing srv6 locator loc1 detail
Name: loc1
Prefix: 2001:db8:1:1::/64
Chunks:
- prefix: 2001:db8:1:1::/64, owner: system
router# show sharp segment-routing srv6
(nothing)


@ -630,6 +630,137 @@ presence of the entry.
21 Static 10.125.0.2 IPv4 Explicit Null
.. _zebra-srv6:
Segment-Routing IPv6
====================
Segment-Routing is a source-routing paradigm that allows a network operator
to encode network intent into packets.
SRv6 is an implementation of Segment-Routing that uses IPv6 addresses and
the Segment Routing Header.
Any routing daemon can use the Segment-Routing base framework implemented in
zebra to take advantage of SRv6. In that case, the user must configure the
initial SRv6 settings through FRR's CLI, frr.conf, or zebra.conf. This
section shows how to configure SRv6 on FRR. SRv6 can also be used
standalone, and this section covers that case as well.
.. index:: show segment-routing srv6 locator [json]
.. clicmd:: show segment-routing srv6 locator [json]
This command dumps the SRv6 locators configured on zebra. An SRv6 locator
is used to route packets to the node before the SRv6 function is performed,
and it acts as an aggregate of that node's SRv6 function IDs. The following
console log shows two SRv6 locators, loc1 and loc2. Each locator is
identified by a unique IPv6 prefix. The information can be printed as a
JSON string by adding the ``json`` keyword at the end of the command.
::
router# sh segment-routing srv6 locator
Locator:
Name ID Prefix Status
-------------------- ------- ------------------------ -------
loc1 1 2001:db8:1:1::/64 Up
loc2 2 2001:db8:2:2::/64 Up
.. index:: show segment-routing srv6 locator NAME detail [json]
.. clicmd:: show segment-routing srv6 locator NAME detail [json]
As shown in the example, specifying a locator name displays detailed
information for that locator. A locator is represented by a single IPv6
prefix, but SRv6 is designed so that a locator can be shared among multiple
routing protocols. For this purpose, zebra divides the IPv6 prefix block
that identifies the locator into multiple chunks and manages the ownership
of each chunk.
In the example below, loc1 is owned by system, which means it has not yet
been claimed by any routing protocol. loc2 is owned by sharp, which means
sharpd (used here for development) holds that chunk of the locator and no
other routing protocol will use it.
::
router# show segment-routing srv6 locator loc1 detail
Name: loc1
Prefix: 2001:db8:1:1::/64
Chunks:
- prefix: 2001:db8:1:1::/64, owner: system
router# show segment-routing srv6 locator loc2 detail
Name: loc2
Prefix: 2001:db8:2:2::/64
Chunks:
- prefix: 2001:db8:2:2::/64, owner: sharp
.. index:: segment-routing
.. clicmd:: segment-routing
Move from configure mode to segment-routing node.
.. index:: srv6
.. clicmd:: srv6
Move from segment-routing node to srv6 node.
.. index:: locators
.. clicmd:: locators
Move from the srv6 node to the locators node. Under this node, the user can
configure individual SRv6 locators.
.. index:: locator NAME
.. clicmd:: locator NAME
Create a new locator. If the name of an existing locator is specified,
enter that locator's configuration node to change its settings.
.. index:: prefix X:X::X:X/M [function-bits-length 32]
.. clicmd:: prefix X:X::X:X/M [function-bits-length 32]
Set the IPv6 prefix block of the locator. The SRv6 locator is defined in
RFC 8986. A routing protocol selects a locator and allocates SIDs from it;
each SID is contained within the locator's IPv6 prefix.
The following console log shows a typical configuration of the SRv6 data
plane. After a new SRv6 locator named loc1 is created, its prefix is
configured as ``2001:db8:1:1::/64``. When a user or a routing daemon
allocates a new SID on this locator, the SID is taken from this prefix; for
example, new SIDs might be ``2001:db8:1:1:7::/80``, ``2001:db8:1:1:8::/80``,
and so on. Each locator also has a default SID for the SRv6 local function
"End", usually allocated as ``PREFIX:1::`` (where ``PREFIX`` is the
locator's prefix). For example, if the locator's prefix is
``2001:db8:1:1::/64``, the default SID will be ``2001:db8:1:1:1::``.
The function bits length is 16 bits by default. Operators who want to change
it can do so with the ``function-bits-length`` option.
::
router# configure terminal
router(config)# segment-routing
router(config-sr)# srv6
router(config-srv6)# locators
router(config-srv6-locs)# locator loc1
router(config-srv6-loc)# prefix 2001:db8:1:1::/64
router(config-srv6-loc)# show run
...
segment-routing
srv6
locators
locator loc1
prefix 2001:db8:1:1::/64
!
...
.. _multicast-rib-commands:
Multicast RIB Commands


@ -889,6 +889,15 @@ enum node_type node_parent(enum node_type node)
case PCEP_PCC_NODE:
ret = PCEP_NODE;
break;
case SRV6_NODE:
ret = SEGMENT_ROUTING_NODE;
break;
case SRV6_LOCS_NODE:
ret = SRV6_NODE;
break;
case SRV6_LOC_NODE:
ret = SRV6_LOCS_NODE;
break;
default:
ret = CONFIG_NODE;
break;


@ -120,6 +120,7 @@ enum node_type {
BGP_VNC_L2_GROUP_NODE, /* BGP VNC L2 group */
RFP_DEFAULTS_NODE, /* RFP defaults node */
BGP_EVPN_NODE, /* BGP EVPN node. */
BGP_SRV6_NODE, /* BGP SRv6 node. */
OSPF_NODE, /* OSPF protocol mode */
OSPF6_NODE, /* OSPF protocol for IPv6 mode */
LDP_NODE, /* LDP protocol mode */
@ -155,6 +156,9 @@ enum node_type {
PCEP_PCE_CONFIG_NODE, /* PCE shared configuration node */
PCEP_PCE_NODE, /* PCE configuration node */
PCEP_PCC_NODE, /* PCC configuration node */
SRV6_NODE, /* SRv6 node */
SRV6_LOCS_NODE, /* SRv6 locators node */
SRV6_LOC_NODE, /* SRv6 locator node */
VTY_NODE, /* Vty node. */
FPM_NODE, /* Dataplane FPM node. */
LINK_PARAMS_NODE, /* Link-parameters node */


@ -36,6 +36,7 @@
DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop");
DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label");
DEFINE_MTYPE_STATIC(LIB, NH_SRV6, "Nexthop srv6");
static int _nexthop_labels_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
@ -66,6 +67,39 @@ static int _nexthop_labels_cmp(const struct nexthop *nh1,
(nhl1->num_labels * sizeof(mpls_label_t)));
}
static int _nexthop_srv6_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
{
int ret = 0;
if (!nh1->nh_srv6 && !nh2->nh_srv6)
return 0;
if (nh1->nh_srv6 && !nh2->nh_srv6)
return 1;
if (!nh1->nh_srv6 && nh2->nh_srv6)
return -1;
if (nh1->nh_srv6->seg6local_action > nh2->nh_srv6->seg6local_action)
return 1;
if (nh1->nh_srv6->seg6local_action < nh2->nh_srv6->seg6local_action)
return -1;
ret = memcmp(&nh1->nh_srv6->seg6local_ctx,
&nh2->nh_srv6->seg6local_ctx,
sizeof(struct seg6local_context));
if (ret != 0)
return ret;
ret = memcmp(&nh1->nh_srv6->seg6_segs,
&nh2->nh_srv6->seg6_segs,
sizeof(struct in6_addr));
return ret;
}
int nexthop_g_addr_cmp(enum nexthop_types_t type, const union g_addr *addr1,
const union g_addr *addr2)
{
@ -199,6 +233,10 @@ int nexthop_cmp(const struct nexthop *next1, const struct nexthop *next2)
return ret;
ret = _nexthop_labels_cmp(next1, next2);
if (ret != 0)
return ret;
ret = _nexthop_srv6_cmp(next1, next2);
return ret;
}
@ -353,6 +391,8 @@ struct nexthop *nexthop_new(void)
void nexthop_free(struct nexthop *nexthop)
{
nexthop_del_labels(nexthop);
nexthop_del_srv6_seg6local(nexthop);
nexthop_del_srv6_seg6(nexthop);
if (nexthop->resolved)
nexthops_free(nexthop->resolved);
XFREE(MTYPE_NEXTHOP, nexthop);
@ -523,6 +563,57 @@ void nexthop_del_labels(struct nexthop *nexthop)
nexthop->nh_label_type = ZEBRA_LSP_NONE;
}
void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
const struct seg6local_context *ctx)
{
if (action == ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
return;
if (!nexthop->nh_srv6)
nexthop->nh_srv6 = XCALLOC(MTYPE_NH_SRV6,
sizeof(struct nexthop_srv6));
nexthop->nh_srv6->seg6local_action = action;
nexthop->nh_srv6->seg6local_ctx = *ctx;
}
void nexthop_del_srv6_seg6local(struct nexthop *nexthop)
{
if (!nexthop->nh_srv6)
return;
nexthop->nh_srv6->seg6local_action = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
if (sid_zero(&nexthop->nh_srv6->seg6_segs))
XFREE(MTYPE_NH_SRV6, nexthop->nh_srv6);
}
void nexthop_add_srv6_seg6(struct nexthop *nexthop,
const struct in6_addr *segs)
{
if (!segs)
return;
if (!nexthop->nh_srv6)
nexthop->nh_srv6 = XCALLOC(MTYPE_NH_SRV6,
sizeof(struct nexthop_srv6));
nexthop->nh_srv6->seg6_segs = *segs;
}
void nexthop_del_srv6_seg6(struct nexthop *nexthop)
{
if (!nexthop->nh_srv6)
return;
memset(&nexthop->nh_srv6->seg6_segs, 0,
sizeof(nexthop->nh_srv6->seg6_segs));
if (nexthop->nh_srv6->seg6local_action ==
ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
XFREE(MTYPE_NH_SRV6, nexthop->nh_srv6);
}
const char *nexthop2str(const struct nexthop *nexthop, char *str, int size)
{
switch (nexthop->type) {
@ -668,6 +759,14 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
key = jhash_1word(nexthop->backup_idx[i], key);
}
if (nexthop->nh_srv6) {
key = jhash_1word(nexthop->nh_srv6->seg6local_action, key);
key = jhash(&nexthop->nh_srv6->seg6local_ctx,
sizeof(nexthop->nh_srv6->seg6local_ctx), key);
key = jhash(&nexthop->nh_srv6->seg6_segs,
sizeof(nexthop->nh_srv6->seg6_segs), key);
}
return key;
}
@ -720,6 +819,17 @@ void nexthop_copy_no_recurse(struct nexthop *copy,
nexthop_add_labels(copy, nexthop->nh_label_type,
nexthop->nh_label->num_labels,
&nexthop->nh_label->label[0]);
if (nexthop->nh_srv6) {
if (nexthop->nh_srv6->seg6local_action !=
ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
nexthop_add_srv6_seg6local(copy,
nexthop->nh_srv6->seg6local_action,
&nexthop->nh_srv6->seg6local_ctx);
if (!sid_zero(&nexthop->nh_srv6->seg6_segs))
nexthop_add_srv6_seg6(copy,
&nexthop->nh_srv6->seg6_segs);
}
}
void nexthop_copy(struct nexthop *copy, const struct nexthop *nexthop,


@ -26,6 +26,7 @@
#include "prefix.h"
#include "mpls.h"
#include "vxlan.h"
#include "srv6.h"
#ifdef __cplusplus
extern "C" {
@ -139,6 +140,9 @@ struct nexthop {
/* SR-TE color used for matching SR-TE policies */
uint32_t srte_color;
/* SRv6 information */
struct nexthop_srv6 *nh_srv6;
};
/* Utility to append one nexthop to another. */
@ -157,6 +161,12 @@ void nexthops_free(struct nexthop *nexthop);
void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype,
uint8_t num_labels, const mpls_label_t *labels);
void nexthop_del_labels(struct nexthop *);
void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
const struct seg6local_context *ctx);
void nexthop_del_srv6_seg6local(struct nexthop *nexthop);
void nexthop_add_srv6_seg6(struct nexthop *nexthop,
const struct in6_addr *segs);
void nexthop_del_srv6_seg6(struct nexthop *nexthop);
/*
* Allocate a new nexthop object and initialize it from various args.


@ -22,6 +22,10 @@
#include "srv6.h"
#include "log.h"
DEFINE_QOBJ_TYPE(srv6_locator);
DEFINE_MTYPE_STATIC(LIB, SRV6_LOCATOR, "SRV6 locator");
DEFINE_MTYPE_STATIC(LIB, SRV6_LOCATOR_CHUNK, "SRV6 locator chunk");
const char *seg6local_action2str(uint32_t action)
{
switch (action) {
@ -76,7 +80,8 @@ int snprintf_seg6_segs(char *str,
}
const char *seg6local_context2str(char *str, size_t size,
struct seg6local_context *ctx, uint32_t action)
const struct seg6local_context *ctx,
uint32_t action)
{
char b0[128];
@ -116,3 +121,84 @@ const char *seg6local_context2str(char *str, size_t size,
return str;
}
}
struct srv6_locator *srv6_locator_alloc(const char *name)
{
struct srv6_locator *locator = NULL;
locator = XCALLOC(MTYPE_SRV6_LOCATOR, sizeof(struct srv6_locator));
strlcpy(locator->name, name, sizeof(locator->name));
locator->chunks = list_new();
QOBJ_REG(locator, srv6_locator);
return locator;
}
struct srv6_locator_chunk *srv6_locator_chunk_alloc(void)
{
struct srv6_locator_chunk *chunk = NULL;
chunk = XCALLOC(MTYPE_SRV6_LOCATOR_CHUNK,
sizeof(struct srv6_locator_chunk));
return chunk;
}
void srv6_locator_free(struct srv6_locator *locator)
{
XFREE(MTYPE_SRV6_LOCATOR, locator);
}
void srv6_locator_chunk_free(struct srv6_locator_chunk *chunk)
{
XFREE(MTYPE_SRV6_LOCATOR_CHUNK, chunk);
}
json_object *srv6_locator_chunk_json(const struct srv6_locator_chunk *chunk)
{
char str[256];
json_object *jo_root = NULL;
jo_root = json_object_new_object();
prefix2str(&chunk->prefix, str, sizeof(str));
json_object_string_add(jo_root, "prefix", str);
json_object_string_add(jo_root, "proto",
zebra_route_string(chunk->proto));
return jo_root;
}
json_object *srv6_locator_json(const struct srv6_locator *loc)
{
char str[256];
struct listnode *node;
struct srv6_locator_chunk *chunk;
json_object *jo_root = NULL;
json_object *jo_chunk = NULL;
json_object *jo_chunks = NULL;
jo_root = json_object_new_object();
/* set name */
json_object_string_add(jo_root, "name", loc->name);
/* set prefix */
prefix2str(&loc->prefix, str, sizeof(str));
json_object_string_add(jo_root, "prefix", str);
/* set function_bits_length */
json_object_int_add(jo_root, "functionBitsLength",
loc->function_bits_length);
/* set status_up */
json_object_boolean_add(jo_root, "statusUp",
loc->status_up);
/* set chunks */
jo_chunks = json_object_new_array();
json_object_object_add(jo_root, "chunks", jo_chunks);
for (ALL_LIST_ELEMENTS_RO((struct list *)loc->chunks, node, chunk)) {
jo_chunk = srv6_locator_chunk_json(chunk);
json_object_array_add(jo_chunks, jo_chunk);
}
return jo_root;
}


@ -21,10 +21,14 @@
#define _FRR_SRV6_H
#include <zebra.h>
#include "prefix.h"
#include "json.h"
#include <arpa/inet.h>
#include <netinet/in.h>
#define SRV6_MAX_SIDS 16
#define SRV6_LOCNAME_SIZE 256
#ifdef __cplusplus
extern "C" {
@ -69,6 +73,59 @@ struct seg6local_context {
uint32_t table;
};
struct srv6_locator {
char name[SRV6_LOCNAME_SIZE];
struct prefix_ipv6 prefix;
/*
* Bit length of SRv6 locator described in
* draft-ietf-bess-srv6-services-05#section-3.2.1
*/
uint8_t block_bits_length;
uint8_t node_bits_length;
uint8_t function_bits_length;
uint8_t argument_bits_length;
int algonum;
uint64_t current;
bool status_up;
struct list *chunks;
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(srv6_locator);
struct srv6_locator_chunk {
char locator_name[SRV6_LOCNAME_SIZE];
struct prefix_ipv6 prefix;
/*
* Bit length of SRv6 locator described in
* draft-ietf-bess-srv6-services-05#section-3.2.1
*/
uint8_t block_bits_length;
uint8_t node_bits_length;
uint8_t function_bits_length;
uint8_t argument_bits_length;
/*
* For Zclient communication values
*/
uint8_t keep;
uint8_t proto;
uint16_t instance;
uint32_t session_id;
};
struct nexthop_srv6 {
/* SRv6 localsid info for Endpoint-behaviour */
enum seg6local_action_t seg6local_action;
struct seg6local_context seg6local_ctx;
/* SRv6 Headend-behaviour */
struct in6_addr seg6_segs;
};
static inline const char *seg6_mode2str(enum seg6_mode_t mode)
{
switch (mode) {
@ -119,13 +176,20 @@ static inline void *sid_copy(struct in6_addr *dst,
const char *
seg6local_action2str(uint32_t action);
const char *
seg6local_context2str(char *str, size_t size,
struct seg6local_context *ctx, uint32_t action);
const char *seg6local_context2str(char *str, size_t size,
const struct seg6local_context *ctx,
uint32_t action);
int snprintf_seg6_segs(char *str,
size_t size, const struct seg6_segs *segs);
extern struct srv6_locator *srv6_locator_alloc(const char *name);
extern struct srv6_locator_chunk *srv6_locator_chunk_alloc(void);
extern void srv6_locator_free(struct srv6_locator *locator);
extern void srv6_locator_chunk_free(struct srv6_locator_chunk *chunk);
json_object *srv6_locator_chunk_json(const struct srv6_locator_chunk *chunk);
json_object *srv6_locator_json(const struct srv6_locator *loc);
#ifdef __cplusplus
}
#endif


@ -41,6 +41,7 @@
#include "lib_errors.h"
#include "srte.h"
#include "printfrr.h"
#include "srv6.h"
DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient");
DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs");
@ -435,6 +436,42 @@ enum zclient_send_status zclient_send_vrf_label(struct zclient *zclient,
return zclient_send_message(zclient);
}
enum zclient_send_status zclient_send_localsid(struct zclient *zclient,
const struct in6_addr *sid, ifindex_t oif,
enum seg6local_action_t action,
const struct seg6local_context *context)
{
struct prefix_ipv6 p = {};
struct zapi_route api = {};
struct nexthop nh = {};
p.family = AF_INET6;
p.prefixlen = 128;
p.prefix = *sid;
api.vrf_id = VRF_DEFAULT;
api.type = ZEBRA_ROUTE_BGP;
api.instance = 0;
api.safi = SAFI_UNICAST;
memcpy(&api.prefix, &p, sizeof(p));
if (action == ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
return zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
nh.type = NEXTHOP_TYPE_IFINDEX;
nh.ifindex = oif;
SET_FLAG(nh.flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
nexthop_add_srv6_seg6local(&nh, action, context);
zapi_nexthop_from_nexthop(&api.nexthops[0], &nh);
api.nexthop_num = 1;
return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
/* Send register requests to zebra daemon for the information in a VRF. */
void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id)
{
@ -796,6 +833,26 @@ static int zapi_nexthop_labels_cmp(const struct zapi_nexthop *next1,
return memcmp(next1->labels, next2->labels, next1->label_num);
}
static int zapi_nexthop_srv6_cmp(const struct zapi_nexthop *next1,
const struct zapi_nexthop *next2)
{
int ret = 0;
ret = memcmp(&next1->seg6_segs, &next2->seg6_segs,
sizeof(struct in6_addr));
if (ret != 0)
return ret;
if (next1->seg6local_action > next2->seg6local_action)
return 1;
if (next1->seg6local_action < next2->seg6local_action)
return -1;
return memcmp(&next1->seg6local_ctx, &next2->seg6local_ctx,
sizeof(struct seg6local_context));
}
static int zapi_nexthop_cmp_no_labels(const struct zapi_nexthop *next1,
const struct zapi_nexthop *next2)
{
@ -896,6 +953,10 @@ static int zapi_nexthop_cmp(const void *item1, const void *item2)
return ret;
ret = zapi_nexthop_labels_cmp(next1, next2);
if (ret != 0)
return ret;
ret = zapi_nexthop_srv6_cmp(next1, next2);
return ret;
}
@ -992,10 +1053,58 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh,
stream_putc(s, api_nh->backup_idx[i]);
}
if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)) {
stream_putl(s, api_nh->seg6local_action);
stream_write(s, &api_nh->seg6local_ctx,
sizeof(struct seg6local_context));
}
if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6))
stream_write(s, &api_nh->seg6_segs,
sizeof(struct in6_addr));
done:
return ret;
}
int zapi_srv6_locator_chunk_encode(struct stream *s,
const struct srv6_locator_chunk *c)
{
stream_putw(s, strlen(c->locator_name));
stream_put(s, c->locator_name, strlen(c->locator_name));
stream_putw(s, c->prefix.prefixlen);
stream_put(s, &c->prefix.prefix, sizeof(c->prefix.prefix));
stream_putc(s, c->block_bits_length);
stream_putc(s, c->node_bits_length);
stream_putc(s, c->function_bits_length);
stream_putc(s, c->argument_bits_length);
return 0;
}
int zapi_srv6_locator_chunk_decode(struct stream *s,
struct srv6_locator_chunk *c)
{
uint16_t len = 0;
c->prefix.family = AF_INET6;
STREAM_GETW(s, len);
if (len > SRV6_LOCNAME_SIZE)
goto stream_failure;
STREAM_GET(c->locator_name, s, len);
STREAM_GETW(s, c->prefix.prefixlen);
STREAM_GET(&c->prefix.prefix, s, sizeof(c->prefix.prefix));
STREAM_GETC(s, c->block_bits_length);
STREAM_GETC(s, c->node_bits_length);
STREAM_GETC(s, c->function_bits_length);
STREAM_GETC(s, c->argument_bits_length);
return 0;
stream_failure:
return -1;
}
static int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
{
int i;
@ -1273,6 +1382,16 @@ int zapi_nexthop_decode(struct stream *s, struct zapi_nexthop *api_nh,
STREAM_GETC(s, api_nh->backup_idx[i]);
}
if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)) {
STREAM_GETL(s, api_nh->seg6local_action);
STREAM_GET(&api_nh->seg6local_ctx, s,
sizeof(struct seg6local_context));
}
if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6))
STREAM_GET(&api_nh->seg6_segs, s,
sizeof(struct in6_addr));
/* Success */
ret = 0;
@ -1637,6 +1756,13 @@ struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh)
memcpy(n->backup_idx, znh->backup_idx, n->backup_num);
}
if (znh->seg6local_action != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
nexthop_add_srv6_seg6local(n, znh->seg6local_action,
&znh->seg6local_ctx);
if (!sid_zero(&znh->seg6_segs))
nexthop_add_srv6_seg6(n, &znh->seg6_segs);
return n;
}
@ -1681,6 +1807,23 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh,
memcpy(znh->backup_idx, nh->backup_idx, znh->backup_num);
}
if (nh->nh_srv6) {
if (nh->nh_srv6->seg6local_action !=
ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
znh->seg6local_action = nh->nh_srv6->seg6local_action;
memcpy(&znh->seg6local_ctx,
&nh->nh_srv6->seg6local_ctx,
sizeof(struct seg6local_context));
}
if (!sid_zero(&nh->nh_srv6->seg6_segs)) {
SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6);
memcpy(&znh->seg6_segs, &nh->nh_srv6->seg6_segs,
sizeof(struct in6_addr));
}
}
return 0;
}
@ -2598,6 +2741,76 @@ stream_failure:
return -1;
}
/**
* Function to request a srv6-locator chunk in an asynchronous way
*
* @param zclient Zclient used to connect to the srv6 manager (zebra)
* @param locator_name Name of SRv6-locator
* @result 0 on success, -1 otherwise
*/
int srv6_manager_get_locator_chunk(struct zclient *zclient,
const char *locator_name)
{
struct stream *s;
const size_t len = strlen(locator_name);
if (zclient_debug)
zlog_debug("Getting SRv6-Locator Chunk %s", locator_name);
if (zclient->sock < 0)
return -1;
/* send request */
s = zclient->obuf;
stream_reset(s);
zclient_create_header(s, ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK,
VRF_DEFAULT);
/* locator_name */
stream_putw(s, len);
stream_put(s, locator_name, len);
/* Put length at the first point of the stream. */
stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(zclient);
}
/**
* Function to release a srv6-locator chunk
*
* @param zclient Zclient used to connect to the srv6 manager (zebra)
* @param locator_name Name of SRv6-locator
* @result 0 on success, -1 otherwise
*/
int srv6_manager_release_locator_chunk(struct zclient *zclient,
const char *locator_name)
{
struct stream *s;
const size_t len = strlen(locator_name);
if (zclient_debug)
zlog_debug("Releasing SRv6-Locator Chunk %s", locator_name);
if (zclient->sock < 0)
return -1;
/* send request */
s = zclient->obuf;
stream_reset(s);
zclient_create_header(s, ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK,
VRF_DEFAULT);
/* locator_name */
stream_putw(s, len);
stream_put(s, locator_name, len);
/* Put length at the first point of the stream. */
stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(zclient);
}
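
A hedged sketch of how a client daemon consumes this asynchronous pair: register the reply callback first, then issue the request; zebra answers later with a ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK message carrying the encoded chunk. The names my_handle_chunk and my_daemon_srv6_init are hypothetical; sharpd's real wiring appears later in this commit.

/* Hypothetical client-side wiring (sketch only). */
static void my_handle_chunk(ZAPI_CALLBACK_ARGS)
{
    struct srv6_locator_chunk chunk = {};

    /* The reply carries the chunk in the same encoding produced by
     * zapi_srv6_locator_chunk_encode(); decode it from ibuf. */
    if (zapi_srv6_locator_chunk_decode(zclient->ibuf, &chunk) < 0)
        return;

    zlog_info("received chunk from locator %s", chunk.locator_name);
}

static void my_daemon_srv6_init(struct zclient *zc)
{
    /* 1. register the callback that will receive the reply ... */
    zc->process_srv6_locator_chunk = my_handle_chunk;

    /* 2. ... then ask the SRv6 manager for a chunk; the answer
     *    arrives asynchronously through the callback above. */
    srv6_manager_get_locator_chunk(zc, "loc1");
}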
/*
* Asynchronous label chunk request
*
@ -3345,7 +3558,8 @@ enum zclient_send_status zclient_send_mlag_register(struct zclient *client,
enum zclient_send_status zclient_send_mlag_deregister(struct zclient *client)
{
return zebra_message_send(client, ZEBRA_MLAG_CLIENT_UNREGISTER, VRF_DEFAULT);
return zebra_message_send(client, ZEBRA_MLAG_CLIENT_UNREGISTER,
VRF_DEFAULT);
}
enum zclient_send_status zclient_send_mlag_data(struct zclient *client,
@ -3888,6 +4102,21 @@ static int zclient_read(struct thread *thread)
case ZEBRA_MLAG_FORWARD_MSG:
zclient_mlag_handle_msg(command, zclient, length, vrf_id);
break;
case ZEBRA_SRV6_LOCATOR_ADD:
if (zclient->srv6_locator_add)
(*zclient->srv6_locator_add)(command, zclient, length,
vrf_id);
break;
case ZEBRA_SRV6_LOCATOR_DELETE:
if (zclient->srv6_locator_delete)
(*zclient->srv6_locator_delete)(command, zclient,
length, vrf_id);
break;
case ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK:
if (zclient->process_srv6_locator_chunk)
(*zclient->process_srv6_locator_chunk)(command, zclient,
length, vrf_id);
break;
case ZEBRA_ERROR:
zclient_handle_error(command, zclient, length, vrf_id);
break;

View File

@ -39,6 +39,7 @@
#include "mlag.h"
#include "srte.h"
#include "srv6.h"
#ifdef __cplusplus
extern "C" {
@ -216,6 +217,10 @@ typedef enum {
ZEBRA_NHG_NOTIFY_OWNER,
ZEBRA_EVPN_REMOTE_NH_ADD,
ZEBRA_EVPN_REMOTE_NH_DEL,
ZEBRA_SRV6_LOCATOR_ADD,
ZEBRA_SRV6_LOCATOR_DELETE,
ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK,
ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK,
ZEBRA_ERROR,
ZEBRA_CLIENT_CAPABILITIES,
ZEBRA_OPAQUE_MESSAGE,
@ -387,6 +392,11 @@ struct zclient {
int (*mlag_process_down)(void);
int (*mlag_handle_msg)(struct stream *msg, int len);
int (*nhg_notify_owner)(ZAPI_CALLBACK_ARGS);
int (*srv6_locator_add)(ZAPI_CALLBACK_ARGS);
int (*srv6_locator_delete)(ZAPI_CALLBACK_ARGS);
int (*srv6_function_add)(ZAPI_CALLBACK_ARGS);
int (*srv6_function_delete)(ZAPI_CALLBACK_ARGS);
void (*process_srv6_locator_chunk)(ZAPI_CALLBACK_ARGS);
int (*handle_error)(enum zebra_error_types error);
int (*opaque_msg_handler)(ZAPI_CALLBACK_ARGS);
int (*opaque_register_handler)(ZAPI_CALLBACK_ARGS);
@ -459,6 +469,13 @@ struct zapi_nexthop {
/* SR-TE color. */
uint32_t srte_color;
/* SRv6 localsid info for Endpoint-behaviour */
uint32_t seg6local_action;
struct seg6local_context seg6local_ctx;
/* SRv6 Headend-behaviour */
struct in6_addr seg6_segs;
};
/*
@ -471,6 +488,8 @@ struct zapi_nexthop {
#define ZAPI_NEXTHOP_FLAG_LABEL 0x02
#define ZAPI_NEXTHOP_FLAG_WEIGHT 0x04
#define ZAPI_NEXTHOP_FLAG_HAS_BACKUP 0x08 /* Nexthop has a backup */
#define ZAPI_NEXTHOP_FLAG_SEG6 0x10
#define ZAPI_NEXTHOP_FLAG_SEG6LOCAL 0x20
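
A minimal illustration of what the two new flags gate on a zapi_nexthop; this is a sketch that would live in a client's .c file (most daemons instead let zapi_nexthop_from_nexthop(), shown earlier, fill these fields from a struct nexthop):

#include <string.h>
#include "zclient.h"   /* pulls in srv6.h and the flag definitions above */

/* Sketch: attach an SRv6 encapsulation SID to a zapi_nexthop by hand. */
static void example_set_seg6(struct zapi_nexthop *znh,
                             const struct in6_addr *sid)
{
    SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6);
    memcpy(&znh->seg6_segs, sid, sizeof(struct in6_addr));
}

/* Sketch: request a seg6local End behaviour instead. */
static void example_set_seg6local_end(struct zapi_nexthop *znh)
{
    struct seg6local_context ctx = {};

    SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
    znh->seg6local_action = ZEBRA_SEG6_LOCAL_ACTION_END;
    znh->seg6local_ctx = ctx;
}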
/*
* ZAPI Nexthop Group. For use with protocol creation of nexthop groups.
@ -905,6 +924,11 @@ extern enum zclient_send_status
zclient_send_vrf_label(struct zclient *zclient, vrf_id_t vrf_id, afi_t afi,
mpls_label_t label, enum lsp_types_t ltype);
extern enum zclient_send_status
zclient_send_localsid(struct zclient *zclient, const struct in6_addr *sid,
ifindex_t oif, enum seg6local_action_t action,
const struct seg6local_context *context);
extern void zclient_send_reg_requests(struct zclient *, vrf_id_t);
extern void zclient_send_dereg_requests(struct zclient *, vrf_id_t);
extern enum zclient_send_status
@ -1037,6 +1061,10 @@ extern int tm_get_table_chunk(struct zclient *zclient, uint32_t chunk_size,
uint32_t *start, uint32_t *end);
extern int tm_release_table_chunk(struct zclient *zclient, uint32_t start,
uint32_t end);
extern int srv6_manager_get_locator_chunk(struct zclient *zclient,
const char *locator_name);
extern int srv6_manager_release_locator_chunk(struct zclient *zclient,
const char *locator_name);
extern enum zclient_send_status zebra_send_sr_policy(struct zclient *zclient,
int cmd,
@ -1054,6 +1082,11 @@ extern int zapi_labels_encode(struct stream *s, int cmd,
struct zapi_labels *zl);
extern int zapi_labels_decode(struct stream *s, struct zapi_labels *zl);
extern int zapi_srv6_locator_chunk_encode(struct stream *s,
const struct srv6_locator_chunk *c);
extern int zapi_srv6_locator_chunk_decode(struct stream *s,
struct srv6_locator_chunk *c);
extern enum zclient_send_status zebra_send_pw(struct zclient *zclient,
int command, struct zapi_pw *pw);
extern int zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS,

View File

@ -22,6 +22,8 @@
#ifndef __SHARP_GLOBAL_H__
#define __SHARP_GLOBAL_H__
#include "lib/srv6.h"
DECLARE_MGROUP(SHARPD);
struct sharp_routes {
@ -40,6 +42,9 @@ struct sharp_routes {
uint32_t removed_routes;
int32_t repeat;
/* ZAPI_ROUTE's flag */
uint32_t flags;
uint8_t inst;
vrf_id_t vrf_id;
@ -49,6 +54,14 @@ struct sharp_routes {
char opaque[ZAPI_MESSAGE_OPAQUE_LENGTH];
};
struct sharp_srv6_locator {
/* name of locator */
char name[SRV6_LOCNAME_SIZE];
/* list of struct prefix_ipv6 */
struct list *chunks;
};
struct sharp_global {
/* Global data about route install/deletions */
struct sharp_routes r;
@ -58,6 +71,9 @@ struct sharp_global {
/* Traffic Engineering Database */
struct ls_ted *ted;
/* list of sharp_srv6_locator */
struct list *srv6_locators;
};
extern struct sharp_global sg;

View File

@ -140,6 +140,7 @@ static void sharp_global_init(void)
memset(&sg, 0, sizeof(sg));
sg.nhs = list_new();
sg.ted = NULL;
sg.srv6_locators = list_new();
}
static void sharp_start_configuration(void)

View File

@ -39,6 +39,8 @@
#include "sharpd/sharp_vty_clippy.c"
#endif
DEFINE_MTYPE_STATIC(SHARPD, SRV6_LOCATOR, "SRv6 Locator");
DEFPY(watch_redistribute, watch_redistribute_cmd,
"sharp watch [vrf NAME$vrf_name] redistribute " FRR_REDIST_STR_SHARPD,
"Sharp routing Protocol\n"
@ -332,7 +334,181 @@ DEFPY (install_routes,
rts = routes;
sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, nhgid,
&sg.r.nhop_group, &sg.r.backup_nhop_group,
rts, sg.r.opaque);
rts, 0, sg.r.opaque);
return CMD_SUCCESS;
}
DEFPY (install_seg6_routes,
install_seg6_routes_cmd,
"sharp install seg6-routes [vrf NAME$vrf_name]\
<A.B.C.D$start4|X:X::X:X$start6>\
nexthop-seg6 X:X::X:X$seg6_nh6 encap X:X::X:X$seg6_seg\
(1-1000000)$routes [repeat (2-1000)$rpt]",
"Sharp routing Protocol\n"
"install some routes\n"
"Routes to install\n"
"The vrf we would like to install into if non-default\n"
"The NAME of the vrf\n"
"v4 Address to start /32 generation at\n"
"v6 Address to start /32 generation at\n"
"Nexthop-seg6 to use\n"
"V6 Nexthop address to use\n"
"Encap mode\n"
"Segment List to use\n"
"How many to create\n"
"Should we repeat this command\n"
"How many times to repeat this command\n")
{
struct vrf *vrf;
struct prefix prefix;
uint32_t route_flags = 0;
sg.r.total_routes = routes;
sg.r.installed_routes = 0;
if (rpt >= 2)
sg.r.repeat = rpt * 2;
else
sg.r.repeat = 0;
memset(&prefix, 0, sizeof(prefix));
memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group));
sg.r.opaque[0] = '\0';
sg.r.inst = 0;
if (start4.s_addr != INADDR_ANY) {
prefix.family = AF_INET;
prefix.prefixlen = 32;
prefix.u.prefix4 = start4;
} else {
prefix.family = AF_INET6;
prefix.prefixlen = 128;
prefix.u.prefix6 = start6;
}
sg.r.orig_prefix = prefix;
if (!vrf_name)
vrf_name = VRF_DEFAULT_NAME;
vrf = vrf_lookup_by_name(vrf_name);
if (!vrf) {
vty_out(vty, "The vrf NAME specified: %s does not exist\n",
vrf_name);
return CMD_WARNING;
}
sg.r.nhop.type = NEXTHOP_TYPE_IPV6;
sg.r.nhop.gate.ipv6 = seg6_nh6;
sg.r.nhop.vrf_id = vrf->vrf_id;
sg.r.nhop_group.nexthop = &sg.r.nhop;
nexthop_add_srv6_seg6(&sg.r.nhop, &seg6_seg);
sg.r.vrf_id = vrf->vrf_id;
sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, 0,
&sg.r.nhop_group, &sg.r.backup_nhop_group,
routes, route_flags, sg.r.opaque);
return CMD_SUCCESS;
}
DEFPY (install_seg6local_routes,
install_seg6local_routes_cmd,
"sharp install seg6local-routes [vrf NAME$vrf_name]\
X:X::X:X$start6\
nexthop-seg6local NAME$seg6l_oif\
<End$seg6l_end|\
End_X$seg6l_endx X:X::X:X$seg6l_endx_nh6|\
End_T$seg6l_endt (1-4294967295)$seg6l_endt_table|\
End_DX4$seg6l_enddx4 A.B.C.D$seg6l_enddx4_nh4|\
End_DT6$seg6l_enddt6 (1-4294967295)$seg6l_enddt6_table>\
(1-1000000)$routes [repeat (2-1000)$rpt]",
"Sharp routing Protocol\n"
"install some routes\n"
"Routes to install\n"
"The vrf we would like to install into if non-default\n"
"The NAME of the vrf\n"
"v6 Address to start /32 generation at\n"
"Nexthop-seg6local to use\n"
"Output device to use\n"
"SRv6 End function to use\n"
"SRv6 End.X function to use\n"
"V6 Nexthop address to use\n"
"SRv6 End.T function to use\n"
"Redirect table id to use\n"
"SRv6 End.DX4 function to use\n"
"V4 Nexthop address to use\n"
"SRv6 End.DT6 function to use\n"
"Redirect table id to use\n"
"How many to create\n"
"Should we repeat this command\n"
"How many times to repeat this command\n")
{
struct vrf *vrf;
uint32_t route_flags = 0;
struct seg6local_context ctx = {};
enum seg6local_action_t action;
sg.r.total_routes = routes;
sg.r.installed_routes = 0;
if (rpt >= 2)
sg.r.repeat = rpt * 2;
else
sg.r.repeat = 0;
memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group));
sg.r.opaque[0] = '\0';
sg.r.inst = 0;
sg.r.orig_prefix.family = AF_INET6;
sg.r.orig_prefix.prefixlen = 128;
sg.r.orig_prefix.u.prefix6 = start6;
if (!vrf_name)
vrf_name = VRF_DEFAULT_NAME;
vrf = vrf_lookup_by_name(vrf_name);
if (!vrf) {
vty_out(vty, "The vrf NAME specified: %s does not exist\n",
vrf_name);
return CMD_WARNING;
}
if (seg6l_enddx4) {
action = ZEBRA_SEG6_LOCAL_ACTION_END_DX4;
ctx.nh4 = seg6l_enddx4_nh4;
} else if (seg6l_endx) {
action = ZEBRA_SEG6_LOCAL_ACTION_END_X;
ctx.nh6 = seg6l_endx_nh6;
} else if (seg6l_endt) {
action = ZEBRA_SEG6_LOCAL_ACTION_END_T;
ctx.table = seg6l_endt_table;
} else if (seg6l_enddt6) {
action = ZEBRA_SEG6_LOCAL_ACTION_END_DT6;
ctx.table = seg6l_enddt6_table;
} else {
action = ZEBRA_SEG6_LOCAL_ACTION_END;
}
sg.r.nhop.type = NEXTHOP_TYPE_IFINDEX;
sg.r.nhop.ifindex = ifname2ifindex(seg6l_oif, vrf->vrf_id);
sg.r.nhop.vrf_id = vrf->vrf_id;
sg.r.nhop_group.nexthop = &sg.r.nhop;
nexthop_add_srv6_seg6local(&sg.r.nhop, action, &ctx);
sg.r.vrf_id = vrf->vrf_id;
sharp_install_routes_helper(&sg.r.orig_prefix, sg.r.vrf_id, sg.r.inst,
0, &sg.r.nhop_group,
&sg.r.backup_nhop_group, routes,
route_flags, sg.r.opaque);
return CMD_SUCCESS;
}
@ -740,6 +916,40 @@ DEFPY (import_te,
return CMD_SUCCESS;
}
DEFPY (sharp_srv6_manager_get_locator_chunk,
sharp_srv6_manager_get_locator_chunk_cmd,
"sharp srv6-manager get-locator-chunk NAME$locator_name",
SHARP_STR
"Segment-Routing IPv6\n"
"Get SRv6 locator-chunk\n"
"SRv6 Locator name\n")
{
int ret;
struct listnode *node;
struct sharp_srv6_locator *loc;
struct sharp_srv6_locator *loc_found = NULL;
for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, node, loc)) {
if (strcmp(loc->name, locator_name))
continue;
loc_found = loc;
break;
}
if (!loc_found) {
loc = XCALLOC(MTYPE_SRV6_LOCATOR,
sizeof(struct sharp_srv6_locator));
loc->chunks = list_new();
snprintf(loc->name, SRV6_LOCNAME_SIZE, "%s", locator_name);
listnode_add(sg.srv6_locators, loc);
}
ret = sharp_zebra_srv6_manager_get_locator_chunk(locator_name);
if (ret < 0)
return CMD_WARNING_CONFIG_FAILED;
return CMD_SUCCESS;
}
DEFUN (show_sharp_ted,
show_sharp_ted_cmd,
"show sharp ted [<vertex [A.B.C.D]|edge [A.B.C.D]|subnet [A.B.C.D/M]>] [verbose|json]",
@ -861,6 +1071,88 @@ DEFUN (show_sharp_ted,
json, JSON_C_TO_STRING_PRETTY));
json_object_free(json);
}
return CMD_SUCCESS;
}
DEFPY (sharp_srv6_manager_release_locator_chunk,
sharp_srv6_manager_release_locator_chunk_cmd,
"sharp srv6-manager release-locator-chunk NAME$locator_name",
SHARP_STR
"Segment-Routing IPv6\n"
"Release SRv6 locator-chunk\n"
"SRv6 Locator name\n")
{
int ret;
struct listnode *loc_node;
struct sharp_srv6_locator *loc;
for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
if (!strcmp(loc->name, locator_name)) {
list_delete_all_node(loc->chunks);
list_delete(&loc->chunks);
listnode_delete(sg.srv6_locators, loc);
break;
}
}
ret = sharp_zebra_srv6_manager_release_locator_chunk(locator_name);
if (ret < 0)
return CMD_WARNING_CONFIG_FAILED;
return CMD_SUCCESS;
}
DEFPY (show_sharp_segment_routing_srv6,
show_sharp_segment_routing_srv6_cmd,
"show sharp segment-routing srv6 [json]",
SHOW_STR
SHARP_STR
"Segment-Routing\n"
"Segment-Routing IPv6\n"
JSON_STR)
{
char str[256];
struct listnode *loc_node;
struct listnode *chunk_node;
struct sharp_srv6_locator *loc;
struct prefix_ipv6 *chunk;
bool uj = use_json(argc, argv);
json_object *jo_locs = NULL;
json_object *jo_loc = NULL;
json_object *jo_chunks = NULL;
if (uj) {
jo_locs = json_object_new_array();
for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
jo_loc = json_object_new_object();
json_object_array_add(jo_locs, jo_loc);
json_object_string_add(jo_loc, "name", loc->name);
jo_chunks = json_object_new_array();
json_object_object_add(jo_loc, "chunks", jo_chunks);
for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node,
chunk)) {
prefix2str(chunk, str, sizeof(str));
json_array_string_add(jo_chunks, str);
}
}
vty_out(vty, "%s\n", json_object_to_json_string_ext(
jo_locs, JSON_C_TO_STRING_PRETTY));
json_object_free(jo_locs);
} else {
for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
vty_out(vty, "Locator %s has %d prefix chunks\n",
loc->name, listcount(loc->chunks));
for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node,
chunk)) {
prefix2str(chunk, str, sizeof(str));
vty_out(vty, " %s\n", str);
}
vty_out(vty, "\n");
}
}
return CMD_SUCCESS;
}
@ -868,6 +1160,8 @@ void sharp_vty_init(void)
{
install_element(ENABLE_NODE, &install_routes_data_dump_cmd);
install_element(ENABLE_NODE, &install_routes_cmd);
install_element(ENABLE_NODE, &install_seg6_routes_cmd);
install_element(ENABLE_NODE, &install_seg6local_routes_cmd);
install_element(ENABLE_NODE, &remove_routes_cmd);
install_element(ENABLE_NODE, &vrf_label_cmd);
install_element(ENABLE_NODE, &sharp_nht_data_dump_cmd);
@ -888,5 +1182,10 @@ void sharp_vty_init(void)
install_element(ENABLE_NODE, &show_debugging_sharpd_cmd);
install_element(ENABLE_NODE, &show_sharp_ted_cmd);
install_element(ENABLE_NODE, &sharp_srv6_manager_get_locator_chunk_cmd);
install_element(ENABLE_NODE,
&sharp_srv6_manager_release_locator_chunk_cmd);
install_element(ENABLE_NODE, &show_sharp_segment_routing_srv6_cmd);
return;
}

View File

@ -230,6 +230,7 @@ struct buffer_delay {
vrf_id_t vrf_id;
uint8_t instance;
uint32_t nhgid;
uint32_t flags;
const struct nexthop_group *nhg;
const struct nexthop_group *backup_nhg;
enum where_to_restart restart;
@ -244,7 +245,8 @@ struct buffer_delay {
*/
static bool route_add(const struct prefix *p, vrf_id_t vrf_id, uint8_t instance,
uint32_t nhgid, const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg, char *opaque)
const struct nexthop_group *backup_nhg, uint32_t flags,
char *opaque)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
@ -258,6 +260,7 @@ static bool route_add(const struct prefix *p, vrf_id_t vrf_id, uint8_t instance,
api.safi = SAFI_UNICAST;
memcpy(&api.prefix, p, sizeof(*p));
api.flags = flags;
SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
@ -335,7 +338,8 @@ static void sharp_install_routes_restart(struct prefix *p, uint32_t count,
uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
uint32_t routes, char *opaque)
uint32_t routes, uint32_t flags,
char *opaque)
{
uint32_t temp, i;
bool v4 = false;
@ -348,7 +352,7 @@ static void sharp_install_routes_restart(struct prefix *p, uint32_t count,
for (i = count; i < routes; i++) {
bool buffered = route_add(p, vrf_id, (uint8_t)instance, nhgid,
nhg, backup_nhg, opaque);
nhg, backup_nhg, flags, opaque);
if (v4)
p->u.prefix4.s_addr = htonl(++temp);
else
@ -362,6 +366,7 @@ static void sharp_install_routes_restart(struct prefix *p, uint32_t count,
wb.instance = instance;
wb.nhgid = nhgid;
wb.nhg = nhg;
wb.flags = flags;
wb.backup_nhg = backup_nhg;
wb.opaque = opaque;
wb.restart = SHARP_INSTALL_ROUTES_RESTART;
@ -375,7 +380,7 @@ void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
uint32_t routes, char *opaque)
uint32_t routes, uint32_t flags, char *opaque)
{
zlog_debug("Inserting %u routes", routes);
@ -385,7 +390,7 @@ void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
monotime(&sg.r.t_start);
sharp_install_routes_restart(p, 0, vrf_id, instance, nhgid, nhg,
backup_nhg, routes, opaque);
backup_nhg, routes, flags, opaque);
}
static void sharp_remove_routes_restart(struct prefix *p, uint32_t count,
@ -451,7 +456,8 @@ static void handle_repeated(bool installed)
sharp_install_routes_helper(&p, sg.r.vrf_id, sg.r.inst,
sg.r.nhgid, &sg.r.nhop_group,
&sg.r.backup_nhop_group,
sg.r.total_routes, sg.r.opaque);
sg.r.total_routes, sg.r.flags,
sg.r.opaque);
}
}
@ -461,7 +467,8 @@ static void sharp_zclient_buffer_ready(void)
case SHARP_INSTALL_ROUTES_RESTART:
sharp_install_routes_restart(
&wb.p, wb.count, wb.vrf_id, wb.instance, wb.nhgid,
wb.nhg, wb.backup_nhg, wb.routes, wb.opaque);
wb.nhg, wb.backup_nhg, wb.routes, wb.flags,
wb.opaque);
return;
case SHARP_DELETE_ROUTES_RESTART:
sharp_remove_routes_restart(&wb.p, wb.count, wb.vrf_id,
@ -918,6 +925,50 @@ static int nhg_notify_owner(ZAPI_CALLBACK_ARGS)
return 0;
}
int sharp_zebra_srv6_manager_get_locator_chunk(const char *locator_name)
{
return srv6_manager_get_locator_chunk(zclient, locator_name);
}
int sharp_zebra_srv6_manager_release_locator_chunk(const char *locator_name)
{
return srv6_manager_release_locator_chunk(zclient, locator_name);
}
static void sharp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)
{
struct stream *s = NULL;
struct srv6_locator_chunk s6c = {};
struct listnode *node, *nnode;
struct sharp_srv6_locator *loc;
s = zclient->ibuf;
zapi_srv6_locator_chunk_decode(s, &s6c);
for (ALL_LIST_ELEMENTS(sg.srv6_locators, node, nnode, loc)) {
struct prefix_ipv6 *chunk = NULL;
struct listnode *chunk_node;
struct prefix_ipv6 *c;
if (strcmp(loc->name, s6c.locator_name) != 0) {
zlog_err("%s: Locator name unmatch %s:%s", __func__,
loc->name, s6c.locator_name);
continue;
}
for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node, c))
if (!prefix_cmp(c, &s6c.prefix))
return;
chunk = prefix_ipv6_new();
*chunk = s6c.prefix;
listnode_add(loc->chunks, chunk);
return;
}
zlog_err("%s: can't get locator_chunk!!", __func__);
}
void sharp_zebra_init(void)
{
struct zclient_options opt = {.receive_notify = true};
@ -939,4 +990,6 @@ void sharp_zebra_init(void)
zclient->redistribute_route_add = sharp_redistribute_route;
zclient->redistribute_route_del = sharp_redistribute_route;
zclient->opaque_msg_handler = sharp_opaque_handler;
zclient->process_srv6_locator_chunk =
sharp_zebra_process_srv6_locator_chunk;
}

View File

@ -39,7 +39,8 @@ extern void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
uint32_t routes, char *opaque);
uint32_t routes, uint32_t flags,
char *opaque);
extern void sharp_remove_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t routes);
@ -65,4 +66,11 @@ extern void sharp_zebra_register_te(void);
extern void sharp_redistribute_vrf(struct vrf *vrf, int source);
extern int sharp_zebra_srv6_manager_get_locator_chunk(const char *lname);
extern int sharp_zebra_srv6_manager_release_locator_chunk(const char *lname);
extern void sharp_install_seg6local_route_helper(struct prefix *p,
uint8_t instance,
enum seg6local_action_t act,
struct seg6local_context *ctx);
#endif

View File

@ -0,0 +1,29 @@
group controller {
neighbor 10.0.0.1 {
router-id 10.0.0.101;
local-address 10.0.0.101;
local-as 2;
peer-as 1;
family {
ipv6 mpls-vpn;
}
static {
route 2001:1::/64 {
rd 2:10;
next-hop 2001::2;
extended-community [ target:2:10 ];
label 3;
attribute [0x28 0xc0 0x0500150020010db800010001000000000000000100ffff00 ];
}
route 2001:2::/64 {
rd 2:10;
next-hop 2001::2;
extended-community [ target:2:10 ];
label 3;
attribute [0x28 0xc0 0x0500150020010db800010001000000000000000100ffff00 ];
}
}
}
}

View File

@ -0,0 +1,53 @@
[exabgp.api]
encoder = text
highres = false
respawn = false
socket = ''
[exabgp.bgp]
openwait = 60
[exabgp.cache]
attributes = true
nexthops = true
[exabgp.daemon]
daemonize = true
pid = '/var/run/exabgp/exabgp.pid'
user = 'exabgp'
[exabgp.log]
all = false
configuration = true
daemon = true
destination = '/var/log/exabgp.log'
enable = true
level = INFO
message = false
network = true
packets = false
parser = false
processes = true
reactor = true
rib = false
routes = false
short = false
timers = false
[exabgp.pdb]
enable = false
[exabgp.profile]
enable = false
file = ''
[exabgp.reactor]
speed = 1.0
[exabgp.tcp]
acl = false
bind = ''
delay = 0
once = false
port = 179

View File

@ -0,0 +1,26 @@
log stdout notifications
log monitor notifications
!log commands
!
!debug bgp zebra
!debug bgp neighbor-events
!debug bgp vnc verbose
!debug bgp update-groups
!debug bgp updates in
!debug bgp updates out
!debug bgp vpn label
!debug bgp vpn leak-from-vrf
!debug bgp vpn leak-to-vrf
!debug bgp vpn rmap-event
!
router bgp 1
bgp router-id 10.0.0.1
no bgp default ipv4-unicast
no bgp ebgp-requires-policy
neighbor 10.0.0.101 remote-as 2
neighbor 10.0.0.101 timers 3 10
!
address-family ipv6 vpn
neighbor 10.0.0.101 activate
exit-address-family
!

View File

@ -0,0 +1,50 @@
{
"2:10":{
"prefix":"2001:1::\/64",
"advertisedTo":{
"10.0.0.101":{
}
},
"paths":[
{
"aspath":{
"string":"2",
"segments":[
{
"type":"as-sequence",
"list":[
2
]
}
],
"length":1
},
"origin":"IGP",
"valid":true,
"bestpath":{
"overall":true
},
"extendedCommunity":{
"string":"RT:2:10"
},
"remoteLabel":3,
"remoteSid":"2001:db8:1:1::1",
"nexthops":[
{
"ip":"2001::2",
"afi":"ipv6",
"scope":"global",
"metric":0,
"accessible":true,
"used":true
}
],
"peer":{
"peerId":"10.0.0.101",
"routerId":"10.0.0.101",
"type":"external"
}
}
]
}
}

View File

@ -0,0 +1,50 @@
{
"2:10":{
"prefix":"2001:2::\/64",
"advertisedTo":{
"10.0.0.101":{
}
},
"paths":[
{
"aspath":{
"string":"2",
"segments":[
{
"type":"as-sequence",
"list":[
2
]
}
],
"length":1
},
"origin":"IGP",
"valid":true,
"bestpath":{
"overall":true
},
"extendedCommunity":{
"string":"RT:2:10"
},
"remoteLabel":3,
"remoteSid":"2001:db8:1:1::1",
"nexthops":[
{
"ip":"2001::2",
"afi":"ipv6",
"scope":"global",
"metric":0,
"accessible":true,
"used":true
}
],
"peer":{
"peerId":"10.0.0.101",
"routerId":"10.0.0.101",
"type":"external"
}
}
]
}
}

View File

@ -0,0 +1,7 @@
hostname r1
!
interface r1-eth0
ip address 10.0.0.1/24
no shutdown
!
line vty

View File

@ -0,0 +1,121 @@
#!/usr/bin/env python
#
# test_bgp_prefix_sid2.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2020 by LINE Corporation
# Copyright (c) 2020 by Hiroki Shirokura <slank.dev@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
test_bgp_prefix_sid2.py: Test BGP topology with eBGP and SRv6 prefix-sid
"""
import json
import os
import sys
import functools
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
class TemplateTopo(Topo):
def build(self, **_opts):
tgen = get_topogen(self)
router = tgen.add_router("r1")
switch = tgen.add_switch("s1")
switch.add_link(router)
switch = tgen.gears["s1"]
peer1 = tgen.add_exabgp_peer(
"peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
)
switch.add_link(peer1)
def setup_module(module):
tgen = Topogen(TemplateTopo, module.__name__)
tgen.start_topology()
router = tgen.gears["r1"]
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, "{}/zebra.conf".format("r1"))
)
router.load_config(
TopoRouter.RD_BGP,
os.path.join(CWD, "{}/bgpd.conf".format("r1"))
)
router.start()
logger.info("starting exaBGP")
peer_list = tgen.exabgp_peers()
for pname, peer in peer_list.items():
logger.info("starting exaBGP on {}".format(pname))
peer_dir = os.path.join(CWD, pname)
env_file = os.path.join(CWD, pname, "exabgp.env")
logger.info("Running ExaBGP peer on {}".format(pname))
peer.start(peer_dir, env_file)
logger.info(pname)
def teardown_module(module):
tgen = get_topogen()
tgen.stop_topology()
def open_json_file(filename):
try:
with open(filename, "r") as f:
return json.load(f)
except IOError:
assert False, "Could not read file {}".format(filename)
def test_r1_rib():
def _check(name, cmd, expected_file):
logger.info("polling")
tgen = get_topogen()
router = tgen.gears[name]
output = json.loads(router.vtysh_cmd(cmd))
expected = open_json_file("{}/{}".format(CWD, expected_file))
return topotest.json_cmp(output, expected)
def check(name, cmd, expected_file):
logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
tgen = get_topogen()
func = functools.partial(_check, name, cmd, expected_file)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
assert result is None, 'Failed'
check("r1", "show bgp ipv6 vpn 2001:1::/64 json", "r1/vpnv6_rib_entry1.json")
check("r1", "show bgp ipv6 vpn 2001:2::/64 json", "r1/vpnv6_rib_entry2.json")
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
ret = pytest.main(args)
sys.exit(ret)

View File

@ -0,0 +1,8 @@
frr defaults traditional
!
hostname ce1
password zebra
!
log stdout notifications
log commands
log file bgpd.log

View File

@ -0,0 +1,58 @@
{
"::/0": [
{
"prefix": "::/0",
"protocol": "static",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 1,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 73,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"ip": "2001:1::1",
"afi": "ipv6",
"interfaceName": "eth0",
"active": true,
"weight": 1
}
]
}
],
"2001:1::/64": [
{
"prefix": "2001:1::/64",
"protocol": "connected",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth0",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,14 @@
log file zebra.log
!
hostname ce1
!
interface eth0
ipv6 address 2001:1::2/64
!
ip forwarding
ipv6 forwarding
!
ipv6 route ::/0 2001:1::1
!
line vty
!

View File

@ -0,0 +1,8 @@
frr defaults traditional
!
hostname ce2
password zebra
!
log stdout notifications
log commands
log file bgpd.log

View File

@ -0,0 +1,58 @@
{
"::/0": [
{
"prefix": "::/0",
"protocol": "static",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 1,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 73,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"ip": "2001:2::1",
"afi": "ipv6",
"interfaceName": "eth0",
"active": true,
"weight": 1
}
]
}
],
"2001:2::/64": [
{
"prefix": "2001:2::/64",
"protocol": "connected",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth0",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,14 @@
log file zebra.log
!
hostname ce2
!
interface eth0
ipv6 address 2001:2::2/64
!
ip forwarding
ipv6 forwarding
!
ipv6 route ::/0 2001:2::1
!
line vty
!

View File

@ -0,0 +1,8 @@
frr defaults traditional
!
hostname ce3
password zebra
!
log stdout notifications
log commands
log file bgpd.log

View File

@ -0,0 +1,58 @@
{
"::/0": [
{
"prefix": "::/0",
"protocol": "static",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 1,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 73,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"ip": "2001:3::1",
"afi": "ipv6",
"interfaceName": "eth0",
"active": true,
"weight": 1
}
]
}
],
"2001:3::/64": [
{
"prefix": "2001:3::/64",
"protocol": "connected",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth0",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,14 @@
log file zebra.log
!
hostname ce3
!
interface eth0
ipv6 address 2001:3::2/64
!
ip forwarding
ipv6 forwarding
!
ipv6 route ::/0 2001:3::1
!
line vty
!

View File

@ -0,0 +1,8 @@
frr defaults traditional
!
hostname ce4
password zebra
!
log stdout notifications
log commands
log file bgpd.log

View File

@ -0,0 +1,58 @@
{
"::/0": [
{
"prefix": "::/0",
"protocol": "static",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 1,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 73,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"ip": "2001:4::1",
"afi": "ipv6",
"interfaceName": "eth0",
"active": true,
"weight": 1
}
]
}
],
"2001:4::/64": [
{
"prefix": "2001:4::/64",
"protocol": "connected",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth0",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,14 @@
log file zebra.log
!
hostname ce4
!
interface eth0
ipv6 address 2001:4::2/64
!
ip forwarding
ipv6 forwarding
!
ipv6 route ::/0 2001:4::1
!
line vty
!

View File

@ -0,0 +1,8 @@
frr defaults traditional
!
hostname ce5
password zebra
!
log stdout notifications
log commands
log file bgpd.log

View File

@ -0,0 +1,58 @@
{
"::/0": [
{
"prefix": "::/0",
"protocol": "static",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 1,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 73,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"ip": "2001:5::1",
"afi": "ipv6",
"interfaceName": "eth0",
"active": true,
"weight": 1
}
]
}
],
"2001:5::/64": [
{
"prefix": "2001:5::/64",
"protocol": "connected",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth0",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,14 @@
log file zebra.log
!
hostname ce5
!
interface eth0
ipv6 address 2001:5::2/64
!
ip forwarding
ipv6 forwarding
!
ipv6 route ::/0 2001:5::1
!
line vty
!

View File

@ -0,0 +1,8 @@
frr defaults traditional
!
hostname ce6
password zebra
!
log stdout notifications
log commands
log file bgpd.log

View File

@ -0,0 +1,58 @@
{
"::/0": [
{
"prefix": "::/0",
"protocol": "static",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 1,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 73,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"ip": "2001:6::1",
"afi": "ipv6",
"interfaceName": "eth0",
"active": true,
"weight": 1
}
]
}
],
"2001:6::/64": [
{
"prefix": "2001:6::/64",
"protocol": "connected",
"vrfId": 0,
"vrfName": "default",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 254,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth0",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,14 @@
log file zebra.log
!
hostname ce6
!
interface eth0
ipv6 address 2001:6::2/64
!
ip forwarding
ipv6 forwarding
!
ipv6 route ::/0 2001:6::1
!
line vty
!

View File

@ -0,0 +1,64 @@
frr defaults traditional
!
hostname r1
password zebra
!
log stdout notifications
log monitor notifications
log commands
!
!debug bgp neighbor-events
!debug bgp zebra
!debug bgp vnc verbose
!debug bgp update-groups
!debug bgp updates in
!debug bgp updates out
!debug bgp vpn label
!debug bgp vpn leak-from-vrf
!debug bgp vpn leak-to-vrf
!debug bgp vpn rmap-event
!
router bgp 1
bgp router-id 1.1.1.1
no bgp ebgp-requires-policy
no bgp default ipv4-unicast
neighbor 2001::2 remote-as 2
neighbor 2001::2 timers 3 10
neighbor 2001::2 timers connect 1
!
address-family ipv6 vpn
neighbor 2001::2 activate
exit-address-family
!
segment-routing srv6
locator loc1
!
!
router bgp 1 vrf vrf10
bgp router-id 1.1.1.1
no bgp ebgp-requires-policy
no bgp default ipv4-unicast
!
address-family ipv6 unicast
sid vpn export auto
rd vpn export 1:10
rt vpn both 99:99
import vpn
export vpn
redistribute connected
exit-address-family
!
router bgp 1 vrf vrf20
bgp router-id 1.1.1.1
no bgp ebgp-requires-policy
no bgp default ipv4-unicast
!
address-family ipv6 unicast
sid vpn export auto
rd vpn export 1:20
rt vpn both 88:88
import vpn
export vpn
redistribute connected
exit-address-family
!

View File

@ -0,0 +1,170 @@
{
"vrfId": 0,
"vrfName": "default",
"tableVersion": 2,
"routerId": "1.1.1.1",
"defaultLocPrf": 100,
"localAS": 1,
"routes": {
"routeDistinguishers": {
"1:10": {
"2001:1::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:1::",
"prefixLen": 64,
"network": "2001:1::/64",
"metric": 0,
"weight": 32768,
"peerId": "(unspec)",
"path": "",
"origin": "incomplete",
"announceNexthopSelf": true,
"nhVrfName": "vrf10",
"nexthops": [
{
"ip": "::",
"hostname": "r1",
"afi": "ipv6",
"used": true
}
]
}
],
"2001:3::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:3::",
"prefixLen": 64,
"network": "2001:3::/64",
"metric": 0,
"weight": 32768,
"peerId": "(unspec)",
"path": "",
"origin": "incomplete",
"announceNexthopSelf": true,
"nhVrfName": "vrf10",
"nexthops": [
{
"ip": "::",
"hostname": "r1",
"afi": "ipv6",
"used": true
}
]
}
]
},
"1:20": {
"2001:5::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:5::",
"prefixLen": 64,
"network": "2001:5::/64",
"metric": 0,
"weight": 32768,
"peerId": "(unspec)",
"path": "",
"origin": "incomplete",
"announceNexthopSelf": true,
"nhVrfName": "vrf20",
"nexthops": [
{
"ip": "::",
"hostname": "r1",
"afi": "ipv6",
"used": true
}
]
}
]
},
"2:10": {
"2001:2::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:2::",
"prefixLen": 64,
"network": "2001:2::/64",
"metric": 0,
"weight": 0,
"peerId": "2001::2",
"path": "2",
"origin": "incomplete",
"nexthops": [
{
"ip": "2001::2",
"hostname": "r2",
"afi": "ipv6",
"used": true
}
]
}
]
},
"2:20": {
"2001:4::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:4::",
"prefixLen": 64,
"network": "2001:4::/64",
"metric": 0,
"weight": 0,
"peerId": "2001::2",
"path": "2",
"origin": "incomplete",
"nexthops": [
{
"ip": "2001::2",
"hostname": "r2",
"afi": "ipv6",
"used": true
}
]
}
],
"2001:6::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:6::",
"prefixLen": 64,
"network": "2001:6::/64",
"metric": 0,
"weight": 0,
"peerId": "2001::2",
"path": "2",
"origin": "incomplete",
"nexthops": [
{
"ip": "2001::2",
"hostname": "r2",
"afi": "ipv6",
"used": true
}
]
}
]
}
}
}
}

View File

@ -0,0 +1,89 @@
{
"2001:1::/64": [
{
"prefix": "2001:1::/64",
"protocol": "connected",
"vrfName": "vrf10",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 10,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth1",
"active": true
}
]
}
],
"2001:2::/64": [
{
"prefix": "2001:2::/64",
"protocol": "bgp",
"vrfName": "vrf10",
"selected": true,
"destSelected": true,
"distance": 20,
"metric": 0,
"installed": true,
"table": 10,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"afi": "ipv6",
"interfaceName": "eth0",
"vrf": "default",
"active": true,
"labels": [
3
],
"weight": 1,
"seg6": {
"segs": "2001:db8:2:2::100"
}
}
],
"asPath": "2"
}
],
"2001:3::/64": [
{
"prefix": "2001:3::/64",
"protocol": "connected",
"vrfName": "vrf10",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 10,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth2",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,98 @@
{
"2001:4::/64": [
{
"prefix": "2001:4::/64",
"protocol": "bgp",
"vrfName": "vrf20",
"selected": true,
"destSelected": true,
"distance": 20,
"metric": 0,
"installed": true,
"table": 20,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"afi": "ipv6",
"interfaceName": "eth0",
"vrf": "default",
"active": true,
"labels": [
3
],
"weight": 1,
"seg6": {
"segs": "2001:db8:2:2::200"
}
}
],
"asPath": "2"
}
],
"2001:5::/64": [
{
"prefix": "2001:5::/64",
"protocol": "connected",
"vrfName": "vrf20",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 20,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth3",
"active": true
}
]
}
],
"2001:6::/64": [
{
"prefix": "2001:6::/64",
"protocol": "bgp",
"vrfName": "vrf20",
"selected": true,
"destSelected": true,
"distance": 20,
"metric": 0,
"installed": true,
"table": 20,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"afi": "ipv6",
"interfaceName": "eth0",
"vrf": "default",
"active": true,
"labels": [
3
],
"weight": 1,
"seg6": {
"segs": "2001:db8:2:2::200"
}
}
],
"asPath": "2"
}
]
}

View File

@ -0,0 +1,40 @@
log file zebra.log
!
hostname r1
password zebra
!
log stdout notifications
log monitor notifications
log commands
!
debug zebra packet
debug zebra dplane
debug zebra kernel
!
interface eth0
ipv6 address 2001::1/64
!
interface eth1 vrf vrf10
ipv6 address 2001:1::1/64
!
interface eth2 vrf vrf10
ipv6 address 2001:3::1/64
!
interface eth3 vrf vrf20
ipv6 address 2001:5::1/64
!
segment-routing
srv6
locators
locator loc1
prefix 2001:db8:1:1::/64
!
!
!
ip forwarding
ipv6 forwarding
!
ipv6 route 2001:db8:2:2::/64 2001::2
!
line vty
!

View File

@ -0,0 +1,65 @@
frr defaults traditional
!
hostname r2
password zebra
!
log stdout notifications
log monitor notifications
log commands
!
!debug bgp neighbor-events
!debug bgp zebra
!debug bgp vnc verbose
!debug bgp update-groups
!debug bgp updates in
!debug bgp updates out
!debug bgp updates
!debug bgp vpn label
!debug bgp vpn leak-from-vrf
!debug bgp vpn leak-to-vrf
!debug bgp vpn rmap-event
!
router bgp 2
bgp router-id 2.2.2.2
no bgp ebgp-requires-policy
no bgp default ipv4-unicast
neighbor 2001::1 remote-as 1
neighbor 2001::1 timers 3 10
neighbor 2001::1 timers connect 1
!
address-family ipv6 vpn
neighbor 2001::1 activate
exit-address-family
!
segment-routing srv6
locator loc1
!
!
router bgp 2 vrf vrf10
bgp router-id 2.2.2.2
no bgp ebgp-requires-policy
no bgp default ipv4-unicast
!
address-family ipv6 unicast
sid vpn export auto
rd vpn export 2:10
rt vpn both 99:99
import vpn
export vpn
redistribute connected
exit-address-family
!
router bgp 2 vrf vrf20
bgp router-id 2.2.2.2
no bgp ebgp-requires-policy
no bgp default ipv4-unicast
!
address-family ipv6 unicast
sid vpn export auto
rd vpn export 2:20
rt vpn both 88:88
import vpn
export vpn
redistribute connected
exit-address-family
!

View File

@ -0,0 +1,170 @@
{
"vrfId": 0,
"vrfName": "default",
"tableVersion": 2,
"routerId": "2.2.2.2",
"defaultLocPrf": 100,
"localAS": 2,
"routes": {
"routeDistinguishers": {
"1:10": {
"2001:1::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:1::",
"prefixLen": 64,
"network": "2001:1::/64",
"metric": 0,
"weight": 0,
"peerId": "2001::1",
"path": "1",
"origin": "incomplete",
"nexthops": [
{
"ip": "2001::1",
"hostname": "r1",
"afi": "ipv6",
"used": true
}
]
}
],
"2001:3::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:3::",
"prefixLen": 64,
"network": "2001:3::/64",
"metric": 0,
"weight": 0,
"peerId": "2001::1",
"path": "1",
"origin": "incomplete",
"nexthops": [
{
"ip": "2001::1",
"hostname": "r1",
"afi": "ipv6",
"used": true
}
]
}
]
},
"1:20": {
"2001:5::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:5::",
"prefixLen": 64,
"network": "2001:5::/64",
"metric": 0,
"weight": 0,
"peerId": "2001::1",
"path": "1",
"origin": "incomplete",
"nexthops": [
{
"ip": "2001::1",
"hostname": "r1",
"afi": "ipv6",
"used": true
}
]
}
]
},
"2:10": {
"2001:2::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:2::",
"prefixLen": 64,
"network": "2001:2::/64",
"metric": 0,
"weight": 32768,
"peerId": "(unspec)",
"path": "",
"origin": "incomplete",
"announceNexthopSelf": true,
"nhVrfName": "vrf10",
"nexthops": [
{
"ip": "::",
"hostname": "r2",
"afi": "ipv6",
"used": true
}
]
}
]
},
"2:20": {
"2001:4::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:4::",
"prefixLen": 64,
"network": "2001:4::/64",
"metric": 0,
"weight": 32768,
"peerId": "(unspec)",
"path": "",
"origin": "incomplete",
"announceNexthopSelf": true,
"nhVrfName": "vrf20",
"nexthops": [
{
"ip": "::",
"hostname": "r2",
"afi": "ipv6",
"used": true
}
]
}
],
"2001:6::/64": [
{
"valid": true,
"bestpath": true,
"selectionReason": "First path received",
"pathFrom": "external",
"prefix": "2001:6::",
"prefixLen": 64,
"network": "2001:6::/64",
"metric": 0,
"weight": 32768,
"peerId": "(unspec)",
"path": "",
"origin": "incomplete",
"announceNexthopSelf": true,
"nhVrfName": "vrf20",
"nexthops": [
{
"ip": "::",
"hostname": "r2",
"afi": "ipv6",
"used": true
}
]
}
]
}
}
}
}

View File

@ -0,0 +1,98 @@
{
"2001:1::/64": [
{
"prefix": "2001:1::/64",
"protocol": "bgp",
"vrfName": "vrf10",
"selected": true,
"destSelected": true,
"distance": 20,
"metric": 0,
"installed": true,
"table": 10,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"afi": "ipv6",
"interfaceName": "eth0",
"vrf": "default",
"active": true,
"labels": [
3
],
"weight": 1,
"seg6": {
"segs": "2001:db8:1:1::100"
}
}
],
"asPath": "1"
}
],
"2001:2::/64": [
{
"prefix": "2001:2::/64",
"protocol": "connected",
"vrfName": "vrf10",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 10,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth1",
"active": true
}
]
}
],
"2001:3::/64": [
{
"prefix": "2001:3::/64",
"protocol": "bgp",
"vrfName": "vrf10",
"selected": true,
"destSelected": true,
"distance": 20,
"metric": 0,
"installed": true,
"table": 10,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"afi": "ipv6",
"interfaceName": "eth0",
"vrf": "default",
"active": true,
"labels": [
3
],
"weight": 1,
"seg6": {
"segs": "2001:db8:1:1::100"
}
}
],
"asPath": "1"
}
]
}

View File

@ -0,0 +1,89 @@
{
"2001:4::/64": [
{
"prefix": "2001:4::/64",
"protocol": "connected",
"vrfName": "vrf20",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 20,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth2",
"active": true
}
]
}
],
"2001:5::/64": [
{
"prefix": "2001:5::/64",
"protocol": "bgp",
"vrfName": "vrf20",
"selected": true,
"destSelected": true,
"distance": 20,
"metric": 0,
"installed": true,
"table": 20,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"afi": "ipv6",
"interfaceName": "eth0",
"vrf": "default",
"active": true,
"labels": [
3
],
"weight": 1,
"seg6": {
"segs": "2001:db8:1:1::200"
}
}
],
"asPath": "1"
}
],
"2001:6::/64": [
{
"prefix": "2001:6::/64",
"protocol": "connected",
"vrfName": "vrf20",
"selected": true,
"destSelected": true,
"distance": 0,
"metric": 0,
"installed": true,
"table": 20,
"internalStatus": 16,
"internalFlags": 8,
"internalNextHopNum": 1,
"internalNextHopActiveNum": 1,
"nexthops": [
{
"flags": 3,
"fib": true,
"directlyConnected": true,
"interfaceName": "eth3",
"active": true
}
]
}
]
}

View File

@ -0,0 +1,40 @@
log file zebra.log
!
hostname r2
password zebra
!
log stdout notifications
log monitor notifications
log commands
!
debug zebra packet
debug zebra dplane
debug zebra kernel
!
interface eth0
ipv6 address 2001::2/64
!
interface eth1 vrf vrf10
ipv6 address 2001:2::1/64
!
interface eth2 vrf vrf20
ipv6 address 2001:4::1/64
!
interface eth3 vrf vrf20
ipv6 address 2001:6::1/64
!
segment-routing
srv6
locators
locator loc1
prefix 2001:db8:2:2::/64
!
!
!
ip forwarding
ipv6 forwarding
!
ipv6 route 2001:db8:1:1::/64 2001::1
!
line vty
!

View File

@ -0,0 +1,204 @@
#!/usr/bin/env python
#
# Part of NetDEF Topology Tests
#
# Copyright (c) 2018, LabN Consulting, L.L.C.
# Authored by Lou Berger <lberger@labn.net>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
import os
import re
import sys
import json
import functools
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.common_config import required_linux_kernel_version
from mininet.topo import Topo
class Topology(Topo):
"""
CE1 CE3 CE5
(eth0) (eth0) (eth0)
:2 :2 :2
| | |
2001: 2001: 2001:
1::/64 3::/64 5::/64
| | |
:1 :1 :1
+-(eth1)--(eth2)---(eth3)-+
| \ / | |
| (vrf10) (vrf20) |
| R1 |
+----------(eth0)---------+
:1
|
2001::/64
|
:2
(eth0)
+----------(eth0)--------------+
| R2 |
| (vrf10) (vrf20) |
| / / \ |
+-(eth1)-----(eth2)-----(eth3)-+
:1 :1 :1
| | |
+------+ +------+ +------+
/ 2001: \ / 2001: \ / 2001: \
\ 2::/64 / \ 4::/64 / \ 6::/64 /
+------+ +------+ +------+
| | |
:2 :2 :2
(eth0) (eth0) (eth0)
CE2 CE4 CE6
"""
def build(self, *_args, **_opts):
tgen = get_topogen(self)
tgen.add_router("r1")
tgen.add_router("r2")
tgen.add_router("ce1")
tgen.add_router("ce2")
tgen.add_router("ce3")
tgen.add_router("ce4")
tgen.add_router("ce5")
tgen.add_router("ce6")
tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "eth0", "eth0")
tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "eth0", "eth1")
tgen.add_link(tgen.gears["ce2"], tgen.gears["r2"], "eth0", "eth1")
tgen.add_link(tgen.gears["ce3"], tgen.gears["r1"], "eth0", "eth2")
tgen.add_link(tgen.gears["ce4"], tgen.gears["r2"], "eth0", "eth2")
tgen.add_link(tgen.gears["ce5"], tgen.gears["r1"], "eth0", "eth3")
tgen.add_link(tgen.gears["ce6"], tgen.gears["r2"], "eth0", "eth3")
def setup_module(mod):
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")
tgen = Topogen(Topology, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
router.load_config(TopoRouter.RD_ZEBRA,
os.path.join(CWD, '{}/zebra.conf'.format(rname)))
router.load_config(TopoRouter.RD_BGP,
os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
tgen.gears["r1"].run("ip link add vrf10 type vrf table 10")
tgen.gears["r1"].run("ip link set vrf10 up")
tgen.gears["r1"].run("ip link add vrf20 type vrf table 20")
tgen.gears["r1"].run("ip link set vrf20 up")
tgen.gears["r1"].run("ip link set eth1 master vrf10")
tgen.gears["r1"].run("ip link set eth2 master vrf10")
tgen.gears["r1"].run("ip link set eth3 master vrf20")
tgen.gears["r2"].run("ip link add vrf10 type vrf table 10")
tgen.gears["r2"].run("ip link set vrf10 up")
tgen.gears["r2"].run("ip link add vrf20 type vrf table 20")
tgen.gears["r2"].run("ip link set vrf20 up")
tgen.gears["r2"].run("ip link set eth1 master vrf10")
tgen.gears["r2"].run("ip link set eth2 master vrf20")
tgen.gears["r2"].run("ip link set eth3 master vrf20")
tgen.start_router()
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
def open_json_file(filename):
try:
with open(filename, "r") as f:
return json.load(f)
except IOError:
assert False, "Could not read file {}".format(filename)
def test_rib():
def _check(name, cmd, expected_file):
logger.info("polling")
tgen = get_topogen()
router = tgen.gears[name]
output = json.loads(router.vtysh_cmd(cmd))
expected = open_json_file("{}/{}".format(CWD, expected_file))
return topotest.json_cmp(output, expected)
def check(name, cmd, expected_file):
logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
tgen = get_topogen()
func = functools.partial(_check, name, cmd, expected_file)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
assert result is None, 'Failed'
check("r1", "show bgp ipv6 vpn json", "r1/vpnv6_rib.json")
check("r2", "show bgp ipv6 vpn json", "r2/vpnv6_rib.json")
check("r1", "show ipv6 route vrf vrf10 json", "r1/vrf10_rib.json")
check("r1", "show ipv6 route vrf vrf20 json", "r1/vrf20_rib.json")
check("r2", "show ipv6 route vrf vrf10 json", "r2/vrf10_rib.json")
check("r2", "show ipv6 route vrf vrf20 json", "r2/vrf20_rib.json")
check("ce1", "show ipv6 route json", "ce1/ipv6_rib.json")
check("ce2", "show ipv6 route json", "ce2/ipv6_rib.json")
check("ce3", "show ipv6 route json", "ce3/ipv6_rib.json")
check("ce4", "show ipv6 route json", "ce4/ipv6_rib.json")
check("ce5", "show ipv6 route json", "ce5/ipv6_rib.json")
check("ce6", "show ipv6 route json", "ce6/ipv6_rib.json")
def test_ping():
def _check(name, dest_addr, match):
tgen = get_topogen()
output = tgen.gears[name].run("ping6 {} -c 1 -w 1".format(dest_addr))
logger.info(output)
assert match in output, "ping fail"
def check(name, dest_addr, match):
logger.info("[+] check {} {} {}".format(name, dest_addr, match))
tgen = get_topogen()
func = functools.partial(_check, name, dest_addr, match)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
assert result is None, 'Failed'
check("ce1", "2001:2::2", " 0% packet loss")
check("ce1", "2001:3::2", " 0% packet loss")
check("ce1", "2001:4::2", " 100% packet loss")
check("ce1", "2001:5::2", " 100% packet loss")
check("ce1", "2001:6::2", " 100% packet loss")
check("ce4", "2001:1::2", " 100% packet loss")
check("ce4", "2001:2::2", " 100% packet loss")
check("ce4", "2001:3::2", " 100% packet loss")
check("ce4", "2001:5::2", " 0% packet loss")
check("ce4", "2001:6::2", " 0% packet loss")
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))

View File

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1,8 @@
[
{
"name": "loc1",
"chunks": [
"2001:db8:1:1::/64"
]
}
]

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1,6 @@
[
{
"name": "loc3",
"chunks": []
}
]

View File

@ -0,0 +1,8 @@
[
{
"name": "loc3",
"chunks": [
"2001:db8:3:3::/64"
]
}
]

View File

@ -0,0 +1,29 @@
{
"2001:db8:1:1:1::/80":[
{
"prefix":"2001:db8:1:1:1::/80",
"protocol":"static",
"selected":true,
"installed":true,
"nexthops":[{
"fib":true,
"active":true,
"seg6local":{ "action":"End" }
}]
}
],
"2001:db8:2:2:1::/80":[
{
"prefix":"2001:db8:2:2:1::/80",
"protocol":"static",
"selected":true,
"installed":true,
"nexthops":[{
"fib":true,
"active":true,
"seg6local":{ "action":"End" }
}]
}
]
}

View File

@ -0,0 +1,26 @@
{
"locators":[
{
"name": "loc1",
"prefix": "2001:db8:1:1::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:1:1::/64",
"proto": "system"
}
]
},
{
"name": "loc2",
"prefix": "2001:db8:2:2::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:2:2::/64",
"proto": "system"
}
]
}
]
}

View File

@ -0,0 +1,26 @@
{
"locators":[
{
"name": "loc1",
"prefix": "2001:db8:1:1::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:1:1::/64",
"proto": "sharp"
}
]
},
{
"name": "loc2",
"prefix": "2001:db8:2:2::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:2:2::/64",
"proto": "system"
}
]
}
]
}

View File

@ -0,0 +1,26 @@
{
"locators":[
{
"name": "loc1",
"prefix": "2001:db8:1:1::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:1:1::/64",
"proto": "system"
}
]
},
{
"name": "loc2",
"prefix": "2001:db8:2:2::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:2:2::/64",
"proto": "system"
}
]
}
]
}

View File

@ -0,0 +1,36 @@
{
"locators":[
{
"name": "loc1",
"prefix": "2001:db8:1:1::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:1:1::/64",
"proto": "system"
}
]
},
{
"name": "loc2",
"prefix": "2001:db8:2:2::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:2:2::/64",
"proto": "system"
}
]
},
{
"name":"loc3",
"statusUp":false,
"chunks":[
{
"proto":"sharp"
}
]
}
]
}

View File

@ -0,0 +1,38 @@
{
"locators":[
{
"name": "loc1",
"prefix": "2001:db8:1:1::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:1:1::/64",
"proto": "system"
}
]
},
{
"name": "loc2",
"prefix": "2001:db8:2:2::/64",
"statusUp": true,
"chunks": [
{
"prefix": "2001:db8:2:2::/64",
"proto": "system"
}
]
},
{
"name": "loc3",
"prefix": "2001:db8:3:3::/64",
"statusUp": true,
"chunks":[
{
"prefix": "2001:db8:3:3::/64",
"proto": "sharp"
}
]
}
]
}

View File

@ -0,0 +1,2 @@
ip link add dummy0 type dummy
ip link set dummy0 up

View File

@ -0,0 +1,7 @@
hostname r1
!
log stdout notifications
log monitor notifications
log commands
log file sharpd.log debugging
!

View File

@ -0,0 +1,22 @@
hostname r1
!
debug zebra events
debug zebra rib detailed
!
log stdout notifications
log monitor notifications
log commands
log file zebra.log debugging
!
segment-routing
srv6
locators
locator loc1
prefix 2001:db8:1:1::/64
!
locator loc2
prefix 2001:db8:2:2::/64
!
!
!
!

View File

@ -0,0 +1,142 @@
#!/usr/bin/env python
#
# test_srv6_manager.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2020 by
# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
test_srv6_manager.py:
Test for SRv6 manager on zebra
"""
import os
import sys
import json
import time
import pytest
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, '../'))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
def open_json_file(filename):
try:
with open(filename, "r") as f:
return json.load(f)
except IOError:
assert False, "Could not read file {}".format(filename)
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
tgen.add_router('r1')
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, '{}/sharpd.conf'.format(rname)))
tgen.start_router()
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
def test_srv6():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
router = tgen.gears['r1']
def _check_srv6_locator(router, expected_locator_file):
logger.info("checking zebra locator status")
output = json.loads(router.vtysh_cmd("show segment-routing srv6 locator json"))
expected = open_json_file("{}/{}".format(CWD, expected_locator_file))
return topotest.json_cmp(output, expected)
def _check_sharpd_chunk(router, expected_chunk_file):
logger.info("checking sharpd locator chunk status")
output = json.loads(router.vtysh_cmd("show sharp segment-routing srv6 json"))
expected = open_json_file("{}/{}".format(CWD, expected_chunk_file))
return topotest.json_cmp(output, expected)
def check_srv6_locator(router, expected_file):
func = functools.partial(_check_srv6_locator, router, expected_file)
success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
assert result is None, 'Failed'
def check_sharpd_chunk(router, expected_file):
func = functools.partial(_check_sharpd_chunk, router, expected_file)
success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
assert result is None, 'Failed'
logger.info("Test1 for Locator Configuration")
check_srv6_locator(router, "expected_locators1.json")
check_sharpd_chunk(router, "expected_chunks1.json")
logger.info("Test2 get chunk for locator loc1")
router.vtysh_cmd("sharp srv6-manager get-locator-chunk loc1")
check_srv6_locator(router, "expected_locators2.json")
check_sharpd_chunk(router, "expected_chunks2.json")
logger.info("Test3 release chunk for locator loc1")
router.vtysh_cmd("sharp srv6-manager release-locator-chunk loc1")
check_srv6_locator(router, "expected_locators3.json")
check_sharpd_chunk(router, "expected_chunks3.json")
logger.info("Test4 get chunk for non-exist locator by zclient")
router.vtysh_cmd("sharp srv6-manager get-locator-chunk loc3")
check_srv6_locator(router, "expected_locators4.json")
check_sharpd_chunk(router, "expected_chunks4.json")
logger.info("Test5 Test for Zclient. after locator loc3 was configured")
router.vtysh_cmd(
"""
configure terminal
segment-routing
srv6
locators
locator loc3
prefix 2001:db8:3:3::/64
"""
)
check_srv6_locator(router, "expected_locators5.json")
check_sharpd_chunk(router, "expected_chunks5.json")
if __name__ == '__main__':
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))

View File

@ -0,0 +1,25 @@
[
{
"in": {
"dest": "1::1",
"nh": "2001::1",
"sid": "a::"
},
"out":[{
"prefix":"1::1/128",
"protocol":"sharp",
"selected":true,
"destSelected":true,
"distance":150,
"metric":0,
"installed":true,
"table":254,
"nexthops":[{
"flags":3,
"fib":true,
"active":true,
"seg6": { "segs": "a::" }
}]
}]
}
]

View File

@ -0,0 +1,5 @@
ip link add vrf10 type vrf table 10
ip link set vrf10 up
ip link add dum0 type dummy
ip link set dum0 up
sysctl -w net.ipv6.conf.dum0.disable_ipv6=0

View File

@ -0,0 +1,13 @@
log file zebra.log
!
log stdout notifications
log monitor notifications
log commands
!
debug zebra packet
debug zebra dplane
debug zebra kernel msgdump
!
interface dum0
ipv6 address 2001::1/64
!

View File

@ -0,0 +1,109 @@
#!/usr/bin/env python
#
# test_zebra_seg6_route.py
#
# Copyright (c) 2020 by
# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
test_zebra_seg6_route.py: Test seg6 route addition with zapi.
"""
import os
import re
import sys
import pytest
import json
import platform
from functools import partial
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.common_config import shutdown_bringup_interface
from mininet.topo import Topo
def open_json_file(filename):
try:
with open(filename, "r") as f:
return json.load(f)
except IOError:
assert False, "Could not read file {}".format(filename)
class TemplateTopo(Topo):
def build(self, **_opts):
tgen = get_topogen(self)
tgen.add_router("r1")
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
tgen.start_router()
def teardown_module(_mod):
tgen = get_topogen()
tgen.stop_topology()
def test_zebra_seg6_routes():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
logger.info("Test for seg6local route install via ZAPI was start.")
r1 = tgen.gears["r1"]
def check(router, dest, nh, sid, expected):
router.vtysh_cmd("sharp install seg6-routes {} "\
"nexthop-seg6 {} encap {} 1".format(dest, nh, sid))
output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
output = output.get('{}/128'.format(dest))
if output is None:
return False
return topotest.json_cmp(output, expected)
manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
for manifest in manifests:
logger.info("CHECK {} {} {}".format(manifest['in']['dest'],
manifest['in']['nh'],
manifest['in']['sid']))
test_func = partial(check, r1,
manifest['in']['dest'],
manifest['in']['nh'],
manifest['in']['sid'],
manifest['out'])
success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
assert result is None, 'Failed'
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))

View File

@ -0,0 +1,98 @@
[
{
"in": {
"dest": "1::1",
"context": "End"
},
"out":[{
"prefix":"1::1/128",
"protocol":"sharp",
"selected":true,
"destSelected":true,
"distance":150,
"metric":0,
"installed":true,
"table":254,
"nexthops":[{
"flags":3,
"fib":true,
"active":true,
"directlyConnected":true,
"interfaceName": "dum0",
"seg6local": { "action": "End" }
}]
}]
},
{
"in": {
"dest": "2::1",
"context": "End_X 2001::1"
},
"out":[{
"prefix":"2::1/128",
"protocol":"sharp",
"selected":true,
"destSelected":true,
"distance":150,
"metric":0,
"installed":true,
"table":254,
"nexthops":[{
"flags":3,
"fib":true,
"active":true,
"directlyConnected":true,
"interfaceName": "dum0",
"seg6local": { "action": "End.X" }
}]
}]
},
{
"in": {
"dest": "3::1",
"context": "End_T 10"
},
"out":[{
"prefix":"3::1/128",
"protocol":"sharp",
"selected":true,
"destSelected":true,
"distance":150,
"metric":0,
"installed":true,
"table":254,
"nexthops":[{
"flags":3,
"fib":true,
"active":true,
"directlyConnected":true,
"interfaceName": "dum0",
"seg6local": { "action": "End.T" }
}]
}]
},
{
"in": {
"dest": "4::1",
"context": "End_DX4 10.0.0.1"
},
"out":[{
"prefix":"4::1/128",
"protocol":"sharp",
"selected":true,
"destSelected":true,
"distance":150,
"metric":0,
"installed":true,
"table":254,
"nexthops":[{
"flags":3,
"fib":true,
"active":true,
"directlyConnected":true,
"interfaceName": "dum0",
"seg6local": { "action": "End.DX4" }
}]
}]
}
]

View File

@ -0,0 +1,3 @@
ip link add dum0 type dummy
ip link set dum0 up
sysctl -w net.ipv6.conf.dum0.disable_ipv6=0

View File

@ -0,0 +1,9 @@
log file zebra.log
!
log stdout notifications
log monitor notifications
log commands
!
debug zebra packet
debug zebra dplane
debug zebra kernel msgdump

View File

@ -0,0 +1,107 @@
#!/usr/bin/env python
#
# test_zebra_seg6local_route.py
#
# Copyright (c) 2020 by
# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
test_zebra_seg6local_route.py: Test seg6local route addition with zapi.
"""
import os
import re
import sys
import pytest
import json
import platform
from functools import partial
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.common_config import shutdown_bringup_interface
from mininet.topo import Topo
def open_json_file(filename):
try:
with open(filename, "r") as f:
return json.load(f)
except IOError:
assert False, "Could not read file {}".format(filename)
class TemplateTopo(Topo):
def build(self, **_opts):
tgen = get_topogen(self)
tgen.add_router("r1")
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
tgen.start_router()
def teardown_module(_mod):
tgen = get_topogen()
tgen.stop_topology()
def test_zebra_seg6local_routes():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
logger.info("Test for seg6local route install via ZAPI was start.")
r1 = tgen.gears["r1"]
def check(router, dest, context, expected):
router.vtysh_cmd("sharp install seg6local-routes {} "\
"nexthop-seg6local dum0 {} 1".format(dest, context))
output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
output = output.get('{}/128'.format(dest))
if output is None:
return False
return topotest.json_cmp(output, expected)
manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
for manifest in manifests:
logger.info("CHECK {} {}".format(manifest['in']['dest'],
manifest['in']['context']))
test_func = partial(check, r1,
manifest['in']['dest'],
manifest['in']['context'],
manifest['out'])
success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
assert result is None, 'Failed'
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))

View File

@ -1349,6 +1349,27 @@ static struct cmd_node rmap_node = {
.prompt = "%s(config-route-map)# ",
};
static struct cmd_node srv6_node = {
.name = "srv6",
.node = SRV6_NODE,
.parent_node = SEGMENT_ROUTING_NODE,
.prompt = "%s(config-srv6)# ",
};
static struct cmd_node srv6_locs_node = {
.name = "srv6-locators",
.node = SRV6_LOCS_NODE,
.parent_node = SRV6_NODE,
.prompt = "%s(config-srv6-locators)# ",
};
static struct cmd_node srv6_loc_node = {
.name = "srv6-locator",
.node = SRV6_LOC_NODE,
.parent_node = SRV6_LOCS_NODE,
.prompt = "%s(config-srv6-locator)# ",
};
#ifdef HAVE_PBRD
static struct cmd_node pbr_map_node = {
.name = "pbr-map",
@ -1486,6 +1507,13 @@ static struct cmd_node bmp_node = {
.parent_node = BGP_NODE,
.prompt = "%s(config-bgp-bmp)# "
};
static struct cmd_node bgp_srv6_node = {
.name = "bgp srv6",
.node = BGP_SRV6_NODE,
.parent_node = BGP_NODE,
.prompt = "%s(config-router-srv6)# ",
};
#endif /* HAVE_BGPD */
#ifdef HAVE_OSPFD
@ -1659,6 +1687,31 @@ DEFUNSH(VTYSH_REALLYALL, vtysh_end_all, vtysh_end_all_cmd, "end",
return vtysh_end();
}
DEFUNSH(VTYSH_SR, srv6, srv6_cmd,
"srv6",
"Segment-Routing SRv6 configration\n")
{
vty->node = SRV6_NODE;
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_SR, srv6_locators, srv6_locators_cmd,
"locators",
"Segment-Routing SRv6 locators configration\n")
{
vty->node = SRV6_LOCS_NODE;
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_SR, srv6_locator, srv6_locator_cmd,
"locator WORD",
"Segment Routing SRv6 locator\n"
"Specify locator-name\n")
{
vty->node = SRV6_LOC_NODE;
return CMD_SUCCESS;
}
#ifdef HAVE_BGPD
DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
"router bgp [(1-4294967295) [<view|vrf> WORD]]",
@ -1816,6 +1869,39 @@ DEFUNSH(VTYSH_BGPD,
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_BGPD,
bgp_srv6,
bgp_srv6_cmd,
"segment-routing srv6",
"Segment-Routing configuration\n"
"Segment-Routing SRv6 configuration\n")
{
vty->node = BGP_SRV6_NODE;
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_BGPD,
exit_bgp_srv6,
exit_bgp_srv6_cmd,
"exit",
"exit Segment-Routing SRv6 configuration\n")
{
if (vty->node == BGP_SRV6_NODE)
vty->node = BGP_NODE;
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_BGPD,
quit_bgp_srv6,
quit_bgp_srv6_cmd,
"quit",
"quit Segment-Routing SRv6 configuration\n")
{
if (vty->node == BGP_SRV6_NODE)
vty->node = BGP_NODE;
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
"address-family <l2vpn evpn>",
"Enter Address Family command mode\n"
@ -2084,7 +2170,7 @@ DEFUNSH(VTYSH_FABRICD, router_openfabric, router_openfabric_cmd, "router openfab
#endif /* HAVE_FABRICD */
#if defined(HAVE_PATHD)
DEFUNSH(VTYSH_PATHD, segment_routing, segment_routing_cmd,
DEFUNSH(VTYSH_SR, segment_routing, segment_routing_cmd,
"segment-routing",
"Configure segment routing\n")
{
@ -2366,6 +2452,30 @@ DEFUNSH(VTYSH_VRF, exit_vrf_config, exit_vrf_config_cmd, "exit-vrf",
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_SR, exit_srv6_config, exit_srv6_config_cmd, "exit",
"Exit from SRv6 configuration mode\n")
{
if (vty->node == SRV6_NODE)
vty->node = SEGMENT_ROUTING_NODE;
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_SR, exit_srv6_locs_config, exit_srv6_locs_config_cmd, "exit",
"Exit from SRv6-locator configuration mode\n")
{
if (vty->node == SRV6_LOCS_NODE)
vty->node = SRV6_NODE;
return CMD_SUCCESS;
}
DEFUNSH(VTYSH_SR, exit_srv6_loc_config, exit_srv6_loc_config_cmd, "exit",
"Exit from SRv6-locators configuration mode\n")
{
if (vty->node == SRV6_LOC_NODE)
vty->node = SRV6_LOCS_NODE;
return CMD_SUCCESS;
}
#ifdef HAVE_RIPD
DEFUNSH(VTYSH_RIPD, vtysh_exit_ripd, vtysh_exit_ripd_cmd, "exit",
"Exit current mode and down to previous mode\n")
@ -4131,6 +4241,12 @@ void vtysh_init_vty(void)
install_element(BMP_NODE, &bmp_exit_cmd);
install_element(BMP_NODE, &bmp_quit_cmd);
install_element(BMP_NODE, &vtysh_end_all_cmd);
install_node(&bgp_srv6_node);
install_element(BGP_NODE, &bgp_srv6_cmd);
install_element(BGP_SRV6_NODE, &exit_bgp_srv6_cmd);
install_element(BGP_SRV6_NODE, &quit_bgp_srv6_cmd);
install_element(BGP_SRV6_NODE, &vtysh_end_all_cmd);
#endif /* HAVE_BGPD */
/* ripd */
@ -4431,6 +4547,22 @@ void vtysh_init_vty(void)
install_element(CONFIG_NODE, &vtysh_end_all_cmd);
install_element(ENABLE_NODE, &vtysh_end_all_cmd);
/* SRv6 Data-plane */
install_node(&srv6_node);
install_element(SEGMENT_ROUTING_NODE, &srv6_cmd);
install_element(SRV6_NODE, &srv6_locators_cmd);
install_element(SRV6_NODE, &exit_srv6_config_cmd);
install_element(SRV6_NODE, &vtysh_end_all_cmd);
install_node(&srv6_locs_node);
install_element(SRV6_LOCS_NODE, &srv6_locator_cmd);
install_element(SRV6_LOCS_NODE, &exit_srv6_locs_config_cmd);
install_element(SRV6_LOCS_NODE, &vtysh_end_all_cmd);
install_node(&srv6_loc_node);
install_element(SRV6_LOC_NODE, &exit_srv6_loc_config_cmd);
install_element(SRV6_LOC_NODE, &vtysh_end_all_cmd);
install_element(ENABLE_NODE, &vtysh_show_running_config_cmd);
install_element(ENABLE_NODE, &vtysh_copy_running_config_cmd);
install_element(ENABLE_NODE, &vtysh_copy_to_running_cmd);

View File

@ -60,6 +60,7 @@ DECLARE_MGROUP(MVTYSH);
#define VTYSH_KEYS VTYSH_RIPD|VTYSH_EIGRPD
/* Daemons who can process nexthop-group configs */
#define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD
#define VTYSH_SR VTYSH_ZEBRA|VTYSH_PATHD
enum vtysh_write_integrated {
WRITE_INTEGRATED_UNSPECIFIED,

View File

@ -430,6 +430,10 @@ void vtysh_config_parse_line(void *arg, const char *line)
config = config_get(PROTOCOL_NODE, line);
else if (strncmp(line, "mpls", strlen("mpls")) == 0)
config = config_get(MPLS_NODE, line);
else if (strncmp(line, "segment-routing",
strlen("segment-routing"))
== 0)
config = config_get(SEGMENT_ROUTING_NODE, line);
else if (strncmp(line, "bfd", strlen("bfd")) == 0)
config = config_get(BFD_NODE, line);
else {

View File

@ -57,6 +57,8 @@
#include "zebra/zebra_nb.h"
#include "zebra/zebra_opaque.h"
#include "zebra/zebra_srte.h"
#include "zebra/zebra_srv6.h"
#include "zebra/zebra_srv6_vty.h"
#define ZEBRA_PTM_SUPPORT
@ -418,6 +420,8 @@ int main(int argc, char **argv)
zebra_pbr_init();
zebra_opaque_init();
zebra_srte_init();
zebra_srv6_init();
zebra_srv6_vty_init();
/* For debug purpose. */
/* SET_FLAG (zebra_debug_event, ZEBRA_DEBUG_EVENT); */

View File

@ -22,9 +22,18 @@
#ifdef HAVE_NETLINK
/* The following definition works around an issue in the Linux kernel
* header files with redefinition of 'struct in6_addr' in both
* netinet/in.h and linux/in6.h.
* Reference - https://sourceware.org/ml/libc-alpha/2013-01/msg00599.html
*/
#define _LINUX_IN6_H
#include <net/if_arp.h>
#include <linux/lwtunnel.h>
#include <linux/mpls_iptunnel.h>
#include <linux/seg6_iptunnel.h>
#include <linux/seg6_local.h>
#include <linux/neighbour.h>
#include <linux/rtnetlink.h>
#include <linux/nexthop.h>
@ -38,6 +47,8 @@
#include "if.h"
#include "log.h"
#include "prefix.h"
#include "plist.h"
#include "plist_int.h"
#include "connected.h"
#include "table.h"
#include "memory.h"
@ -404,6 +415,55 @@ static int parse_encap_mpls(struct rtattr *tb, mpls_label_t *labels)
return num_labels;
}
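/*
 * Decode a nested LWTUNNEL_ENCAP_SEG6_LOCAL attribute into zebra's
 * seg6local action and its context (IPv4/IPv6 nexthop or table id).
 */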
static enum seg6local_action_t
parse_encap_seg6local(struct rtattr *tb,
struct seg6local_context *ctx)
{
struct rtattr *tb_encap[256] = {};
enum seg6local_action_t act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
netlink_parse_rtattr_nested(tb_encap, 256, tb);
if (tb_encap[SEG6_LOCAL_ACTION])
act = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_ACTION]);
if (tb_encap[SEG6_LOCAL_NH4])
ctx->nh4 = *(struct in_addr *)RTA_DATA(
tb_encap[SEG6_LOCAL_NH4]);
if (tb_encap[SEG6_LOCAL_NH6])
ctx->nh6 = *(struct in6_addr *)RTA_DATA(
tb_encap[SEG6_LOCAL_NH6]);
if (tb_encap[SEG6_LOCAL_TABLE])
ctx->table = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_TABLE]);
return act;
}
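/*
 * Decode a nested LWTUNNEL_ENCAP_SEG6 attribute; only the first SID of
 * the received SRH is returned.
 */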
static int parse_encap_seg6(struct rtattr *tb, struct in6_addr *segs)
{
struct rtattr *tb_encap[256] = {};
struct seg6_iptunnel_encap *ipt = NULL;
struct in6_addr *segments = NULL;
netlink_parse_rtattr_nested(tb_encap, 256, tb);
/*
* TODO: Multiple SID lists are not supported yet.
*/
if (tb_encap[SEG6_IPTUNNEL_SRH]) {
ipt = (struct seg6_iptunnel_encap *)
RTA_DATA(tb_encap[SEG6_IPTUNNEL_SRH]);
segments = ipt->srh[0].segments;
*segs = segments[0];
return 1;
}
return 0;
}
static struct nexthop
parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
enum blackhole_type bh_type, int index, void *prefsrc,
@ -413,6 +473,10 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
struct nexthop nh = {0};
mpls_label_t labels[MPLS_MAX_LABELS] = {0};
int num_labels = 0;
enum seg6local_action_t seg6l_act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
struct seg6local_context seg6l_ctx = {};
struct in6_addr seg6_segs = {};
int num_segs = 0;
vrf_id_t nh_vrf_id = vrf_id;
size_t sz = (afi == AFI_IP) ? 4 : 16;
@ -452,6 +516,16 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
== LWTUNNEL_ENCAP_MPLS) {
num_labels = parse_encap_mpls(tb[RTA_ENCAP], labels);
}
if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE]
&& *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE])
== LWTUNNEL_ENCAP_SEG6_LOCAL) {
seg6l_act = parse_encap_seg6local(tb[RTA_ENCAP], &seg6l_ctx);
}
if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE]
&& *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE])
== LWTUNNEL_ENCAP_SEG6) {
num_segs = parse_encap_seg6(tb[RTA_ENCAP], &seg6_segs);
}
if (rtm->rtm_flags & RTNH_F_ONLINK)
SET_FLAG(nh.flags, NEXTHOP_FLAG_ONLINK);
@ -459,6 +533,12 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
if (num_labels)
nexthop_add_labels(&nh, ZEBRA_LSP_STATIC, num_labels, labels);
if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
nexthop_add_srv6_seg6local(&nh, seg6l_act, &seg6l_ctx);
if (num_segs)
nexthop_add_srv6_seg6(&nh, &seg6_segs);
return nh;
}
@ -475,6 +555,10 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id,
/* MPLS labels */
mpls_label_t labels[MPLS_MAX_LABELS] = {0};
int num_labels = 0;
enum seg6local_action_t seg6l_act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
struct seg6local_context seg6l_ctx = {};
struct in6_addr seg6_segs = {};
int num_segs = 0;
struct rtattr *rtnh_tb[RTA_MAX + 1] = {};
int len = RTA_PAYLOAD(tb[RTA_MULTIPATH]);
@ -519,6 +603,18 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id,
num_labels = parse_encap_mpls(
rtnh_tb[RTA_ENCAP], labels);
}
if (rtnh_tb[RTA_ENCAP] && rtnh_tb[RTA_ENCAP_TYPE]
&& *(uint16_t *)RTA_DATA(rtnh_tb[RTA_ENCAP_TYPE])
== LWTUNNEL_ENCAP_SEG6_LOCAL) {
seg6l_act = parse_encap_seg6local(
rtnh_tb[RTA_ENCAP], &seg6l_ctx);
}
if (rtnh_tb[RTA_ENCAP] && rtnh_tb[RTA_ENCAP_TYPE]
&& *(uint16_t *)RTA_DATA(rtnh_tb[RTA_ENCAP_TYPE])
== LWTUNNEL_ENCAP_SEG6) {
num_segs = parse_encap_seg6(rtnh_tb[RTA_ENCAP],
&seg6_segs);
}
}
if (gate && rtm->rtm_family == AF_INET) {
@ -544,6 +640,13 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id,
nexthop_add_labels(nh, ZEBRA_LSP_STATIC,
num_labels, labels);
if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
nexthop_add_srv6_seg6local(nh, seg6l_act,
&seg6l_ctx);
if (num_segs)
nexthop_add_srv6_seg6(nh, &seg6_segs);
if (rtnh->rtnh_flags & RTNH_F_ONLINK)
SET_FLAG(nh->flags, NEXTHOP_FLAG_ONLINK);
@ -1227,6 +1330,40 @@ static bool _netlink_route_encode_nexthop_src(const struct nexthop *nexthop,
return true;
}
static ssize_t fill_seg6ipt_encap(char *buffer, size_t buflen,
const struct in6_addr *seg)
{
struct seg6_iptunnel_encap *ipt;
struct ipv6_sr_hdr *srh;
const size_t srhlen = 24;
/*
* Caution: only a single SID is supported, not multiple SIDs.
* This function only handles the case where seg represents a
* single SID. To extend the SRv6 functionality, the boundary
* check below must be improved: for example, passing a SID list
* with multiple SIDs as the argument of a Transit behavior
* requires a variable-length check against buflen.
*/
if (buflen < (sizeof(struct seg6_iptunnel_encap) +
sizeof(struct ipv6_sr_hdr) + 16))
return -1;
memset(buffer, 0, buflen);
ipt = (struct seg6_iptunnel_encap *)buffer;
ipt->mode = SEG6_IPTUN_MODE_ENCAP;
srh = ipt->srh;
srh->hdrlen = (srhlen >> 3) - 1;
srh->type = 4;
srh->segments_left = 0;
srh->first_segment = 0;
memcpy(&srh->segments[0], seg, sizeof(struct in6_addr));
return srhlen + 4;
}
/* This function takes a nexthop as argument and adds
* the appropriate netlink attributes to an existing
* netlink message.
@ -1262,6 +1399,99 @@ static bool _netlink_route_build_singlepath(const struct prefix *p,
sizeof(label_buf)))
return false;
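	/*
	 * Encode any SRv6 nexthop state as lightweight-tunnel attributes:
	 * a seg6local action becomes LWTUNNEL_ENCAP_SEG6_LOCAL and a
	 * non-zero SID becomes an LWTUNNEL_ENCAP_SEG6 SRH.
	 */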
if (nexthop->nh_srv6) {
if (nexthop->nh_srv6->seg6local_action !=
ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
struct rtattr *nest;
const struct seg6local_context *ctx;
ctx = &nexthop->nh_srv6->seg6local_ctx;
if (!nl_attr_put16(nlmsg, req_size, RTA_ENCAP_TYPE,
LWTUNNEL_ENCAP_SEG6_LOCAL))
return false;
nest = nl_attr_nest(nlmsg, req_size, RTA_ENCAP);
if (!nest)
return false;
switch (nexthop->nh_srv6->seg6local_action) {
case ZEBRA_SEG6_LOCAL_ACTION_END:
if (!nl_attr_put32(nlmsg, req_size,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END))
return false;
break;
case ZEBRA_SEG6_LOCAL_ACTION_END_X:
if (!nl_attr_put32(nlmsg, req_size,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_X))
return false;
if (!nl_attr_put(nlmsg, req_size,
SEG6_LOCAL_NH6, &ctx->nh6,
sizeof(struct in6_addr)))
return false;
break;
case ZEBRA_SEG6_LOCAL_ACTION_END_T:
if (!nl_attr_put32(nlmsg, req_size,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_T))
return false;
if (!nl_attr_put32(nlmsg, req_size,
SEG6_LOCAL_TABLE,
ctx->table))
return false;
break;
case ZEBRA_SEG6_LOCAL_ACTION_END_DX4:
if (!nl_attr_put32(nlmsg, req_size,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_DX4))
return false;
if (!nl_attr_put(nlmsg, req_size,
SEG6_LOCAL_NH4, &ctx->nh4,
sizeof(struct in_addr)))
return false;
break;
case ZEBRA_SEG6_LOCAL_ACTION_END_DT6:
if (!nl_attr_put32(nlmsg, req_size,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_DT6))
return false;
if (!nl_attr_put32(nlmsg, req_size,
SEG6_LOCAL_TABLE,
ctx->table))
return false;
break;
default:
zlog_err("%s: unsupport seg6local behaviour action=%u",
__func__,
nexthop->nh_srv6->seg6local_action);
return false;
}
nl_attr_nest_end(nlmsg, nest);
}
if (!sid_zero(&nexthop->nh_srv6->seg6_segs)) {
char tun_buf[4096];
ssize_t tun_len;
struct rtattr *nest;
if (!nl_attr_put16(nlmsg, req_size, RTA_ENCAP_TYPE,
LWTUNNEL_ENCAP_SEG6))
return false;
nest = nl_attr_nest(nlmsg, req_size, RTA_ENCAP);
if (!nest)
return false;
tun_len = fill_seg6ipt_encap(tun_buf, sizeof(tun_buf),
&nexthop->nh_srv6->seg6_segs);
if (tun_len < 0)
return false;
if (!nl_attr_put(nlmsg, req_size, SEG6_IPTUNNEL_SRH,
tun_buf, tun_len))
return false;
nl_attr_nest_end(nlmsg, nest);
}
}
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
rtmsg->rtm_flags |= RTNH_F_ONLINK;
@ -2256,6 +2486,119 @@ ssize_t netlink_nexthop_msg_encode(uint16_t cmd,
nl_attr_nest_end(&req->n, nest);
}
if (nh->nh_srv6) {
if (nh->nh_srv6->seg6local_action !=
ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
uint32_t action;
uint16_t encap;
struct rtattr *nest;
const struct seg6local_context *ctx;
req->nhm.nh_family = AF_INET6;
action = nh->nh_srv6->seg6local_action;
ctx = &nh->nh_srv6->seg6local_ctx;
encap = LWTUNNEL_ENCAP_SEG6_LOCAL;
if (!nl_attr_put(&req->n, buflen,
NHA_ENCAP_TYPE,
&encap,
sizeof(uint16_t)))
return 0;
nest = nl_attr_nest(&req->n, buflen,
NHA_ENCAP | NLA_F_NESTED);
if (!nest)
return 0;
switch (action) {
case SEG6_LOCAL_ACTION_END:
if (!nl_attr_put32(
&req->n, buflen,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END))
return 0;
break;
case SEG6_LOCAL_ACTION_END_X:
if (!nl_attr_put32(
&req->n, buflen,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_X))
return 0;
if (!nl_attr_put(
&req->n, buflen,
SEG6_LOCAL_NH6, &ctx->nh6,
sizeof(struct in6_addr)))
return 0;
break;
case SEG6_LOCAL_ACTION_END_T:
if (!nl_attr_put32(
&req->n, buflen,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_T))
return 0;
if (!nl_attr_put32(
&req->n, buflen,
SEG6_LOCAL_TABLE,
ctx->table))
return 0;
break;
case SEG6_LOCAL_ACTION_END_DX4:
if (!nl_attr_put32(
&req->n, buflen,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_DX4))
return 0;
if (!nl_attr_put(
&req->n, buflen,
SEG6_LOCAL_NH4, &ctx->nh4,
sizeof(struct in_addr)))
return 0;
break;
case SEG6_LOCAL_ACTION_END_DT6:
if (!nl_attr_put32(
&req->n, buflen,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_ACTION_END_DT6))
return 0;
if (!nl_attr_put32(
&req->n, buflen,
SEG6_LOCAL_TABLE,
ctx->table))
return 0;
break;
default:
zlog_err("%s: unsupport seg6local behaviour action=%u",
__func__, action);
return 0;
}
nl_attr_nest_end(&req->n, nest);
}
if (!sid_zero(&nh->nh_srv6->seg6_segs)) {
char tun_buf[4096];
ssize_t tun_len;
struct rtattr *nest;
if (!nl_attr_put16(&req->n, buflen,
NHA_ENCAP_TYPE,
LWTUNNEL_ENCAP_SEG6))
return 0;
nest = nl_attr_nest(&req->n, buflen,
NHA_ENCAP | NLA_F_NESTED);
if (!nest)
return 0;
tun_len = fill_seg6ipt_encap(tun_buf,
sizeof(tun_buf),
&nh->nh_srv6->seg6_segs);
if (tun_len < 0)
return 0;
if (!nl_attr_put(&req->n, buflen,
SEG6_IPTUNNEL_SRH,
tun_buf, tun_len))
return 0;
nl_attr_nest_end(&req->n, nest);
}
}
nexthop_done:
if (IS_ZEBRA_DEBUG_KERNEL)

View File

@ -13,6 +13,7 @@ vtysh_scan += \
zebra/zebra_mlag_vty.c \
zebra/zebra_evpn_mh.c \
zebra/zebra_mpls_vty.c \
zebra/zebra_srv6_vty.c \
zebra/zebra_ptm.c \
zebra/zebra_pw.c \
zebra/zebra_routemap.c \
@ -92,6 +93,8 @@ zebra_zebra_SOURCES = \
zebra/zebra_mpls_openbsd.c \
zebra/zebra_mpls_null.c \
zebra/zebra_mpls_vty.c \
zebra/zebra_srv6.c \
zebra/zebra_srv6_vty.c \
zebra/zebra_mroute.c \
zebra/zebra_nb.c \
zebra/zebra_nb_config.c \
@ -128,6 +131,7 @@ clippy_scan += \
zebra/zebra_mlag_vty.c \
zebra/zebra_routemap.c \
zebra/zebra_vty.c \
zebra/zebra_srv6_vty.c \
# end
noinst_HEADERS += \
@ -161,6 +165,8 @@ noinst_HEADERS += \
zebra/zebra_mlag.h \
zebra/zebra_mlag_vty.h \
zebra/zebra_mpls.h \
zebra/zebra_srv6.h \
zebra/zebra_srv6_vty.h \
zebra/zebra_mroute.h \
zebra/zebra_nb.h \
zebra/zebra_netns_id.h \

View File

@ -60,6 +60,7 @@
#include "zebra/connected.h"
#include "zebra/zebra_opaque.h"
#include "zebra/zebra_srte.h"
#include "zebra/zebra_srv6.h"
DEFINE_MTYPE_STATIC(ZEBRA, OPAQUE, "Opaque Data");
@ -1747,6 +1748,27 @@ static bool zapi_read_nexthops(struct zserv *client, struct prefix *p,
&api_nh->labels[0]);
}
if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)
&& api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
if (IS_ZEBRA_DEBUG_RECV)
zlog_debug("%s: adding seg6local action %s",
__func__,
seg6local_action2str(
api_nh->seg6local_action));
nexthop_add_srv6_seg6local(nexthop,
api_nh->seg6local_action,
&api_nh->seg6local_ctx);
}
if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6)
&& api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
if (IS_ZEBRA_DEBUG_RECV)
zlog_debug("%s: adding seg6", __func__);
nexthop_add_srv6_seg6(nexthop, &api_nh->seg6_segs);
}
if (IS_ZEBRA_DEBUG_RECV) {
labelbuf[0] = '\0';
nhbuf[0] = '\0';
@ -2612,6 +2634,29 @@ int zsend_client_close_notify(struct zserv *client, struct zserv *closed_client)
return zserv_send_message(client, s);
}
int zsend_srv6_manager_get_locator_chunk_response(struct zserv *client,
vrf_id_t vrf_id,
struct srv6_locator *loc)
{
struct srv6_locator_chunk chunk = {};
struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
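	/*
	 * Copy the locator's parameters into a chunk and stamp it with the
	 * requesting client's proto and instance before encoding the ZAPI
	 * reply.
	 */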
strlcpy(chunk.locator_name, loc->name, sizeof(chunk.locator_name));
chunk.prefix = loc->prefix;
chunk.block_bits_length = loc->block_bits_length;
chunk.node_bits_length = loc->node_bits_length;
chunk.function_bits_length = loc->function_bits_length;
chunk.argument_bits_length = loc->argument_bits_length;
chunk.keep = 0;
chunk.proto = client->proto;
chunk.instance = client->instance;
zclient_create_header(s, ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK, vrf_id);
zapi_srv6_locator_chunk_encode(s, &chunk);
stream_putw_at(s, 0, stream_get_endp(s));
return zserv_send_message(client, s);
}
/* Send response to a table manager connect request to client */
static void zread_table_manager_connect(struct zserv *client,
struct stream *msg, vrf_id_t vrf_id)
@ -2821,6 +2866,62 @@ static void zread_table_manager_request(ZAPI_HANDLER_ARGS)
}
}
static void zread_srv6_manager_get_locator_chunk(struct zserv *client,
struct stream *msg,
vrf_id_t vrf_id)
{
struct stream *s = msg;
uint16_t len;
char locator_name[SRV6_LOCNAME_SIZE] = {0};
/* Get data. */
STREAM_GETW(s, len);
STREAM_GET(locator_name, s, len);
/* call hook to get a chunk using wrapper */
struct srv6_locator *loc = NULL;
srv6_manager_get_locator_chunk_call(&loc, client, locator_name, vrf_id);
stream_failure:
return;
}
static void zread_srv6_manager_release_locator_chunk(struct zserv *client,
struct stream *msg,
vrf_id_t vrf_id)
{
struct stream *s = msg;
uint16_t len;
char locator_name[SRV6_LOCNAME_SIZE] = {0};
/* Get data. */
STREAM_GETW(s, len);
STREAM_GET(locator_name, s, len);
/* call hook to release a chunk using wrapper */
srv6_manager_release_locator_chunk_call(client, locator_name, vrf_id);
stream_failure:
return;
}
static void zread_srv6_manager_request(ZAPI_HANDLER_ARGS)
{
switch (hdr->command) {
case ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK:
zread_srv6_manager_get_locator_chunk(client, msg,
zvrf_id(zvrf));
break;
case ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK:
zread_srv6_manager_release_locator_chunk(client, msg,
zvrf_id(zvrf));
break;
default:
zlog_err("%s: unknown SRv6 Manager command", __func__);
break;
}
}
static void zread_pseudowire(ZAPI_HANDLER_ARGS)
{
struct stream *s;
@ -3580,6 +3681,8 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
[ZEBRA_MLAG_CLIENT_REGISTER] = zebra_mlag_client_register,
[ZEBRA_MLAG_CLIENT_UNREGISTER] = zebra_mlag_client_unregister,
[ZEBRA_MLAG_FORWARD_MSG] = zebra_mlag_forward_client_msg,
[ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK] = zread_srv6_manager_request,
[ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK] = zread_srv6_manager_request,
[ZEBRA_CLIENT_CAPABILITIES] = zread_client_capabilities,
[ZEBRA_NEIGH_DISCOVER] = zread_neigh_discover,
[ZEBRA_NHG_ADD] = zread_nhg_add,

View File

@ -30,6 +30,7 @@
#include "zebra/zebra_pbr.h"
#include "zebra/zebra_errors.h"
#include "zebra/label_manager.h"
#include "zebra/zebra_srv6.h"
#ifdef __cplusplus
@ -116,6 +117,13 @@ int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id,
extern void zapi_opaque_free(struct opaque *opaque);
extern int zsend_zebra_srv6_locator_add(struct zserv *client,
struct srv6_locator *loc);
extern int zsend_zebra_srv6_locator_delete(struct zserv *client,
struct srv6_locator *loc);
extern int zsend_srv6_manager_get_locator_chunk_response(struct zserv *client,
vrf_id_t vrf_id, struct srv6_locator *loc);
#ifdef __cplusplus
}
#endif

View File

@ -785,6 +785,12 @@ static struct log_ref ferr_zebra_err[] = {
.description = "Zebra has detected a situation where there are two vrf devices with the exact same tableid. This is considered a complete misconfiguration of VRF devices and breaks a fundamental assumption in FRR about how VRF's work",
.suggestion = "Use different table id's for the VRF's in question"
},
{
.code = EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
.title = "Zebra did not free any srv6 locator chunks",
.description = "Zebra's srv6-locator chunk cleanup procedure ran, but no srv6 locator chunks were released.",
.suggestion = "Ignore this error.",
},
{
.code = END_FERR,
}

Some files were not shown because too many files have changed in this diff.