diff --git a/.gitignore b/.gitignore index 4e75acbac5..ddaf6e240e 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,9 @@ autom4te*.cache configure.lineno configure config.h.in +confdefs.h +conftest +conftest.err aclocal.m4 Makefile.in zebra-[0-9.][0-9.][0-9.]*.tar.gz @@ -55,3 +58,6 @@ debian/quagga/ debian/tmp/ *.swp cscope.* +*.pb.h +*.pb-c.h +*.pb-c.c diff --git a/Makefile.am b/Makefile.am index 1a39844cb1..d19df6f3b6 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,19 +1,19 @@ ## Process this file with automake to produce Makefile.in. -SUBDIRS = lib @ZEBRA@ @BGPD@ @RIPD@ @RIPNGD@ @OSPFD@ @OSPF6D@ \ +SUBDIRS = lib qpb fpm @ZEBRA@ @LIBRFP@ @RFPTEST@ \ + @BGPD@ @RIPD@ @RIPNGD@ @OSPFD@ @OSPF6D@ @LDPD@ \ @ISISD@ @PIMD@ @WATCHQUAGGA@ @VTYSH@ @OSPFCLIENT@ @DOC@ m4 @pkgsrcdir@ \ redhat @SOLARIS@ tests tools cumulus -DIST_SUBDIRS = lib zebra bgpd ripd ripngd ospfd ospf6d \ +DIST_SUBDIRS = lib qpb fpm zebra bgpd ripd ripngd ospfd ospf6d ldpd \ isisd watchquagga vtysh ospfclient doc m4 pkgsrc redhat tests \ - solaris pimd tools cumulus + solaris pimd @LIBRFP@ @RFPTEST@ tools cumulus EXTRA_DIST = aclocal.m4 SERVICES TODO REPORTING-BUGS INSTALL.quagga.txt \ update-autotools \ vtysh/Makefile.in vtysh/Makefile.am \ tools/rrcheck.pl tools/rrlookup.pl tools/zc.pl \ - tools/zebra.el tools/multiple-bgpd.sh \ - fpm/fpm.h + tools/zebra.el tools/multiple-bgpd.sh if HAVE_LATEX diff --git a/bgpd/Makefile.am b/bgpd/Makefile.am index fb5b2375de..1730f5cdb8 100644 --- a/bgpd/Makefile.am +++ b/bgpd/Makefile.am @@ -1,6 +1,64 @@ ## Process this file with automake to produce Makefile.in. +AUTOMAKE_OPTIONS = subdir-objects -AM_CPPFLAGS = -I.. -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib +if ENABLE_BGP_VNC +#o file to keep linker happy +BGP_VNC_RFP_LIB=rfapi/rfapi_descriptor_rfp_utils.o @top_builddir@/$(LIBRFP)/librfp.a +BGP_VNC_RFP_INC=-I@top_srcdir@/$(RFPINC) +BGP_VNC_RFP_HD=\ + @top_srcdir@/$(RFPINC)/rfp.h +BGP_VNC_RFP_LD_FLAGS_FILE=@top_srcdir@/$(LIBRFP)/rfp_ld_flags +BGP_VNC_RFP_LD_FLAGS=`if [ -e "$(BGP_VNC_RFP_LD_FLAGS_FILE)" ] ; then cat "$(BGP_VNC_RFP_LD_FLAGS_FILE)" ; fi ` + +#BGP_VNC_RFAPI_SRCDIR=rfapi +BGP_VNC_RFAPI_SRCDIR= +BGP_VNC_RFAPI_INC=-Irfapi +BGP_VNC_RFAPI_SRC=rfapi/bgp_rfapi_cfg.c \ + rfapi/rfapi_import.c \ + rfapi/rfapi.c \ + rfapi/rfapi_ap.c \ + rfapi/rfapi_descriptor_rfp_utils.c \ + rfapi/rfapi_encap_tlv.c \ + rfapi/rfapi_nve_addr.c \ + rfapi/rfapi_monitor.c \ + rfapi/rfapi_rib.c \ + rfapi/rfapi_vty.c \ + rfapi/vnc_debug.c \ + rfapi/vnc_export_bgp.c \ + rfapi/vnc_export_table.c \ + rfapi/vnc_import_bgp.c \ + rfapi/vnc_zebra.c +BGP_VNC_RFAPI_HD=rfapi/bgp_rfapi_cfg.h \ + rfapi/rfapi_import.h \ + rfapi/rfapi.h \ + rfapi/rfapi_ap.h \ + rfapi/rfapi_backend.h \ + rfapi/rfapi_descriptor_rfp_utils.h \ + rfapi/rfapi_encap_tlv.h \ + rfapi/rfapi_nve_addr.h \ + rfapi/rfapi_monitor.h \ + rfapi/rfapi_private.h \ + rfapi/rfapi_rib.h \ + rfapi/rfapi_vty.h \ + rfapi/vnc_debug.h \ + rfapi/vnc_export_bgp.h \ + rfapi/vnc_export_table.h \ + rfapi/vnc_import_bgp.h \ + rfapi/vnc_zebra.h \ + bgp_vnc_types.h $(BGP_VNC_RFP_HD) + +else +BGP_VNC_RFAPI_INC= +BGP_VNC_RFAPI_SRC= +BGP_VNC_RFAPI_HD= +BGP_VNC_RFP_LIB= +BGP_VNC_RFP_INC= +BGP_VNC_RFP_HD= +BGP_VNC_RFP_LD_FLAGS= +endif + +AM_CPPFLAGS = -I.. 
-I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib \ + $(BGP_VNC_RFAPI_INC) $(BGP_VNC_RFP_INC) DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" INSTALL_SDATA=@INSTALL@ -m 600 @@ -18,7 +76,7 @@ libbgp_a_SOURCES = \ bgp_dump.c bgp_snmp.c bgp_ecommunity.c bgp_mplsvpn.c bgp_nexthop.c \ bgp_damp.c bgp_table.c bgp_advertise.c bgp_vty.c bgp_mpath.c \ bgp_nht.c bgp_updgrp.c bgp_updgrp_packet.c bgp_updgrp_adv.c bgp_bfd.c \ - bgp_encap.c bgp_encap_tlv.c + bgp_encap.c bgp_encap_tlv.c $(BGP_VNC_RFAPI_SRC) noinst_HEADERS = \ bgp_memory.h \ @@ -27,16 +85,20 @@ noinst_HEADERS = \ bgpd.h bgp_filter.h bgp_clist.h bgp_dump.h bgp_zebra.h \ bgp_ecommunity.h bgp_mplsvpn.h bgp_nexthop.h bgp_damp.h bgp_table.h \ bgp_advertise.h bgp_snmp.h bgp_vty.h bgp_mpath.h bgp_nht.h \ - bgp_updgrp.h bgp_bfd.h bgp_encap.h bgp_encap_tlv.h bgp_encap_types.h + bgp_updgrp.h bgp_bfd.h bgp_encap.h bgp_encap_tlv.h bgp_encap_types.h \ + $(BGP_VNC_RFAPI_HD) bgpd_SOURCES = bgp_main.c -bgpd_LDADD = libbgp.a ../lib/libzebra.la @LIBCAP@ @LIBM@ +bgpd_LDADD = libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libzebra.la @LIBCAP@ @LIBM@ +bgpd_LDFLAGS = $(BGP_VNC_RFP_LD_FLAGS) bgp_btoa_SOURCES = bgp_btoa.c -bgp_btoa_LDADD = libbgp.a ../lib/libzebra.la @LIBCAP@ @LIBM@ +bgp_btoa_LDADD = libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libzebra.la @LIBCAP@ @LIBM@ +bgp_btoa_LDFLAGS = $(BGP_VNC_RFP_LD_FLAGS) examplesdir = $(exampledir) -dist_examples_DATA = bgpd.conf.sample bgpd.conf.sample2 +dist_examples_DATA = bgpd.conf.sample bgpd.conf.sample2 \ + bgpd.conf.vnc.sample EXTRA_DIST = BGP4-MIB.txt diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c index 936428e178..24bd526ce3 100644 --- a/bgpd/bgp_attr.c +++ b/bgpd/bgp_attr.c @@ -44,6 +44,11 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "bgpd/bgp_ecommunity.h" #include "bgpd/bgp_updgrp.h" #include "bgpd/bgp_encap_types.h" +#if ENABLE_BGP_VNC +# include "bgpd/rfapi/bgp_rfapi_cfg.h" +# include "bgp_encap_types.h" +# include "bgp_vnc_types.h" +#endif /* Attribute strings for logging. 
*/ static const struct message attr_str [] = @@ -68,6 +73,9 @@ static const struct message attr_str [] = { BGP_ATTR_AS4_AGGREGATOR, "AS4_AGGREGATOR" }, { BGP_ATTR_AS_PATHLIMIT, "AS_PATHLIMIT" }, { BGP_ATTR_ENCAP, "ENCAP" }, +#if ENABLE_BGP_VNC + { BGP_ATTR_VNC, "VNC" }, +#endif }; static const int attr_str_max = array_size(attr_str); @@ -257,6 +265,12 @@ bgp_attr_flush_encap(struct attr *attr) encap_free(attr->extra->encap_subtlvs); attr->extra->encap_subtlvs = NULL; } +#if ENABLE_BGP_VNC + if (attr->extra->vnc_subtlvs) { + encap_free(attr->extra->vnc_subtlvs); + attr->extra->vnc_subtlvs = NULL; + } +#endif } /* @@ -422,6 +436,12 @@ bgp_attr_extra_free (struct attr *attr) encap_free(attr->extra->encap_subtlvs); attr->extra->encap_subtlvs = NULL; } +#if ENABLE_BGP_VNC + if (attr->extra->vnc_subtlvs) { + encap_free(attr->extra->vnc_subtlvs); + attr->extra->vnc_subtlvs = NULL; + } +#endif XFREE (MTYPE_ATTR_EXTRA, attr->extra); attr->extra = NULL; } @@ -462,6 +482,11 @@ bgp_attr_dup (struct attr *new, struct attr *orig) if (orig->extra->encap_subtlvs) { new->extra->encap_subtlvs = encap_tlv_dup(orig->extra->encap_subtlvs); } +#if ENABLE_BGP_VNC + if (orig->extra->vnc_subtlvs) { + new->extra->vnc_subtlvs = encap_tlv_dup(orig->extra->vnc_subtlvs); + } +#endif } } else if (orig->extra) @@ -471,6 +496,11 @@ bgp_attr_dup (struct attr *new, struct attr *orig) if (orig->extra->encap_subtlvs) { new->extra->encap_subtlvs = encap_tlv_dup(orig->extra->encap_subtlvs); } +#if ENABLE_BGP_VNC + if (orig->extra->vnc_subtlvs) { + new->extra->vnc_subtlvs = encap_tlv_dup(orig->extra->vnc_subtlvs); + } +#endif } } @@ -612,6 +642,9 @@ attrhash_cmp (const void *p1, const void *p2) && ae1->transit == ae2->transit && (ae1->encap_tunneltype == ae2->encap_tunneltype) && encap_same(ae1->encap_subtlvs, ae2->encap_subtlvs) +#if ENABLE_BGP_VNC + && encap_same(ae1->vnc_subtlvs, ae2->vnc_subtlvs) +#endif && IPV4_ADDR_SAME (&ae1->originator_id, &ae2->originator_id)) return 1; else if (ae1 || ae2) @@ -670,6 +703,11 @@ bgp_attr_hash_alloc (void *p) if (attr->extra->encap_subtlvs) { attr->extra->encap_subtlvs = encap_tlv_dup(attr->extra->encap_subtlvs); } +#if ENABLE_BGP_VNC + if (attr->extra->vnc_subtlvs) { + attr->extra->vnc_subtlvs = encap_tlv_dup(attr->extra->vnc_subtlvs); + } +#endif } attr->refcnt = 0; return attr; @@ -940,6 +978,10 @@ bgp_attr_flush (struct attr *attr) transit_free (attre->transit); encap_free(attre->encap_subtlvs); attre->encap_subtlvs = NULL; +#if ENABLE_BGP_VNC + encap_free(attre->vnc_subtlvs); + attre->vnc_subtlvs = NULL; +#endif } } @@ -1911,7 +1953,7 @@ bgp_attr_encap( bgp_size_t total; struct attr_extra *attre = NULL; struct bgp_attr_encap_subtlv *stlv_last = NULL; - uint16_t tunneltype; + uint16_t tunneltype = 0; total = length + (CHECK_FLAG (flag, BGP_ATTR_FLAG_EXTLEN) ? 
4 : 3); @@ -1957,6 +1999,12 @@ bgp_attr_encap( subtype = stream_getc (BGP_INPUT (peer)); sublength = stream_getc (BGP_INPUT (peer)); length -= 2; +#if ENABLE_BGP_VNC + } else { + subtype = stream_getw (BGP_INPUT (peer)); + sublength = stream_getw (BGP_INPUT (peer)); + length -= 4; +#endif } if (sublength > length) { @@ -1988,6 +2036,16 @@ bgp_attr_encap( } else { attre->encap_subtlvs = tlv; } +#if ENABLE_BGP_VNC + } else { + for (stlv_last = attre->vnc_subtlvs; stlv_last && stlv_last->next; + stlv_last = stlv_last->next); + if (stlv_last) { + stlv_last->next = tlv; + } else { + attre->vnc_subtlvs = tlv; + } +#endif } } else { stlv_last->next = tlv; @@ -2301,6 +2359,9 @@ bgp_attr_parse (struct peer *peer, struct attr *attr, bgp_size_t size, case BGP_ATTR_EXT_COMMUNITIES: ret = bgp_attr_ext_communities (&attr_args); break; +#if ENABLE_BGP_VNC + case BGP_ATTR_VNC: +#endif case BGP_ATTR_ENCAP: ret = bgp_attr_encap (type, peer, length, attr, flag, startp); break; @@ -2570,7 +2631,9 @@ bgp_packet_mpattr_prefix_size (afi_t afi, safi_t safi, struct prefix *p) } /* - * Encodes the tunnel encapsulation attribute + * Encodes the tunnel encapsulation attribute, + * and with ENABLE_BGP_VNC the VNC attribute which uses + * almost the same TLV format */ static void bgp_packet_mpattr_tea( @@ -2604,6 +2667,15 @@ bgp_packet_mpattr_tea( attrhdrlen = 1 + 1; /* subTLV T + L */ break; +#if ENABLE_BGP_VNC + case BGP_ATTR_VNC: + attrname = "VNC"; + subtlvs = attr->extra->vnc_subtlvs; + attrlenfield = 0; /* no outer T + L */ + attrhdrlen = 2 + 2; /* subTLV T + L */ + break; +#endif + default: assert(0); } @@ -2649,6 +2721,11 @@ bgp_packet_mpattr_tea( if (attrtype == BGP_ATTR_ENCAP) { stream_putc (s, st->type); stream_putc (s, st->length); +#if ENABLE_BGP_VNC + } else { + stream_putw (s, st->type); + stream_putw (s, st->length); +#endif } stream_put (s, st->value, st->length); } @@ -3038,6 +3115,11 @@ bgp_packet_attribute (struct bgp *bgp, struct peer *peer, { /* Tunnel Encap attribute */ bgp_packet_mpattr_tea(bgp, peer, s, attr, BGP_ATTR_ENCAP); + +#if ENABLE_BGP_VNC + /* VNC attribute */ + bgp_packet_mpattr_tea(bgp, peer, s, attr, BGP_ATTR_VNC); +#endif } /* Unknown transit attribute. */ diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h index 0bf8c897de..d4f45ba60a 100644 --- a/bgpd/bgp_attr.h +++ b/bgpd/bgp_attr.h @@ -63,6 +63,21 @@ struct bgp_attr_encap_subtlv { uint8_t value[1]; /* will be extended */ }; +#if ENABLE_BGP_VNC +/* + * old rfp<->rfapi representation + */ +struct bgp_tea_options { + struct bgp_tea_options *next; + uint8_t options_count; + uint16_t options_length; /* each TLV may be 256 in length */ + uint8_t type; + uint8_t length; + void *value; /* pointer to data */ +}; + +#endif + /* Additional/uncommon BGP attributes. * lazily allocated as and when a struct attr * requires it. @@ -103,10 +118,14 @@ struct attr_extra u_char mp_nexthop_prefer_global; /* route tag */ - u_short tag; + route_tag_t tag; uint16_t encap_tunneltype; /* grr */ struct bgp_attr_encap_subtlv *encap_subtlvs; /* rfc5512 */ + +#if ENABLE_BGP_VNC + struct bgp_attr_encap_subtlv *vnc_subtlvs; /* VNC-specific */ +#endif }; /* BGP core attribute structure. 
*/ diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 39f723a6a4..4619a08d55 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -1696,6 +1696,50 @@ DEFUN (show_debugging_bgp, return CMD_SUCCESS; } +/* return count of number of debug flags set */ +int +bgp_debug_count(void) +{ + int ret = 0; + if (BGP_DEBUG (as4, AS4)) + ret++; + + if (BGP_DEBUG (as4, AS4_SEGMENT)) + ret++; + + if (BGP_DEBUG (bestpath, BESTPATH)) + ret++; + + if (BGP_DEBUG (keepalive, KEEPALIVE)) + ret++; + + if (BGP_DEBUG (neighbor_events, NEIGHBOR_EVENTS)) + ret++; + + if (BGP_DEBUG (nht, NHT)) + ret++; + + if (BGP_DEBUG (update_groups, UPDATE_GROUPS)) + ret++; + + if (BGP_DEBUG (update, UPDATE_PREFIX)) + ret++; + + if (BGP_DEBUG (update, UPDATE_IN)) + ret++; + + if (BGP_DEBUG (update, UPDATE_OUT)) + ret++; + + if (BGP_DEBUG (zebra, ZEBRA)) + ret++; + + if (BGP_DEBUG (allow_martians, ALLOW_MARTIANS)) + ret++; + + return ret; +} + static int bgp_config_write_debug (struct vty *vty) { diff --git a/bgpd/bgp_debug.h b/bgpd/bgp_debug.h index 835d585735..00fb670a47 100644 --- a/bgpd/bgp_debug.h +++ b/bgpd/bgp_debug.h @@ -150,4 +150,5 @@ extern int bgp_debug_update(struct peer *peer, struct prefix *p, extern int bgp_debug_bestpath(struct prefix *p); extern int bgp_debug_zebra(struct prefix *p); +extern int bgp_debug_count(void); #endif /* _QUAGGA_BGP_DEBUG_H */ diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c index 926e2650a2..6c72aa36d9 100644 --- a/bgpd/bgp_ecommunity.c +++ b/bgpd/bgp_ecommunity.c @@ -35,7 +35,7 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA static struct hash *ecomhash; /* Allocate a new ecommunities. */ -static struct ecommunity * +struct ecommunity * ecommunity_new (void) { return (struct ecommunity *) XCALLOC (MTYPE_ECOMMUNITY, @@ -59,7 +59,7 @@ ecommunity_free (struct ecommunity **ecom) structure, we don't add the value. Newly added value is sorted by numerical order. When the value is added to the structure return 1 else return 0. 
*/ -static int +int ecommunity_add_val (struct ecommunity *ecom, struct ecommunity_val *eval) { u_int8_t *p; diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h index 993fd5acfd..c5c58e4260 100644 --- a/bgpd/bgp_ecommunity.h +++ b/bgpd/bgp_ecommunity.h @@ -85,4 +85,7 @@ extern char *ecommunity_ecom2str (struct ecommunity *, int); extern int ecommunity_match (const struct ecommunity *, const struct ecommunity *); extern char *ecommunity_str (struct ecommunity *); +/* for vpn */ +extern struct ecommunity *ecommunity_new (void); +extern int ecommunity_add_val (struct ecommunity *, struct ecommunity_val *); #endif /* _QUAGGA_BGP_ECOMMUNITY_H */ diff --git a/bgpd/bgp_encap.c b/bgpd/bgp_encap.c index f739e81b67..bb62e37134 100644 --- a/bgpd/bgp_encap.c +++ b/bgpd/bgp_encap.c @@ -45,50 +45,9 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "bgpd/bgp_vty.h" #include "bgpd/bgp_encap.h" -static u_int16_t -decode_rd_type (u_char *pnt) -{ - u_int16_t v; - - v = ((u_int16_t) *pnt++ << 8); - v |= (u_int16_t) *pnt; - return v; -} - - -static void -decode_rd_as (u_char *pnt, struct rd_as *rd_as) -{ - rd_as->as = (u_int16_t) *pnt++ << 8; - rd_as->as |= (u_int16_t) *pnt++; - - rd_as->val = ((u_int32_t) *pnt++) << 24; - rd_as->val |= ((u_int32_t) *pnt++) << 16; - rd_as->val |= ((u_int32_t) *pnt++) << 8; - rd_as->val |= (u_int32_t) *pnt; -} - -static void -decode_rd_as4 (u_char *pnt, struct rd_as *rd_as) -{ - rd_as->as = (u_int32_t) *pnt++ << 24; - rd_as->as |= (u_int32_t) *pnt++ << 16; - rd_as->as |= (u_int32_t) *pnt++ << 8; - rd_as->as |= (u_int32_t) *pnt++; - - rd_as->val = ((u_int32_t) *pnt++ << 8); - rd_as->val |= (u_int32_t) *pnt; -} - -static void -decode_rd_ip (u_char *pnt, struct rd_ip *rd_ip) -{ - memcpy (&rd_ip->ip, pnt, 4); - pnt += 4; - - rd_ip->val = ((u_int16_t) *pnt++ << 8); - rd_ip->val |= (u_int16_t) *pnt; -} +#if ENABLE_BGP_VNC +#include "bgpd/rfapi/rfapi_backend.h" +#endif static void ecom2prd(struct ecommunity *ecom, struct prefix_rd *prd) @@ -230,7 +189,15 @@ bgp_nlri_parse_encap( if (!withdraw) { bgp_update (peer, &p, 0, attr, afi, SAFI_ENCAP, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL, 0); +#if ENABLE_BGP_VNC + rfapiProcessUpdate(peer, NULL, &p, &prd, attr, afi, SAFI_ENCAP, + ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL); +#endif } else { +#if ENABLE_BGP_VNC + rfapiProcessWithdraw(peer, NULL, &p, &prd, attr, afi, SAFI_ENCAP, + ZEBRA_ROUTE_BGP, 0); +#endif bgp_withdraw (peer, &p, 0, attr, afi, SAFI_ENCAP, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL); } @@ -987,27 +954,4 @@ bgp_encap_init (void) install_element (VIEW_NODE, &show_bgp_ipv6_encap_neighbor_advertised_routes_cmd); install_element (VIEW_NODE, &show_bgp_ipv6_encap_rd_neighbor_advertised_routes_cmd); #endif - - - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_rd_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_tags_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_rd_tags_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_neighbor_routes_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_rd_neighbor_routes_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_neighbor_advertised_routes_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_encap_rd_neighbor_advertised_routes_cmd); - -#ifdef HAVE_IPV6 - install_element (ENABLE_NODE, &show_bgp_ipv6_encap_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_encap_rd_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_encap_tags_cmd); - 
install_element (ENABLE_NODE, &show_bgp_ipv6_encap_rd_tags_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_encap_neighbor_routes_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_encap_rd_neighbor_routes_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_encap_neighbor_advertised_routes_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_encap_rd_neighbor_advertised_routes_cmd); -#endif - - } diff --git a/bgpd/bgp_encap_tlv.c b/bgpd/bgp_encap_tlv.c index 87aa0ceac5..5c18629aa6 100644 --- a/bgpd/bgp_encap_tlv.c +++ b/bgpd/bgp_encap_tlv.c @@ -410,9 +410,7 @@ bgp_encap_type_mpls_to_tlv( struct bgp_encap_type_mpls *bet, /* input structure */ struct attr *attr) { - struct attr_extra *extra = bgp_attr_extra_get(attr); - - extra->encap_tunneltype = BGP_ENCAP_TYPE_MPLS; + return; /* no encap attribute for MPLS */ } void diff --git a/bgpd/bgp_encap_types.h b/bgpd/bgp_encap_types.h index 603ff9d2d6..0985446ff2 100644 --- a/bgpd/bgp_encap_types.h +++ b/bgpd/bgp_encap_types.h @@ -32,7 +32,7 @@ typedef enum { BGP_ENCAP_TYPE_IP_IN_IP=7, BGP_ENCAP_TYPE_VXLAN=8, BGP_ENCAP_TYPE_NVGRE=9, - BGP_ENCAP_TYPE_MPLS=10, + BGP_ENCAP_TYPE_MPLS=10, /* NOTE: Encap SAFI&Attribute not used */ BGP_ENCAP_TYPE_MPLS_IN_GRE=11, BGP_ENCAP_TYPE_VXLAN_GPE=12, BGP_ENCAP_TYPE_MPLS_IN_UDP=13, diff --git a/bgpd/bgp_filter.c b/bgpd/bgp_filter.c index cee68e4d4c..6755335535 100644 --- a/bgpd/bgp_filter.c +++ b/bgpd/bgp_filter.c @@ -702,8 +702,6 @@ bgp_filter_init (void) install_element (VIEW_NODE, &show_ip_as_path_access_list_cmd); install_element (VIEW_NODE, &show_ip_as_path_access_list_all_cmd); - install_element (ENABLE_NODE, &show_ip_as_path_access_list_cmd); - install_element (ENABLE_NODE, &show_ip_as_path_access_list_all_cmd); } void diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index c4c3b0f62a..2f2ea3ae41 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -689,7 +689,7 @@ bgp_adjust_routeadv (struct peer *peer) * * m > MRAI */ - diff = difftime(nowtime, peer->last_write); + diff = difftime(nowtime, peer->last_update); if (diff > (double) peer->v_routeadv) { BGP_TIMER_OFF(peer->t_routeadv); diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index 68d6cb7f38..3aa16b2632 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -54,6 +54,10 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "bgpd/bgp_filter.h" #include "bgpd/bgp_zebra.h" +#ifdef ENABLE_BGP_VNC +#include "bgpd/rfapi/rfapi_backend.h" +#endif + /* bgpd options, we use GNU getopt library. 
*/ static const struct option longopts[] = { @@ -282,7 +286,9 @@ bgp_exit (int status) bgp_vrf_terminate (); cmd_terminate (); vty_terminate (); - +#if ENABLE_BGP_VNC + vnc_zebra_destroy(); +#endif bgp_zebra_destroy(); if (bgp_nexthop_buf) stream_free (bgp_nexthop_buf); @@ -296,6 +302,8 @@ bgp_exit (int status) if (zlog_default) closezlog (zlog_default); + if (bgp_debug_count()) + log_memstats_stderr ("bgpd"); exit (status); } diff --git a/bgpd/bgp_memory.c b/bgpd/bgp_memory.c index 166400b745..72c0311c17 100644 --- a/bgpd/bgp_memory.c +++ b/bgpd/bgp_memory.c @@ -108,3 +108,6 @@ DEFINE_MTYPE(BGPD, BGP_REDIST, "BGP redistribution") DEFINE_MTYPE(BGPD, BGP_FILTER_NAME, "BGP Filter Information") DEFINE_MTYPE(BGPD, BGP_DUMP_STR, "BGP Dump String Information") DEFINE_MTYPE(BGPD, ENCAP_TLV, "ENCAP TLV") + +DEFINE_MTYPE(BGPD, BGP_TEA_OPTIONS, "BGP TEA Options") +DEFINE_MTYPE(BGPD, BGP_TEA_OPTIONS_VALUE, "BGP TEA Options Value") diff --git a/bgpd/bgp_memory.h b/bgpd/bgp_memory.h index b2956f07ed..a4ce8b891b 100644 --- a/bgpd/bgp_memory.h +++ b/bgpd/bgp_memory.h @@ -105,4 +105,7 @@ DECLARE_MTYPE(BGP_FILTER_NAME) DECLARE_MTYPE(BGP_DUMP_STR) DECLARE_MTYPE(ENCAP_TLV) +DECLARE_MTYPE(BGP_TEA_OPTIONS) +DECLARE_MTYPE(BGP_TEA_OPTIONS_VALUE) + #endif /* _QUAGGA_BGP_MEMORY_H */ diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index 48baedcea5..c050e6ddd8 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -35,16 +35,35 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "bgpd/bgp_attr.h" #include "bgpd/bgp_mplsvpn.h" -static u_int16_t +#if ENABLE_BGP_VNC +#include "bgpd/rfapi/rfapi_backend.h" +#endif + +u_int16_t decode_rd_type (u_char *pnt) { u_int16_t v; v = ((u_int16_t) *pnt++ << 8); +#if ENABLE_BGP_VNC + /* + * VNC L2 stores LHI in lower byte, so omit it + */ + if (v != RD_TYPE_VNC_ETH) + v |= (u_int16_t) *pnt; +#else /* duplicate code for clarity */ v |= (u_int16_t) *pnt; +#endif + return v; } +void +encode_rd_type (u_int16_t v, u_char *pnt) +{ + *((u_int16_t *)pnt) = htons(v); +} + u_int32_t decode_label (u_char *pnt) { @@ -56,8 +75,19 @@ decode_label (u_char *pnt) return l; } +void +encode_label(u_int32_t label, + u_char *pnt) +{ + if (pnt == NULL) + return; + *pnt++ = (label>>12) & 0xff; + *pnt++ = (label>>4) & 0xff; + *pnt++ = ((label<<4)+1) & 0xff; /* S=1 */ +} + /* type == RD_TYPE_AS */ -static void +void decode_rd_as (u_char *pnt, struct rd_as *rd_as) { rd_as->as = (u_int16_t) *pnt++ << 8; @@ -70,7 +100,7 @@ decode_rd_as (u_char *pnt, struct rd_as *rd_as) } /* type == RD_TYPE_AS4 */ -static void +void decode_rd_as4 (u_char *pnt, struct rd_as *rd_as) { rd_as->as = (u_int32_t) *pnt++ << 24; @@ -83,7 +113,7 @@ decode_rd_as4 (u_char *pnt, struct rd_as *rd_as) } /* type == RD_TYPE_IP */ -static void +void decode_rd_ip (u_char *pnt, struct rd_ip *rd_ip) { memcpy (&rd_ip->ip, pnt, 4); @@ -93,6 +123,17 @@ decode_rd_ip (u_char *pnt, struct rd_ip *rd_ip) rd_ip->val |= (u_int16_t) *pnt; } +#if ENABLE_BGP_VNC +/* type == RD_TYPE_VNC_ETH */ +static void +decode_rd_vnc_eth (u_char *pnt, struct rd_vnc_eth *rd_vnc_eth) +{ + rd_vnc_eth->type = RD_TYPE_VNC_ETH; + rd_vnc_eth->local_nve_id = pnt[1]; + memcpy (rd_vnc_eth->macaddr.octet, pnt + 2, ETHER_ADDR_LEN); +} +#endif + int bgp_nlri_parse_vpn (struct peer *peer, struct attr *attr, struct bgp_nlri *packet) @@ -111,6 +152,9 @@ bgp_nlri_parse_vpn (struct peer *peer, struct attr *attr, safi_t safi; int addpath_encoded; u_int32_t addpath_id; +#if ENABLE_BGP_VNC + u_int32_t label = 0; +#endif /* Check peer status. 
*/ if (peer->status != Established) @@ -146,17 +190,17 @@ bgp_nlri_parse_vpn (struct peer *peer, struct attr *attr, pnt += BGP_ADDPATH_ID_LEN; } + /* Fetch prefix length. */ + prefixlen = *pnt++; + p.family = afi2family (packet->afi); + psize = PSIZE (prefixlen); + if (prefixlen < 88) { zlog_err ("prefix length is less than 88: %d", prefixlen); return -1; } - /* Fetch prefix length. */ - prefixlen = *pnt++; - p.family = afi2family (packet->afi); - psize = PSIZE (prefixlen); - /* sanity check against packet data */ if (prefixlen < VPN_PREFIXLEN_MIN_BYTES*8 || (pnt + psize) > lim) { @@ -184,6 +228,10 @@ bgp_nlri_parse_vpn (struct peer *peer, struct attr *attr, } +#if ENABLE_BGP_VNC + label = decode_label (pnt); +#endif + /* Copyr label to prefix. */ tagpnt = pnt; @@ -207,22 +255,40 @@ bgp_nlri_parse_vpn (struct peer *peer, struct attr *attr, decode_rd_ip (pnt + 5, &rd_ip); break; +#if ENABLE_BGP_VNC + case RD_TYPE_VNC_ETH: + break; +#endif + default: zlog_err ("Unknown RD type %d", type); break; /* just report */ } - p.prefixlen = prefixlen - VPN_PREFIXLEN_MIN_BYTES*8; + p.prefixlen = prefixlen - VPN_PREFIXLEN_MIN_BYTES*8;/* exclude label & RD */ memcpy (&p.u.prefix, pnt + VPN_PREFIXLEN_MIN_BYTES, psize - VPN_PREFIXLEN_MIN_BYTES); if (attr) + { bgp_update (peer, &p, addpath_id, attr, packet->afi, SAFI_MPLS_VPN, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, tagpnt, 0); +#if ENABLE_BGP_VNC + rfapiProcessUpdate(peer, NULL, &p, &prd, attr, packet->afi, + SAFI_MPLS_VPN, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, + &label); +#endif + } else + { +#if ENABLE_BGP_VNC + rfapiProcessWithdraw(peer, NULL, &p, &prd, attr, packet->afi, + SAFI_MPLS_VPN, ZEBRA_ROUTE_BGP, 0); +#endif bgp_withdraw (peer, &p, addpath_id, attr, packet->afi, SAFI_MPLS_VPN, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, tagpnt); } + } /* Packet length consistency check. 
*/ if (pnt != lim) return -1; @@ -346,6 +412,21 @@ prefix_rd2str (struct prefix_rd *prd, char *buf, size_t size) snprintf (buf, size, "%s:%d", inet_ntoa (rd_ip.ip), rd_ip.val); return buf; } +#if ENABLE_BGP_VNC + else if (type == RD_TYPE_VNC_ETH) + { + snprintf(buf, size, "LHI:%d, %02x:%02x:%02x:%02x:%02x:%02x", + *(pnt+1), /* LHI */ + *(pnt+2), /* MAC[0] */ + *(pnt+3), + *(pnt+4), + *(pnt+5), + *(pnt+6), + *(pnt+7)); + + return buf; + } +#endif return NULL; } @@ -493,6 +574,9 @@ show_adj_route_vpn (struct vty *vty, struct peer *peer, struct prefix_rd *prd, u u_int16_t type; struct rd_as rd_as; struct rd_ip rd_ip = {0}; +#if ENABLE_BGP_VNC + struct rd_vnc_eth rd_vnc_eth; +#endif u_char *pnt; pnt = rn->p.u.val; @@ -506,6 +590,10 @@ show_adj_route_vpn (struct vty *vty, struct peer *peer, struct prefix_rd *prd, u decode_rd_as4 (pnt + 2, &rd_as); else if (type == RD_TYPE_IP) decode_rd_ip (pnt + 2, &rd_ip); +#if ENABLE_BGP_VNC + else if (type == RD_TYPE_VNC_ETH) + decode_rd_vnc_eth (pnt, &rd_vnc_eth); +#endif if (use_json) { @@ -524,6 +612,17 @@ show_adj_route_vpn (struct vty *vty, struct peer *peer, struct prefix_rd *prd, u vty_out (vty, "%u:%d", rd_as.as, rd_as.val); else if (type == RD_TYPE_IP) vty_out (vty, "%s:%d", inet_ntoa (rd_ip.ip), rd_ip.val); +#if ENABLE_BGP_VNC + else if (type == RD_TYPE_VNC_ETH) + vty_out (vty, "%u:%02x:%02x:%02x:%02x:%02x:%02x", + rd_vnc_eth.local_nve_id, + rd_vnc_eth.macaddr.octet[0], + rd_vnc_eth.macaddr.octet[1], + rd_vnc_eth.macaddr.octet[2], + rd_vnc_eth.macaddr.octet[3], + rd_vnc_eth.macaddr.octet[4], + rd_vnc_eth.macaddr.octet[5]); +#endif vty_out (vty, "%s", VTY_NEWLINE); } @@ -546,7 +645,7 @@ show_adj_route_vpn (struct vty *vty, struct peer *peer, struct prefix_rd *prd, u if (use_json) { json_object_object_add(json, "routes", json_routes); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } return CMD_SUCCESS; @@ -685,6 +784,9 @@ bgp_show_mpls_vpn (struct vty *vty, afi_t afi, struct prefix_rd *prd, u_int16_t type; struct rd_as rd_as; struct rd_ip rd_ip = {0}; +#if ENABLE_BGP_VNC + struct rd_vnc_eth rd_vnc_eth; +#endif u_char *pnt; pnt = rn->p.u.val; @@ -698,6 +800,10 @@ bgp_show_mpls_vpn (struct vty *vty, afi_t afi, struct prefix_rd *prd, decode_rd_as4 (pnt + 2, &rd_as); else if (type == RD_TYPE_IP) decode_rd_ip (pnt + 2, &rd_ip); +#if ENABLE_BGP_VNC + else if (type == RD_TYPE_VNC_ETH) + decode_rd_vnc_eth (pnt, &rd_vnc_eth); +#endif if (use_json) { @@ -716,6 +822,17 @@ bgp_show_mpls_vpn (struct vty *vty, afi_t afi, struct prefix_rd *prd, vty_out (vty, "%u:%d", rd_as.as, rd_as.val); else if (type == RD_TYPE_IP) vty_out (vty, "%s:%d", inet_ntoa (rd_ip.ip), rd_ip.val); +#if ENABLE_BGP_VNC + else if (type == RD_TYPE_VNC_ETH) + vty_out (vty, "%u:%02x:%02x:%02x:%02x:%02x:%02x", + rd_vnc_eth.local_nve_id, + rd_vnc_eth.macaddr.octet[0], + rd_vnc_eth.macaddr.octet[1], + rd_vnc_eth.macaddr.octet[2], + rd_vnc_eth.macaddr.octet[3], + rd_vnc_eth.macaddr.octet[4], + rd_vnc_eth.macaddr.octet[5]); +#endif vty_out (vty, "%s", VTY_NEWLINE); } rd_header = 0; @@ -753,7 +870,7 @@ bgp_show_mpls_vpn (struct vty *vty, afi_t afi, struct prefix_rd *prd, if (use_json) { json_object_object_add(json, "routes", json_nroute); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -1182,17 
+1299,4 @@ bgp_mplsvpn_init (void) install_element (VIEW_NODE, &show_ip_bgp_vpnv4_rd_neighbor_routes_cmd); install_element (VIEW_NODE, &show_ip_bgp_vpnv4_all_neighbor_advertised_routes_cmd); install_element (VIEW_NODE, &show_ip_bgp_vpnv4_rd_neighbor_advertised_routes_cmd); - - install_element (ENABLE_NODE, &show_bgp_ipv4_vpn_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv4_vpn_rd_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_vpn_cmd); - install_element (ENABLE_NODE, &show_bgp_ipv6_vpn_rd_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_all_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_rd_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_all_tags_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_rd_tags_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_all_neighbor_routes_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_rd_neighbor_routes_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_all_neighbor_advertised_routes_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_vpnv4_rd_neighbor_advertised_routes_cmd); } diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h index 3299b9cb9a..f75b989057 100644 --- a/bgpd/bgp_mplsvpn.h +++ b/bgpd/bgp_mplsvpn.h @@ -24,9 +24,37 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #define RD_TYPE_AS 0 #define RD_TYPE_IP 1 #define RD_TYPE_AS4 2 +#if ENABLE_BGP_VNC +#define RD_TYPE_VNC_ETH 0xff00 /* VNC L2VPN */ +#endif #define RD_ADDRSTRLEN 28 +typedef enum { + MPLS_LABEL_IPV4_EXPLICIT_NULL = 0, /* [RFC3032] */ + MPLS_LABEL_ROUTER_ALERT = 1, /* [RFC3032] */ + MPLS_LABEL_IPV6_EXPLICIT_NULL = 2, /* [RFC3032] */ + MPLS_LABEL_IMPLICIT_NULL = 3, /* [RFC3032] */ + MPLS_LABEL_UNASSIGNED4 = 4, + MPLS_LABEL_UNASSIGNED5 = 5, + MPLS_LABEL_UNASSIGNED6 = 6, + MPLS_LABEL_ELI = 7, /* Entropy Indicator [RFC6790] */ + MPLS_LABEL_UNASSIGNED8 = 8, + MPLS_LABEL_UNASSIGNED9 = 9, + MPLS_LABEL_UNASSIGNED10 = 10, + MPLS_LABEL_UNASSIGNED11 = 11, + MPLS_LABEL_GAL = 13, /* [RFC5586] */ + MPLS_LABEL_OAM_ALERT = 14, /* [RFC3429] */ + MPLS_LABEL_EXTENSION = 15 /* [RFC7274] */ +} mpls_special_label_t; + +#define MPLS_LABEL_IS_SPECIAL(label) \ + ((label) <= MPLS_LABEL_EXTENSION) +#define MPLS_LABEL_IS_NULL(label) \ + ((label) == MPLS_LABEL_IPV4_EXPLICIT_NULL || \ + (label) == MPLS_LABEL_IPV6_EXPLICIT_NULL || \ + (label) == MPLS_LABEL_IMPLICIT_NULL) + struct rd_as { u_int16_t type; @@ -41,9 +69,27 @@ struct rd_ip u_int16_t val; }; +#if ENABLE_BGP_VNC +struct rd_vnc_eth +{ + u_int16_t type; + uint8_t local_nve_id; + struct ethaddr macaddr; +}; +#endif + +extern u_int16_t decode_rd_type (u_char *); +extern void encode_rd_type (u_int16_t, u_char *); extern void bgp_mplsvpn_init (void); extern int bgp_nlri_parse_vpn (struct peer *, struct attr *, struct bgp_nlri *); extern u_int32_t decode_label (u_char *); +extern void encode_label(u_int32_t, u_char *); +extern void decode_rd_as (u_char *, struct rd_as *); +extern void decode_rd_as4 (u_char *, struct rd_as *); +extern void decode_rd_ip (u_char *, struct rd_ip *); +#if ENABLE_BGP_VNC +extern void decode_vnc_eth (u_char *, struct rd_vnc_eth *); +#endif extern int str2prefix_rd (const char *, struct prefix_rd *); extern int str2tag (const char *, u_char *); extern char *prefix_rd2str (struct prefix_rd *, char *, size_t); diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index f59971c1b8..6cd1721f96 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -569,16 +569,11 @@ bgp_scan_init (struct bgp *bgp) void bgp_scan_vty_init (void) { - install_element 
(ENABLE_NODE, &show_ip_bgp_nexthop_cmd); install_element (VIEW_NODE, &show_ip_bgp_nexthop_cmd); install_element (VIEW_NODE, &show_ip_bgp_nexthop_detail_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_nexthop_detail_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_nexthop_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_all_nexthop_cmd); install_element (VIEW_NODE, &show_ip_bgp_instance_nexthop_cmd); install_element (VIEW_NODE, &show_ip_bgp_instance_all_nexthop_cmd); install_element (VIEW_NODE, &show_ip_bgp_instance_nexthop_detail_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_nexthop_detail_cmd); } void diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h index 861da5740f..652a6813ee 100644 --- a/bgpd/bgp_nexthop.h +++ b/bgpd/bgp_nexthop.h @@ -34,6 +34,8 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA AF_UNSPEC)) \ ) +#define BGP_MP_NEXTHOP_FAMILY NEXTHOP_FAMILY + /* BGP nexthop cache value structure. */ struct bgp_nexthop_cache { diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index ae54cd43d3..6811513448 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -341,6 +341,7 @@ bgp_write (struct thread *thread) u_char type; struct stream *s; int num; + int update_last_write = 0; unsigned int count = 0; unsigned int oc = 0; @@ -432,6 +433,7 @@ bgp_write (struct thread *thread) /* OK we send packet so delete it. */ bgp_packet_delete (peer); + update_last_write = 1; } while (++count < peer->bgp->wpkt_quanta && (s = bgp_write_packet (peer)) != NULL); @@ -439,8 +441,12 @@ bgp_write (struct thread *thread) bgp_write_proceed_actions (peer); done: - /* Update the last write if some updates were written. */ + /* Update last_update if UPDATEs were written. */ if (peer->update_out > oc) + peer->last_update = bgp_clock (); + + /* If we TXed any flavor of packet update last_write */ + if (update_last_write) peer->last_write = bgp_clock (); sockopt_cork (peer->fd, 0); diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 9f3c9baae5..db73d379d4 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -61,6 +61,12 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "bgpd/bgp_nht.h" #include "bgpd/bgp_updgrp.h" +#if ENABLE_BGP_VNC +#include "bgpd/rfapi/rfapi_backend.h" +#include "bgpd/rfapi/vnc_import_bgp.h" +#include "bgpd/rfapi/vnc_export_bgp.h" +#endif + /* Extern from bgp_dump.c */ extern const char *bgp_origin_str[]; extern const char *bgp_origin_long_str[]; @@ -131,6 +137,13 @@ bgp_info_extra_get (struct bgp_info *ri) return ri->extra; } +/* Allocate new bgp info structure. */ +struct bgp_info * +bgp_info_new (void) +{ + return XCALLOC (MTYPE_BGP_ROUTE, sizeof (struct bgp_info)); +} + /* Free bgp route information. 
*/ static void bgp_info_free (struct bgp_info *binfo) @@ -227,7 +240,7 @@ bgp_info_delete (struct bgp_node *rn, struct bgp_info *ri) /* undo the effects of a previous call to bgp_info_delete; typically called when a route is deleted and then quickly re-added before the deletion has been processed */ -static void +void bgp_info_restore (struct bgp_node *rn, struct bgp_info *ri) { bgp_info_unset_flag (rn, ri, BGP_INFO_REMOVED); @@ -331,7 +344,7 @@ bgp_info_path_with_addpath_rx_str (struct bgp_info *ri, char *buf) static int bgp_info_cmp (struct bgp *bgp, struct bgp_info *new, struct bgp_info *exist, int *paths_eq, struct bgp_maxpaths_cfg *mpath_cfg, int debug, - char *pfx_buf) + const char *pfx_buf) { struct attr *newattr, *existattr; struct attr_extra *newattre, *existattre; @@ -846,6 +859,31 @@ bgp_info_cmp (struct bgp *bgp, struct bgp_info *new, struct bgp_info *exist, return 1; } +/* Compare two bgp route entity. Return -1 if new is preferred, 1 if exist + * is preferred, or 0 if they are the same (usually will only occur if + * multipath is enabled + * This version is compatible with */ +int +bgp_info_cmp_compatible (struct bgp *bgp, struct bgp_info *new, struct bgp_info *exist, + afi_t afi, safi_t safi) +{ + int paths_eq; + struct bgp_maxpaths_cfg mpath_cfg; + int ret; + ret = bgp_info_cmp (bgp, new, exist, &paths_eq, &mpath_cfg, 0, __func__); + + if (paths_eq) + ret = 0; + else + { + if (ret == 1) + ret = -1; + else + ret = 1; + } + return ret; +} + static enum filter_type bgp_input_filter (struct peer *peer, struct prefix *p, struct attr *attr, afi_t afi, safi_t safi) @@ -979,8 +1017,8 @@ bgp_input_modifier (struct peer *peer, struct prefix *p, struct attr *attr, filter = &peer->filter[afi][safi]; /* Apply default weight value. */ - if (peer->weight) - (bgp_attr_extra_get (attr))->weight = peer->weight; + if (peer->weight[afi][safi]) + (bgp_attr_extra_get (attr))->weight = peer->weight[afi][safi]; if (rmap_name) { @@ -1036,8 +1074,8 @@ bgp_output_modifier (struct peer *peer, struct prefix *p, struct attr *attr, filter = &peer->filter[afi][safi]; /* Apply default weight value. */ - if (peer->weight) - (bgp_attr_extra_get (attr))->weight = peer->weight; + if (peer->weight[afi][safi]) + (bgp_attr_extra_get (attr))->weight = peer->weight[afi][safi]; if (rmap_name) { @@ -1158,6 +1196,7 @@ subgroup_announce_check (struct bgp_info *ri, struct update_subgroup *subgrp, int reflect; afi_t afi; safi_t safi; + int samepeer_safe = 0; /* for synthetic mplsvpns routes */ if (DISABLE_BGP_ANNOUNCE) return 0; @@ -1174,6 +1213,22 @@ subgroup_announce_check (struct bgp_info *ri, struct update_subgroup *subgrp, bgp = SUBGRP_INST(subgrp); riattr = bgp_info_mpath_count (ri) ? bgp_info_mpath_attr (ri) : ri->attr; +#if ENABLE_BGP_VNC + if (((afi == AFI_IP) || (afi == AFI_IP6)) && (safi == SAFI_MPLS_VPN) && + ((ri->type == ZEBRA_ROUTE_BGP_DIRECT) || + (ri->type == ZEBRA_ROUTE_BGP_DIRECT_EXT))) { + + /* + * direct and direct_ext type routes originate internally even + * though they can have peer pointers that reference other systems + */ + char buf[BUFSIZ]; + prefix2str(p, buf, BUFSIZ); + zlog_debug("%s: pfx %s bgp_direct->vpn route peer safe", __func__, buf); + samepeer_safe = 1; + } +#endif + /* With addpath we may be asked to TX all kinds of paths so make sure * ri is valid */ if (!CHECK_FLAG (ri->flags, BGP_INFO_VALID) || @@ -1310,7 +1365,7 @@ subgroup_announce_check (struct bgp_info *ri, struct update_subgroup *subgrp, reflect = 0; /* IBGP reflection check. 
*/ - if (reflect) + if (reflect && !samepeer_safe) { /* A route from a Client peer. */ if (CHECK_FLAG (from->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) @@ -1868,8 +1923,13 @@ bgp_process_main (struct work_queue *wq, void *data) !bgp->addpath_tx_used[afi][safi]) { if (bgp_zebra_has_route_changed (rn, old_select)) + { +#if ENABLE_BGP_VNC + vnc_import_bgp_add_route(bgp, p, old_select); + vnc_import_bgp_exterior_add_route(bgp, p, old_select); +#endif bgp_zebra_announce (p, old_select, bgp, afi, safi); - + } UNSET_FLAG (old_select->flags, BGP_INFO_MULTIPATH_CHG); bgp_zebra_clear_route_change_flags (rn); UNSET_FLAG (rn->flags, BGP_NODE_PROCESS_SCHEDULED); @@ -1902,6 +1962,21 @@ bgp_process_main (struct work_queue *wq, void *data) UNSET_FLAG (new_select->flags, BGP_INFO_MULTIPATH_CHG); } +#if ENABLE_BGP_VNC + if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { + if (old_select != new_select) { + if (old_select) { + vnc_import_bgp_exterior_del_route(bgp, p, old_select); + vnc_import_bgp_del_route(bgp, p, old_select); + } + if (new_select) { + vnc_import_bgp_exterior_add_route(bgp, p, new_select); + vnc_import_bgp_add_route(bgp, p, new_select); + } + } + } +#endif + group_announce_route(bgp, afi, safi, rn, new_select); /* FIB update. */ @@ -2135,7 +2210,7 @@ bgp_rib_remove (struct bgp_node *rn, struct bgp_info *ri, struct peer *peer, static void bgp_rib_withdraw (struct bgp_node *rn, struct bgp_info *ri, struct peer *peer, - afi_t afi, safi_t safi) + afi_t afi, safi_t safi, struct prefix_rd *prd) { int status = BGP_DAMP_NONE; @@ -2151,6 +2226,32 @@ bgp_rib_withdraw (struct bgp_node *rn, struct bgp_info *ri, struct peer *peer, return; } +#if ENABLE_BGP_VNC + if (safi == SAFI_MPLS_VPN) { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get(peer->bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) { + table = (struct bgp_table *)(prn->info); + + vnc_import_bgp_del_vnc_host_route_mode_resolve_nve( + peer->bgp, + prd, + table, + &rn->p, + ri); + } + bgp_unlock_node(prn); + } + if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { + if (CHECK_FLAG (ri->flags, BGP_INFO_SELECTED)) { + + vnc_import_bgp_del_route(peer->bgp, &rn->p, ri); + vnc_import_bgp_exterior_del_route(peer->bgp, &rn->p, ri); + } + } +#endif bgp_rib_remove (rn, ri, peer, afi, safi); } @@ -2253,6 +2354,9 @@ bgp_update (struct peer *peer, struct prefix *p, u_int32_t addpath_id, char buf[SU_ADDRSTRLEN]; char buf2[30]; int connected = 0; +#if ENABLE_BGP_VNC + int vnc_implicit_withdraw = 0; +#endif bgp = peer->bgp; rn = bgp_afi_node_get (bgp->rib[afi][safi], afi, safi, p, prd); @@ -2442,6 +2546,35 @@ bgp_update (struct peer *peer, struct prefix *p, u_int32_t addpath_id, if (! CHECK_FLAG (ri->flags, BGP_INFO_HISTORY)) bgp_damp_withdraw (ri, rn, afi, safi, 1); } +#if ENABLE_BGP_VNC + if (safi == SAFI_MPLS_VPN) { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get(bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) { + table = (struct bgp_table *)(prn->info); + + vnc_import_bgp_del_vnc_host_route_mode_resolve_nve( + bgp, + prd, + table, + p, + ri); + } + bgp_unlock_node(prn); + } + if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) { + if (CHECK_FLAG (ri->flags, BGP_INFO_SELECTED)) { + /* + * Implicit withdraw case. + */ + ++vnc_implicit_withdraw; + vnc_import_bgp_del_route(bgp, p, ri); + vnc_import_bgp_exterior_del_route(bgp, p, ri); + } + } +#endif /* Update to new attribute. 
*/ bgp_attr_unintern (&ri->attr); @@ -2451,6 +2584,25 @@ bgp_update (struct peer *peer, struct prefix *p, u_int32_t addpath_id, if (safi == SAFI_MPLS_VPN) memcpy ((bgp_info_extra_get (ri))->tag, tag, 3); +#if ENABLE_BGP_VNC + if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) + { + if (vnc_implicit_withdraw) + { + /* + * Add back the route with its new attributes (e.g., nexthop). + * The route is still selected, until the route selection + * queued by bgp_process actually runs. We have to make this + * update to the VNC side immediately to avoid racing against + * configuration changes (e.g., route-map changes) which + * trigger re-importation of the entire RIB. + */ + vnc_import_bgp_add_route(bgp, p, ri); + vnc_import_bgp_exterior_add_route(bgp, p, ri); + } + } +#endif + /* Update bgp route dampening information. */ if (CHECK_FLAG (bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING) && peer->sort == BGP_PEER_EBGP) @@ -2490,6 +2642,28 @@ bgp_update (struct peer *peer, struct prefix *p, u_int32_t addpath_id, else bgp_info_set_flag (rn, ri, BGP_INFO_VALID); +#if ENABLE_BGP_VNC + if (safi == SAFI_MPLS_VPN) + { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get(bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) + { + table = (struct bgp_table *)(prn->info); + + vnc_import_bgp_add_vnc_host_route_mode_resolve_nve( + bgp, + prd, + table, + p, + ri); + } + bgp_unlock_node(prn); + } +#endif + /* Process change. */ bgp_aggregate_increment (bgp, p, ri, afi, safi); @@ -2560,6 +2734,28 @@ bgp_update (struct peer *peer, struct prefix *p, u_int32_t addpath_id, /* route_node_get lock */ bgp_unlock_node (rn); +#if ENABLE_BGP_VNC + if (safi == SAFI_MPLS_VPN) + { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get(bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) + { + table = (struct bgp_table *)(prn->info); + + vnc_import_bgp_add_vnc_host_route_mode_resolve_nve( + bgp, + prd, + table, + p, + new); + } + bgp_unlock_node(prn); + } +#endif + /* If maximum prefix count is configured and current prefix count exeed it. */ if (bgp_maximum_prefix_overflow (peer, afi, safi, 0)) @@ -2652,7 +2848,7 @@ bgp_withdraw (struct peer *peer, struct prefix *p, u_int32_t addpath_id, /* Withdraw specified route from routing table. */ if (ri && ! CHECK_FLAG (ri->flags, BGP_INFO_HISTORY)) - bgp_rib_withdraw (rn, ri, peer, afi, safi); + bgp_rib_withdraw (rn, ri, peer, afi, safi, prd); else if (bgp_debug_update(peer, p, NULL, 1)) zlog_debug ("%s Can't find the route %s/%d", peer->host, inet_ntop (p->family, &p->u.prefix, buf, SU_ADDRSTRLEN), @@ -3046,6 +3242,10 @@ bgp_clear_route_all (struct peer *peer) for (afi = AFI_IP; afi < AFI_MAX; afi++) for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) bgp_clear_route (peer, afi, safi); + +#if ENABLE_BGP_VNC + rfapiProcessPeerDown(peer); +#endif } void @@ -3116,9 +3316,15 @@ bgp_cleanup_table(struct bgp_table *table, safi_t safi) && ri->type == ZEBRA_ROUTE_BGP && (ri->sub_type == BGP_ROUTE_NORMAL || ri->sub_type == BGP_ROUTE_AGGREGATE)) + { +#if ENABLE_BGP_VNC + if (table->owner && table->owner->bgp) + vnc_import_bgp_del_route(table->owner->bgp, &rn->p, ri); +#endif bgp_zebra_withdraw (&rn->p, ri, safi); } } +} /* Delete all kernel routes. 
*/ void @@ -3416,6 +3622,9 @@ bgp_static_update_main (struct bgp *bgp, struct prefix *p, struct attr attr; struct attr *attr_new; int ret; +#if ENABLE_BGP_VNC + int vnc_implicit_withdraw = 0; +#endif assert (bgp_static); if (!bgp_static) @@ -3488,9 +3697,34 @@ bgp_static_update_main (struct bgp *bgp, struct prefix *p, bgp_info_restore(rn, ri); else bgp_aggregate_decrement (bgp, p, ri, afi, safi); +#if ENABLE_BGP_VNC + if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) + { + if (CHECK_FLAG (ri->flags, BGP_INFO_SELECTED)) + { + /* + * Implicit withdraw case. + * We have to do this before ri is changed + */ + ++vnc_implicit_withdraw; + vnc_import_bgp_del_route(bgp, p, ri); + vnc_import_bgp_exterior_del_route(bgp, p, ri); + } + } +#endif bgp_attr_unintern (&ri->attr); ri->attr = attr_new; ri->uptime = bgp_clock (); +#if ENABLE_BGP_VNC + if ((afi == AFI_IP || afi == AFI_IP6) && (safi == SAFI_UNICAST)) + { + if (vnc_implicit_withdraw) + { + vnc_import_bgp_add_route(bgp, p, ri); + vnc_import_bgp_exterior_add_route(bgp, p, ri); + } + } +#endif /* Nexthop reachability check. */ if (bgp_flag_check (bgp, BGP_FLAG_IMPORT_CHECK)) @@ -3636,6 +3870,18 @@ bgp_static_withdraw_safi (struct bgp *bgp, struct prefix *p, afi_t afi, /* Withdraw static BGP route from routing table. */ if (ri) { +#if ENABLE_BGP_VNC + rfapiProcessWithdraw( + ri->peer, + NULL, + p, + prd, + ri->attr, + afi, + safi, + ri->type, + 1); /* Kill, since it is an administrative change */ +#endif bgp_aggregate_decrement (bgp, p, ri, afi, safi); bgp_info_delete (rn, ri); bgp_process (bgp, rn, afi, safi); @@ -3654,6 +3900,9 @@ bgp_static_update_safi (struct bgp *bgp, struct prefix *p, struct attr *attr_new; struct attr attr = { 0 }; struct bgp_info *ri; +#if ENABLE_BGP_VNC + u_int32_t label = 0; +#endif assert (bgp_static); @@ -3730,10 +3979,19 @@ bgp_static_update_safi (struct bgp *bgp, struct prefix *p, bgp_attr_unintern (&ri->attr); ri->attr = attr_new; ri->uptime = bgp_clock (); +#if ENABLE_BGP_VNC + if (ri->extra) + label = decode_label (ri->extra->tag); +#endif /* Process change. */ bgp_aggregate_increment (bgp, p, ri, afi, safi); bgp_process (bgp, rn, afi, safi); +#if ENABLE_BGP_VNC + rfapiProcessUpdate(ri->peer, NULL, p, &bgp_static->prd, + ri->attr, afi, safi, + ri->type, ri->sub_type, &label); +#endif bgp_unlock_node (rn); aspath_unintern (&attr.aspath); bgp_attr_extra_free (&attr); @@ -3748,6 +4006,9 @@ bgp_static_update_safi (struct bgp *bgp, struct prefix *p, SET_FLAG (new->flags, BGP_INFO_VALID); new->extra = bgp_info_extra_new(); memcpy (new->extra->tag, bgp_static->tag, 3); +#if ENABLE_BGP_VNC + label = decode_label (bgp_static->tag); +#endif /* Aggregate address increment. */ bgp_aggregate_increment (bgp, p, new, afi, safi); @@ -3761,6 +4022,12 @@ bgp_static_update_safi (struct bgp *bgp, struct prefix *p, /* Process change. */ bgp_process (bgp, rn, afi, safi); +#if ENABLE_BGP_VNC + rfapiProcessUpdate(new->peer, NULL, p, &bgp_static->prd, + new->attr, afi, safi, + new->type, new->sub_type, &label); +#endif + /* Unintern original. 
*/ aspath_unintern (&attr.aspath); bgp_attr_extra_free (&attr); @@ -5404,7 +5671,7 @@ DEFUN (no_ipv6_aggregate_address_summary_only, void bgp_redistribute_add (struct bgp *bgp, struct prefix *p, const struct in_addr *nexthop, const struct in6_addr *nexthop6, unsigned int ifindex, - u_int32_t metric, u_char type, u_short instance, u_short tag) + u_int32_t metric, u_char type, u_short instance, route_tag_t tag) { struct bgp_info *new; struct bgp_info *bi; @@ -5773,7 +6040,7 @@ route_vty_out (struct vty *vty, struct prefix *p, vty_out(vty, "?"); } /* IPv4 Next Hop */ - else if (p->family == AF_INET || !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) + else if (p->family == AF_INET && !BGP_ATTR_NEXTHOP_AFI_IP6(attr)) { if (json_paths) { @@ -5958,7 +6225,14 @@ route_vty_out (struct vty *vty, struct prefix *p, json_object_array_add(json_paths, json_path); } else + { vty_out (vty, "%s", VTY_NEWLINE); +#if ENABLE_BGP_VNC + /* prints an additional line, indented, with VNC info, if present */ + if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP) || (safi == SAFI_UNICAST)) + rfapi_vty_out_vncinfo(vty, p, binfo, safi); +#endif + } } /* called from terminal list command */ @@ -6762,7 +7036,7 @@ route_vty_out_detail (struct vty *vty, struct bgp *bgp, struct prefix *p, if (json_paths) json_object_int_add(json_path, "tag", attr->extra->tag); else - vty_out (vty, ", tag %d", attr->extra->tag); + vty_out (vty, ", tag %"ROUTE_TAG_PRI, attr->extra->tag); } if (! CHECK_FLAG (binfo->flags, BGP_INFO_VALID)) @@ -7159,6 +7433,7 @@ bgp_show_table (struct vty *vty, struct bgp_table *table, int header = 1; int display; unsigned long output_count; + unsigned long total_count; struct prefix *p; char buf[BUFSIZ]; char buf2[BUFSIZ]; @@ -7177,6 +7452,7 @@ bgp_show_table (struct vty *vty, struct bgp_table *table, /* This is first entry point, so reset total line. */ output_count = 0; + total_count = 0; /* Start processing of routes. 
*/ for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn)) @@ -7191,6 +7467,7 @@ bgp_show_table (struct vty *vty, struct bgp_table *table, for (ri = rn->info; ri; ri = ri->next) { + total_count++; if (type == bgp_show_type_flap_statistics || type == bgp_show_type_flap_neighbor || type == bgp_show_type_dampend_paths @@ -7350,6 +7627,9 @@ bgp_show_table (struct vty *vty, struct bgp_table *table, if (use_json) { + /* This can produce a LOT of text so do not use + * JSON_C_TO_STRING_PRETTY here + */ json_object_object_add(json, "routes", json_routes); vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); json_object_free(json); @@ -7360,11 +7640,11 @@ bgp_show_table (struct vty *vty, struct bgp_table *table, if (output_count == 0) { if (type == bgp_show_type_normal) - vty_out (vty, "No BGP network exists%s", VTY_NEWLINE); + vty_out (vty, "No BGP prefixes displayed, %ld exist%s", total_count, VTY_NEWLINE); } else - vty_out (vty, "%sTotal number of prefixes %ld%s", - VTY_NEWLINE, output_count, VTY_NEWLINE); + vty_out (vty, "%sDisplayed %ld out of %ld total prefixes%s", + VTY_NEWLINE, output_count, total_count, VTY_NEWLINE); } return CMD_SUCCESS; @@ -7680,7 +7960,7 @@ bgp_show_route_in_table (struct vty *vty, struct bgp *bgp, if (display) json_object_object_add(json, "paths", json_paths); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -10258,17 +10538,16 @@ bgp_route_init (void) install_element (VIEW_NODE, &show_ip_bgp_ipv4_dampening_parameters_cmd); /* Restricted node: VIEW_NODE - (set of dangerous commands) */ - install_element (RESTRICTED_NODE, &show_ip_bgp_route_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_all_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_ipv4_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_route_cmd); + install_element (VIEW_NODE, &show_ip_bgp_instance_all_cmd); + install_element (VIEW_NODE, &show_ip_bgp_ipv4_cmd); + install_element (VIEW_NODE, &show_ip_bgp_route_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_neighbor_advertised_route_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_neighbor_routes_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_neighbor_received_prefix_filter_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_dampening_params_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_ipv4_dampening_parameters_cmd); + install_element (VIEW_NODE, &show_ip_bgp_instance_neighbor_advertised_route_cmd); + install_element (VIEW_NODE, &show_ip_bgp_neighbor_routes_cmd); + install_element (VIEW_NODE, &show_ip_bgp_neighbor_received_prefix_filter_cmd); + install_element (VIEW_NODE, &show_ip_bgp_dampening_params_cmd); + install_element (VIEW_NODE, &show_ip_bgp_ipv4_dampening_parameters_cmd); /* BGP dampening clear commands */ install_element (ENABLE_NODE, &clear_ip_bgp_dampening_cmd); diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 60c406775d..0dce5da572 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -49,6 +49,30 @@ struct bgp_info_extra /* MPLS label. 
*/ u_char tag[3]; + +#if ENABLE_BGP_VNC + union { + + struct { + void *rfapi_handle; /* export: NVE advertising this route */ + struct list *local_nexthops; /* optional, for static routes */ + } export; + + struct { + void *timer; + void *hme; /* encap monitor, if this is a VPN route */ + struct prefix_rd rd; /* import: route's route-distinguisher */ + u_char un_family; /* family of cached un address, 0 if unset */ + union { + struct in_addr addr4; + struct in6_addr addr6; + } un; /* cached un address */ + time_t create_time; + struct prefix aux_prefix; /* AFI_ETHER: the IP addr, if family set */ + } import; + + } vnc; +#endif }; struct bgp_info @@ -111,6 +135,9 @@ struct bgp_info #define BGP_ROUTE_STATIC 1 #define BGP_ROUTE_AGGREGATE 2 #define BGP_ROUTE_REDISTRIBUTE 3 +#ifdef ENABLE_BGP_VNC +# define BGP_ROUTE_RFP 4 +#endif u_short instance; @@ -244,7 +271,7 @@ extern int bgp_maximum_prefix_overflow (struct peer *, afi_t, safi_t, int); extern void bgp_redistribute_add (struct bgp *, struct prefix *, const struct in_addr *, const struct in6_addr *, unsigned int ifindex, - u_int32_t, u_char, u_short, u_short); + u_int32_t, u_char, u_short, route_tag_t); extern void bgp_redistribute_delete (struct bgp *, struct prefix *, u_char, u_short); extern void bgp_redistribute_withdraw (struct bgp *, afi_t, int, u_short); @@ -309,4 +336,14 @@ extern int subgroup_announce_check(struct bgp_info *ri, extern void bgp_peer_clear_node_queue_drain_immediate (struct peer *peer); extern void bgp_process_queues_drain_immediate (void); +/* for encap/vpn */ +extern struct bgp_node * +bgp_afi_node_get (struct bgp_table *, afi_t , safi_t , struct prefix *, + struct prefix_rd *); +extern struct bgp_info *bgp_info_new (void); +extern void bgp_info_restore (struct bgp_node *, struct bgp_info *); + +extern int bgp_info_cmp_compatible (struct bgp *, struct bgp_info *, + struct bgp_info *, afi_t, safi_t ); + #endif /* _QUAGGA_BGP_ROUTE_H */ diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 33a29e8f4c..028158e520 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -59,6 +59,9 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "bgpd/bgp_vty.h" #include "bgpd/bgp_debug.h" +#if ENABLE_BGP_VNC +# include "bgpd/rfapi/bgp_rfapi_cfg.h" +#endif /* Memo of route-map commands. @@ -108,7 +111,7 @@ o Local extensions set ipv6 next-hop local : Done set as-path exclude : Done -*/ +*/ /* generic value manipulation to be shared in multiple rules */ @@ -330,7 +333,7 @@ struct route_map_rule_cmd route_match_peer_cmd = /* Match function should return 1 if match is success else return zero. */ static route_map_result_t -route_match_ip_address (void *rule, struct prefix *prefix, +route_match_ip_address (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct access_list *alist; @@ -341,7 +344,7 @@ route_match_ip_address (void *rule, struct prefix *prefix, alist = access_list_lookup (AFI_IP, (char *) rule); if (alist == NULL) return RMAP_NOMATCH; - + return (access_list_apply (alist, prefix) == FILTER_DENY ? RMAP_NOMATCH : RMAP_MATCH); } @@ -376,7 +379,7 @@ struct route_map_rule_cmd route_match_ip_address_cmd = /* Match function return 1 if match is success else return zero. 
*/ static route_map_result_t -route_match_ip_next_hop (void *rule, struct prefix *prefix, +route_match_ip_next_hop (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct access_list *alist; @@ -428,7 +431,7 @@ struct route_map_rule_cmd route_match_ip_next_hop_cmd = /* Match function return 1 if match is success else return zero. */ static route_map_result_t -route_match_ip_route_source (void *rule, struct prefix *prefix, +route_match_ip_route_source (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct access_list *alist; @@ -485,7 +488,7 @@ struct route_map_rule_cmd route_match_ip_route_source_cmd = /* `match ip address prefix-list PREFIX_LIST' */ static route_map_result_t -route_match_ip_address_prefix_list (void *rule, struct prefix *prefix, +route_match_ip_address_prefix_list (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct prefix_list *plist; @@ -495,7 +498,7 @@ route_match_ip_address_prefix_list (void *rule, struct prefix *prefix, plist = prefix_list_lookup (AFI_IP, (char *) rule); if (plist == NULL) return RMAP_NOMATCH; - + return (prefix_list_apply (plist, prefix) == PREFIX_DENY ? RMAP_NOMATCH : RMAP_MATCH); } @@ -692,7 +695,7 @@ struct route_map_rule_cmd route_match_local_pref_cmd = /* Match function return 1 if match is success else return zero. */ static route_map_result_t -route_match_metric (void *rule, struct prefix *prefix, +route_match_metric (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct rmap_value *rv; @@ -720,10 +723,10 @@ struct route_map_rule_cmd route_match_metric_cmd = /* Match function for as-path match. I assume given object is */ static route_map_result_t -route_match_aspath (void *rule, struct prefix *prefix, +route_match_aspath (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - + struct as_list *as_list; struct bgp_info *bgp_info; @@ -756,7 +759,7 @@ route_match_aspath_free (void *rule) } /* Route map commands for aspath matching. */ -struct route_map_rule_cmd route_match_aspath_cmd = +struct route_map_rule_cmd route_match_aspath_cmd = { "as-path", route_match_aspath, @@ -773,14 +776,14 @@ struct rmap_community /* Match function for community match. */ static route_map_result_t -route_match_community (void *rule, struct prefix *prefix, +route_match_community (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct community_list *list; struct bgp_info *bgp_info; struct rmap_community *rcom; - if (type == RMAP_BGP) + if (type == RMAP_BGP) { bgp_info = object; rcom = rule; @@ -835,12 +838,12 @@ route_match_community_free (void *rule) { struct rmap_community *rcom = rule; - XFREE (MTYPE_ROUTE_MAP_COMPILED, rcom->name); + XFREE (MTYPE_ROUTE_MAP_COMPILED, rcom->name); XFREE (MTYPE_ROUTE_MAP_COMPILED, rcom); } /* Route map commands for community matching. */ -struct route_map_rule_cmd route_match_community_cmd = +struct route_map_rule_cmd route_match_community_cmd = { "community", route_match_community, @@ -850,19 +853,19 @@ struct route_map_rule_cmd route_match_community_cmd = /* Match function for extcommunity match. 
*/ static route_map_result_t -route_match_ecommunity (void *rule, struct prefix *prefix, +route_match_ecommunity (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct community_list *list; struct bgp_info *bgp_info; - if (type == RMAP_BGP) + if (type == RMAP_BGP) { bgp_info = object; - + if (!bgp_info->attr->extra) return RMAP_NOMATCH; - + list = community_list_lookup (bgp_clist, (char *) rule, EXTCOMMUNITY_LIST_MASTER); if (! list) @@ -889,7 +892,7 @@ route_match_ecommunity_free (void *rule) } /* Route map commands for community matching. */ -struct route_map_rule_cmd route_match_ecommunity_cmd = +struct route_map_rule_cmd route_match_ecommunity_cmd = { "extcommunity", route_match_ecommunity, @@ -902,7 +905,7 @@ struct route_map_rule_cmd route_match_ecommunity_cmd = /* `match origin' */ static route_map_result_t -route_match_origin (void *rule, struct prefix *prefix, +route_match_origin (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { u_char *origin; @@ -912,7 +915,7 @@ route_match_origin (void *rule, struct prefix *prefix, { origin = rule; bgp_info = object; - + if (bgp_info->attr->origin == *origin) return RMAP_MATCH; } @@ -1068,7 +1071,7 @@ static route_map_result_t route_match_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct bgp_info *bgp_info; if (type == RMAP_BGP) @@ -1086,46 +1089,13 @@ route_match_tag (void *rule, struct prefix *prefix, } -/* Route map `match tag' match statement. `arg' is TAG value */ -static void * -route_match_tag_compile (const char *arg) -{ - u_short *tag; - u_short tmp; - - /* tag value shoud be integer. */ - if (! all_digit (arg)) - return NULL; - - tmp = atoi(arg); - if (tmp < 1) - return NULL; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - - if (!tag) - return tag; - - *tag = tmp; - - return tag; -} - - -/* Free route map's compiled 'match tag' value. */ -static void -route_match_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - /* Route map commands for tag matching. */ -struct route_map_rule_cmd route_match_tag_cmd = +static struct route_map_rule_cmd route_match_tag_cmd = { "tag", route_match_tag, - route_match_tag_compile, - route_match_tag_free, + route_map_rule_tag_compile, + route_map_rule_tag_free, }; @@ -1159,7 +1129,7 @@ route_set_ip_nexthop (void *rule, struct prefix *prefix, { if ((CHECK_FLAG (peer->rmap_type, PEER_RMAP_TYPE_IN) || CHECK_FLAG (peer->rmap_type, PEER_RMAP_TYPE_IMPORT)) - && peer->su_remote + && peer->su_remote && sockunion_family (peer->su_remote) == AF_INET) { bgp_info->attr->nexthop.s_addr = sockunion2ip (peer->su_remote); @@ -1233,7 +1203,7 @@ route_set_ip_nexthop_free (void *rule) if (rins->address) XFREE (MTYPE_ROUTE_MAP_COMPILED, rins->address); - + XFREE (MTYPE_ROUTE_MAP_COMPILED, rins); } @@ -1262,8 +1232,8 @@ route_set_local_pref (void *rule, struct prefix *prefix, /* Fetch routemap's rule information. */ rv = rule; bgp_info = object; - - /* Set local preference value. */ + + /* Set local preference value. */ if (bgp_info->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)) locpref = bgp_info->attr->local_pref; @@ -1275,7 +1245,7 @@ route_set_local_pref (void *rule, struct prefix *prefix, } /* Set local preference rule structure. 
*/ -struct route_map_rule_cmd route_set_local_pref_cmd = +struct route_map_rule_cmd route_set_local_pref_cmd = { "local-preference", route_set_local_pref, @@ -1299,8 +1269,8 @@ route_set_weight (void *rule, struct prefix *prefix, route_map_object_t type, /* Fetch routemap's rule information. */ rv = rule; bgp_info = object; - - /* Set weight value. */ + + /* Set weight value. */ weight = route_value_adjust(rv, 0, bgp_info->peer); if (weight) (bgp_attr_extra_get (bgp_info->attr))->weight = weight; @@ -1312,7 +1282,7 @@ route_set_weight (void *rule, struct prefix *prefix, route_map_object_t type, } /* Set local preference rule structure. */ -struct route_map_rule_cmd route_set_weight_cmd = +struct route_map_rule_cmd route_set_weight_cmd = { "weight", route_set_weight, @@ -1324,7 +1294,7 @@ struct route_map_rule_cmd route_set_weight_cmd = /* Set metric to attribute. */ static route_map_result_t -route_set_metric (void *rule, struct prefix *prefix, +route_set_metric (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct rmap_value *rv; @@ -1347,7 +1317,7 @@ route_set_metric (void *rule, struct prefix *prefix, } /* Set metric rule structure. */ -struct route_map_rule_cmd route_set_metric_cmd = +struct route_map_rule_cmd route_set_metric_cmd = { "metric", route_set_metric, @@ -1368,7 +1338,7 @@ route_set_aspath_prepend (void *rule, struct prefix *prefix, route_map_object_t if (type == RMAP_BGP) { binfo = object; - + if (binfo->attr->aspath->refcnt) new = aspath_dup (binfo->attr->aspath); else @@ -1412,7 +1382,7 @@ route_set_aspath_prepend_free (void *rule) /* Set as-path prepend rule structure. */ -struct route_map_rule_cmd route_set_aspath_prepend_cmd = +struct route_map_rule_cmd route_set_aspath_prepend_cmd = { "as-path prepend", route_set_aspath_prepend, @@ -1446,7 +1416,7 @@ route_set_aspath_exclude (void *rule, struct prefix *dummy, route_map_object_t t } /* Set ASn exlude rule structure. */ -struct route_map_rule_cmd route_set_aspath_exclude_cmd = +struct route_map_rule_cmd route_set_aspath_exclude_cmd = { "as-path exclude", route_set_aspath_exclude, @@ -1473,7 +1443,7 @@ route_set_community (void *rule, struct prefix *prefix, struct community *new = NULL; struct community *old; struct community *merge; - + if (type == RMAP_BGP) { rcs = rule; @@ -1496,8 +1466,8 @@ route_set_community (void *rule, struct prefix *prefix, if (rcs->additive && old) { merge = community_merge (community_dup (old), rcs->com); - - /* HACK: if the old community is not intern'd, + + /* HACK: if the old community is not intern'd, * we should free it here, or all reference to it may be lost. * Really need to cleanup attribute caching sometime. */ @@ -1508,7 +1478,7 @@ route_set_community (void *rule, struct prefix *prefix, } else new = community_dup (rcs->com); - + /* will be interned by caller if required */ attr->community = new; @@ -1527,7 +1497,7 @@ route_set_community_compile (const char *arg) char *sp; int additive = 0; int none = 0; - + if (strcmp (arg, "none") == 0) none = 1; else @@ -1549,12 +1519,12 @@ route_set_community_compile (const char *arg) if (! com) return NULL; } - + rcs = XCALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (struct rmap_com_set)); rcs->com = com; rcs->additive = additive; rcs->none = none; - + return rcs; } @@ -1570,7 +1540,7 @@ route_set_community_free (void *rule) } /* Set community rule structure. 
*/ -struct route_map_rule_cmd route_set_community_cmd = +struct route_map_rule_cmd route_set_community_cmd = { "community", route_set_community, @@ -1683,10 +1653,10 @@ route_set_ecommunity (void *rule, struct prefix *prefix, { ecom = rule; bgp_info = object; - + if (! ecom) return RMAP_OKAY; - + /* We assume additive for Extended Community. */ old_ecom = (bgp_attr_extra_get (bgp_info->attr))->ecommunity; @@ -1731,7 +1701,7 @@ route_set_ecommunity_free (void *rule) } /* Set community rule structure. */ -struct route_map_rule_cmd route_set_ecommunity_rt_cmd = +struct route_map_rule_cmd route_set_ecommunity_rt_cmd = { "extcommunity rt", route_set_ecommunity, @@ -1750,12 +1720,12 @@ route_set_ecommunity_soo_compile (const char *arg) ecom = ecommunity_str2com (arg, ECOMMUNITY_SITE_ORIGIN, 0); if (! ecom) return NULL; - + return ecommunity_intern (ecom); } /* Set community rule structure. */ -struct route_map_rule_cmd route_set_ecommunity_soo_cmd = +struct route_map_rule_cmd route_set_ecommunity_soo_cmd = { "extcommunity soo", route_set_ecommunity, @@ -1776,7 +1746,7 @@ route_set_origin (void *rule, struct prefix *prefix, route_map_object_t type, vo { origin = rule; bgp_info = object; - + bgp_info->attr->origin = *origin; } @@ -1809,7 +1779,7 @@ route_set_origin_free (void *rule) } /* Set origin rule structure. */ -struct route_map_rule_cmd route_set_origin_cmd = +struct route_map_rule_cmd route_set_origin_cmd = { "origin", route_set_origin, @@ -1850,7 +1820,7 @@ route_set_atomic_aggregate_free (void *rule) } /* Set atomic aggregate rule structure. */ -struct route_map_rule_cmd route_set_atomic_aggregate_cmd = +struct route_map_rule_cmd route_set_atomic_aggregate_cmd = { "atomic-aggregate", route_set_atomic_aggregate, @@ -1866,7 +1836,7 @@ struct aggregator }; static route_map_result_t -route_set_aggregator_as (void *rule, struct prefix *prefix, +route_set_aggregator_as (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct bgp_info *bgp_info; @@ -1878,7 +1848,7 @@ route_set_aggregator_as (void *rule, struct prefix *prefix, bgp_info = object; aggregator = rule; ae = bgp_attr_extra_get (bgp_info->attr); - + ae->aggregator_as = aggregator->as; ae->aggregator_addr = aggregator->address; bgp_info->attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_AGGREGATOR); @@ -1909,7 +1879,7 @@ route_set_aggregator_as_free (void *rule) XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); } -struct route_map_rule_cmd route_set_aggregator_as_cmd = +struct route_map_rule_cmd route_set_aggregator_as_cmd = { "aggregator as", route_set_aggregator_as, @@ -1922,7 +1892,7 @@ static route_map_result_t route_set_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct bgp_info *bgp_info; struct attr_extra *ae; @@ -1940,47 +1910,13 @@ route_set_tag (void *rule, struct prefix *prefix, return RMAP_OKAY; } -/* Route map `tag' compile function. Given string is converted to u_short. */ -static void * -route_set_tag_compile (const char *arg) -{ - u_short *tag; - u_short tmp; - - /* tag value shoud be integer. */ - if (! all_digit (arg)) - return NULL; - - tmp = atoi(arg); - - if (tmp < 1) - return NULL; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - - if (!tag) - return tag; - - *tag = tmp; - - return tag; -} - -/* Free route map's tag value. */ -static void -route_set_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - - /* Route map commands for tag set. 
*/ -struct route_map_rule_cmd route_set_tag_cmd = +static struct route_map_rule_cmd route_set_tag_cmd = { "tag", route_set_tag, - route_set_tag_compile, - route_set_tag_free, + route_map_rule_tag_compile, + route_map_rule_tag_free, }; @@ -1988,7 +1924,7 @@ struct route_map_rule_cmd route_set_tag_cmd = /* `match ipv6 address IP_ACCESS_LIST' */ static route_map_result_t -route_match_ipv6_address (void *rule, struct prefix *prefix, +route_match_ipv6_address (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct access_list *alist; @@ -1998,7 +1934,7 @@ route_match_ipv6_address (void *rule, struct prefix *prefix, alist = access_list_lookup (AFI_IP6, (char *) rule); if (alist == NULL) return RMAP_NOMATCH; - + return (access_list_apply (alist, prefix) == FILTER_DENY ? RMAP_NOMATCH : RMAP_MATCH); } @@ -2029,7 +1965,7 @@ struct route_map_rule_cmd route_match_ipv6_address_cmd = /* `match ipv6 next-hop IP_ADDRESS' */ static route_map_result_t -route_match_ipv6_next_hop (void *rule, struct prefix *prefix, +route_match_ipv6_next_hop (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct in6_addr *addr = rule; @@ -2038,10 +1974,10 @@ route_match_ipv6_next_hop (void *rule, struct prefix *prefix, if (type == RMAP_BGP) { bgp_info = object; - + if (!bgp_info->attr->extra) return RMAP_NOMATCH; - + if (IPV6_ADDR_SAME (&bgp_info->attr->extra->mp_nexthop_global, addr)) return RMAP_MATCH; @@ -2090,7 +2026,7 @@ struct route_map_rule_cmd route_match_ipv6_next_hop_cmd = /* `match ipv6 address prefix-list PREFIX_LIST' */ static route_map_result_t -route_match_ipv6_address_prefix_list (void *rule, struct prefix *prefix, +route_match_ipv6_address_prefix_list (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct prefix_list *plist; @@ -2100,7 +2036,7 @@ route_match_ipv6_address_prefix_list (void *rule, struct prefix *prefix, plist = prefix_list_lookup (AFI_IP6, (char *) rule); if (plist == NULL) return RMAP_NOMATCH; - + return (prefix_list_apply (plist, prefix) == PREFIX_DENY ? RMAP_NOMATCH : RMAP_MATCH); } @@ -2131,7 +2067,7 @@ struct route_map_rule_cmd route_match_ipv6_address_prefix_list_cmd = /* Set nexthop to object. ojbect must be pointer to struct attr. */ static route_map_result_t -route_set_ipv6_nexthop_global (void *rule, struct prefix *prefix, +route_set_ipv6_nexthop_global (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct in6_addr *address; @@ -2142,8 +2078,8 @@ route_set_ipv6_nexthop_global (void *rule, struct prefix *prefix, /* Fetch routemap's rule information. */ address = rule; bgp_info = object; - - /* Set next hop value. */ + + /* Set next hop value. */ (bgp_attr_extra_get (bgp_info->attr))->mp_nexthop_global = *address; /* Set nexthop length. */ @@ -2259,7 +2195,7 @@ struct route_map_rule_cmd route_set_ipv6_nexthop_prefer_global_cmd = /* Set nexthop to object. ojbect must be pointer to struct attr. */ static route_map_result_t -route_set_ipv6_nexthop_local (void *rule, struct prefix *prefix, +route_set_ipv6_nexthop_local (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct in6_addr *address; @@ -2270,10 +2206,10 @@ route_set_ipv6_nexthop_local (void *rule, struct prefix *prefix, /* Fetch routemap's rule information. */ address = rule; bgp_info = object; - - /* Set next hop value. */ + + /* Set next hop value. */ (bgp_attr_extra_get (bgp_info->attr))->mp_nexthop_local = *address; - + /* Set nexthop length. 
*/ if (bgp_info->attr->extra->mp_nexthop_len != BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) bgp_info->attr->extra->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL; @@ -2418,7 +2354,7 @@ struct route_map_rule_cmd route_set_ipv6_nexthop_peer_cmd = /* `set vpnv4 nexthop A.B.C.D' */ static route_map_result_t -route_set_vpnv4_nexthop (void *rule, struct prefix *prefix, +route_set_vpnv4_nexthop (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { struct in_addr *address; @@ -2429,8 +2365,8 @@ route_set_vpnv4_nexthop (void *rule, struct prefix *prefix, /* Fetch routemap's rule information. */ address = rule; bgp_info = object; - - /* Set next hop value. */ + + /* Set next hop value. */ (bgp_attr_extra_get (bgp_info->attr))->mp_nexthop_global_in = *address; (bgp_attr_extra_get (bgp_info->attr))->mp_nexthop_len = 4; } @@ -2481,11 +2417,11 @@ route_set_originator_id (void *rule, struct prefix *prefix, route_map_object_t t struct in_addr *address; struct bgp_info *bgp_info; - if (type == RMAP_BGP) + if (type == RMAP_BGP) { address = rule; bgp_info = object; - + bgp_info->attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_ORIGINATOR_ID); (bgp_attr_extra_get (bgp_info->attr))->originator_id = *address; } @@ -2521,7 +2457,7 @@ route_set_originator_id_free (void *rule) } /* Set originator-id rule structure. */ -struct route_map_rule_cmd route_set_originator_id_cmd = +struct route_map_rule_cmd route_set_originator_id_cmd = { "originator-id", route_set_originator_id, @@ -2879,6 +2815,10 @@ bgp_route_map_process_update_cb (char *rmap_name) for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp)) bgp_route_map_process_update(bgp, rmap_name, 1); +#if ENABLE_BGP_VNC + zlog_debug("%s: calling vnc_routemap_update", __func__); + vnc_routemap_update(bgp, __func__); +#endif return 0; } @@ -2915,6 +2855,10 @@ bgp_route_map_mark_update (const char *rmap_name) { for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp)) bgp_route_map_process_update(bgp, rmap_name, 0); + #if ENABLE_BGP_VNC + zlog_debug("%s: calling vnc_routemap_update", __func__); + vnc_routemap_update(bgp, __func__); +#endif } } } @@ -3718,7 +3662,7 @@ DEFUN (set_aggregator_as, int ret; struct in_addr address; char *argstr; - + ret = inet_aton (argv[idx_ipv4]->arg, &address); if (ret == 0) { @@ -3757,7 +3701,7 @@ DEFUN (no_set_aggregator_as, if (argc <= idx_asn) return generic_set_delete (vty, vty->index, "aggregator as", NULL); - + ret = inet_aton (argv[idx_ip]->arg, &address); if (ret == 0) { @@ -3777,7 +3721,6 @@ DEFUN (no_set_aggregator_as, return ret; } - #ifdef HAVE_IPV6 DEFUN (match_ipv6_next_hop, match_ipv6_next_hop_cmd, diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index 29e6243a1a..77b979e56e 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -140,7 +140,6 @@ conf_copy (struct peer *dst, struct peer *src, afi_t afi, safi_t safi) dst->bgp = src->bgp; dst->sort = src->sort; dst->as = src->as; - dst->weight = src->weight; dst->v_routeadv = src->v_routeadv; dst->flags = src->flags; dst->af_flags[afi][safi] = src->af_flags[afi][safi]; diff --git a/bgpd/bgp_vnc_types.h b/bgpd/bgp_vnc_types.h new file mode 100644 index 0000000000..8bc9cb6407 --- /dev/null +++ b/bgpd/bgp_vnc_types.h @@ -0,0 +1,41 @@ +/* + * Copyright 2015-2016, LabN Consulting, L.L.C. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _QUAGGA_BGP_VNC_TYPES_H +#define _QUAGGA_BGP_VNC_TYPES_H + +#if ENABLE_BGP_VNC +typedef enum { + BGP_VNC_SUBTLV_TYPE_LIFETIME=1, + BGP_VNC_SUBTLV_TYPE_RFPOPTION=2, /* deprecated */ +} bgp_vnc_subtlv_types; + +/* + * VNC Attribute subtlvs + */ +struct bgp_vnc_subtlv_lifetime { + uint32_t lifetime; +}; + +struct bgp_vnc_subtlv_unaddr { + struct prefix un_address; /* IPv4 or IPv6; pfx length ignored */ +}; + +#endif /* ENABLE_BGP_VNC */ +#endif /* _QUAGGA_BGP_VNC_TYPES_H */ diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index b5e2d9e36f..0f8d0c76dd 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -702,9 +702,9 @@ DEFUN (router_bgp, { name = argv[idx_vrf]->arg; - if (!strcmp(argv[idx_view_vrf]->text, "vrf")) + if (!strcmp(argv[idx_view_vrf]->text, "vrf")) inst_type = BGP_INSTANCE_TYPE_VRF; - else if (!strcmp(argv[idx_view_vrf]->text, "view")) + else if (!strcmp(argv[idx_view_vrf]->text, "view")) inst_type = BGP_INSTANCE_TYPE_VIEW; } @@ -1999,7 +1999,7 @@ DEFUN (bgp_bestpath_med, { int idx_med_knob = 3; struct bgp *bgp; - + bgp = vty->index; if (strncmp (argv[idx_med_knob]->arg, "confed", 1) == 0) @@ -2024,7 +2024,7 @@ DEFUN (bgp_bestpath_med2, "Compare MED among confederation paths\n") { struct bgp *bgp; - + bgp = vty->index; bgp_flag_set (bgp, BGP_FLAG_MED_CONFED); bgp_flag_set (bgp, BGP_FLAG_MED_MISSING_AS_WORST); @@ -2048,7 +2048,7 @@ DEFUN (no_bgp_bestpath_med, struct bgp *bgp; bgp = vty->index; - + if (strncmp (argv[idx_med_knob]->arg, "confed", 1) == 0) bgp_flag_unset (bgp, BGP_FLAG_MED_CONFED); else @@ -2070,7 +2070,7 @@ DEFUN (no_bgp_bestpath_med2, "Treat missing MED as the least preferred one\n") { struct bgp *bgp; - + bgp = vty->index; bgp_flag_unset (bgp, BGP_FLAG_MED_CONFED); bgp_flag_unset (bgp, BGP_FLAG_MED_MISSING_AS_WORST); @@ -2571,7 +2571,7 @@ DEFUN (no_bgp_disable_connected_route_check, static int -peer_remote_as_vty (struct vty *vty, const char *peer_str, +peer_remote_as_vty (struct vty *vty, const char *peer_str, const char *as_str, afi_t afi, safi_t safi) { int ret; @@ -3318,7 +3318,7 @@ DEFUN (no_neighbor_set_peer_group, } static int -peer_flag_modify_vty (struct vty *vty, const char *ip_str, +peer_flag_modify_vty (struct vty *vty, const char *ip_str, u_int16_t flag, int set) { int ret; @@ -4101,7 +4101,7 @@ DEFUN (neighbor_attr_unchanged4, DEFUN (no_neighbor_attr_unchanged, no_neighbor_attr_unchanged_cmd, "no neighbor attribute-unchanged [as-path] [next-hop] [med]", - NO_STR + NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 "BGP attribute is propagated unchanged to this neighbor\n" @@ -4223,7 +4223,7 @@ DEFUN (no_neighbor_attr_unchanged4, /* EBGP multihop configuration. 
*/ static int -peer_ebgp_multihop_set_vty (struct vty *vty, const char *ip_str, +peer_ebgp_multihop_set_vty (struct vty *vty, const char *ip_str, const char *ttl_str) { struct peer *peer; @@ -4245,7 +4245,7 @@ peer_ebgp_multihop_set_vty (struct vty *vty, const char *ip_str, } static int -peer_ebgp_multihop_unset_vty (struct vty *vty, const char *ip_str) +peer_ebgp_multihop_unset_vty (struct vty *vty, const char *ip_str) { struct peer *peer; @@ -4371,7 +4371,7 @@ DEFUN (no_neighbor_description, /* Neighbor update-source. */ static int -peer_update_source_vty (struct vty *vty, const char *peer_str, +peer_update_source_vty (struct vty *vty, const char *peer_str, const char *source_str) { struct peer *peer; @@ -4431,8 +4431,8 @@ DEFUN (no_neighbor_update_source, } static int -peer_default_originate_set_vty (struct vty *vty, const char *peer_str, - afi_t afi, safi_t safi, +peer_default_originate_set_vty (struct vty *vty, const char *peer_str, + afi_t afi, safi_t safi, const char *rmap, int set) { int ret; @@ -4496,7 +4496,7 @@ DEFUN (no_neighbor_default_originate, /* Set neighbor's BGP port. */ static int -peer_port_vty (struct vty *vty, const char *ip_str, int afi, +peer_port_vty (struct vty *vty, const char *ip_str, int afi, const char *port_str) { struct peer *peer; @@ -4508,7 +4508,7 @@ peer_port_vty (struct vty *vty, const char *ip_str, int afi, return CMD_WARNING; if (! port_str) - { + { sp = getservbyname ("bgp", "tcp"); port = (sp == NULL) ? BGP_PORT_DEFAULT : ntohs (sp->s_port); } @@ -4552,7 +4552,8 @@ DEFUN (no_neighbor_port, /* neighbor weight. */ static int -peer_weight_set_vty (struct vty *vty, const char *ip_str, +peer_weight_set_vty (struct vty *vty, const char *ip_str, + afi_t afi, safi_t safi, const char *weight_str) { int ret; @@ -4565,12 +4566,13 @@ peer_weight_set_vty (struct vty *vty, const char *ip_str, VTY_GET_INTEGER_RANGE("weight", weight, weight_str, 0, 65535); - ret = peer_weight_set (peer, weight); + ret = peer_weight_set (peer, afi, safi, weight); return bgp_vty_return (vty, ret); } static int -peer_weight_unset_vty (struct vty *vty, const char *ip_str) +peer_weight_unset_vty (struct vty *vty, const char *ip_str, + afi_t afi, safi_t safi) { int ret; struct peer *peer; @@ -4579,7 +4581,7 @@ peer_weight_unset_vty (struct vty *vty, const char *ip_str) if (! 
peer) return CMD_WARNING; - ret = peer_weight_unset (peer); + ret = peer_weight_unset (peer, afi, safi); return bgp_vty_return (vty, ret); } @@ -4593,7 +4595,11 @@ DEFUN (neighbor_weight, { int idx_peer = 1; int idx_number = 3; - return peer_weight_set_vty (vty, argv[idx_peer]->arg, argv[idx_number]->arg); + return peer_weight_set_vty (vty, + argv[idx_peer]->arg, + bgp_node_afi (vty), + bgp_node_safi (vty), + argv[idx_number]->arg); } DEFUN (no_neighbor_weight, @@ -4606,7 +4612,7 @@ DEFUN (no_neighbor_weight, "default weight\n") { int idx_peer = 2; - return peer_weight_unset_vty (vty, argv[idx_peer]->arg); + return peer_weight_unset_vty (vty, argv[idx_peer]->arg, bgp_node_afi (vty), bgp_node_safi (vty)); } @@ -4658,7 +4664,7 @@ DEFUN (no_neighbor_strict_capability, } static int -peer_timers_set_vty (struct vty *vty, const char *ip_str, +peer_timers_set_vty (struct vty *vty, const char *ip_str, const char *keep_str, const char *hold_str) { int ret; @@ -4724,7 +4730,7 @@ DEFUN (no_neighbor_timers, static int -peer_timers_connect_set_vty (struct vty *vty, const char *ip_str, +peer_timers_connect_set_vty (struct vty *vty, const char *ip_str, const char *time_str) { int ret; @@ -4787,8 +4793,8 @@ DEFUN (no_neighbor_timers_connect, static int -peer_advertise_interval_vty (struct vty *vty, const char *ip_str, - const char *time_str, int set) +peer_advertise_interval_vty (struct vty *vty, const char *ip_str, + const char *time_str, int set) { int ret; struct peer *peer; @@ -4928,7 +4934,7 @@ DEFUN (no_neighbor_interface, /* Set distribute list to the peer. */ static int -peer_distribute_set_vty (struct vty *vty, const char *ip_str, +peer_distribute_set_vty (struct vty *vty, const char *ip_str, afi_t afi, safi_t safi, const char *name_str, const char *direct_str) { @@ -5015,7 +5021,7 @@ DEFUN (no_neighbor_distribute_list, /* Set prefix list to the peer. */ static int peer_prefix_list_set_vty (struct vty *vty, const char *ip_str, afi_t afi, - safi_t safi, const char *name_str, + safi_t safi, const char *name_str, const char *direct_str) { int ret; @@ -5048,7 +5054,7 @@ peer_prefix_list_unset_vty (struct vty *vty, const char *ip_str, afi_t afi, peer = peer_and_group_lookup_vty (vty, ip_str); if (! peer) return CMD_WARNING; - + /* Check filter direction. */ if (strncmp (direct_str, "i", 1) == 0) direct = FILTER_IN; @@ -5095,7 +5101,7 @@ DEFUN (no_neighbor_prefix_list, } static int -peer_aslist_set_vty (struct vty *vty, const char *ip_str, +peer_aslist_set_vty (struct vty *vty, const char *ip_str, afi_t afi, safi_t safi, const char *name_str, const char *direct_str) { @@ -5119,7 +5125,7 @@ peer_aslist_set_vty (struct vty *vty, const char *ip_str, } static int -peer_aslist_unset_vty (struct vty *vty, const char *ip_str, +peer_aslist_unset_vty (struct vty *vty, const char *ip_str, afi_t afi, safi_t safi, const char *direct_str) { @@ -5178,7 +5184,7 @@ DEFUN (no_neighbor_filter_list, /* Set route-map to the peer. 
*/ static int -peer_route_map_set_vty (struct vty *vty, const char *ip_str, +peer_route_map_set_vty (struct vty *vty, const char *ip_str, afi_t afi, safi_t safi, const char *name_str, const char *direct_str) { @@ -5322,7 +5328,7 @@ DEFUN (no_neighbor_unsuppress_map, static int peer_maximum_prefix_set_vty (struct vty *vty, const char *ip_str, afi_t afi, - safi_t safi, const char *num_str, + safi_t safi, const char *num_str, const char *threshold_str, int warning, const char *restart_str) { @@ -5489,7 +5495,7 @@ DEFUN (no_neighbor_maximum_prefix, return peer_maximum_prefix_unset_vty (vty, argv[idx_peer]->arg, bgp_node_afi (vty), bgp_node_safi (vty)); } - + /* "neighbor allowas-in" */ DEFUN (neighbor_allowas_in, @@ -5559,7 +5565,7 @@ DEFUN (neighbor_ttl_security, peer = peer_and_group_lookup_vty (vty, argv[idx_peer]->arg); if (! peer) return CMD_WARNING; - + VTY_GET_INTEGER_RANGE ("", gtsm_hops, argv[idx_number]->arg, 1, 254); /* @@ -6170,7 +6176,7 @@ DEFUN (show_bgp_views, vty_out (vty, "BGP Multiple Instance is not enabled%s", VTY_NEWLINE); return CMD_WARNING; } - + vty_out (vty, "Defined BGP views:%s", VTY_NEWLINE); for (ALL_LIST_ELEMENTS_RO(inst, node, bgp)) { @@ -6181,7 +6187,7 @@ DEFUN (show_bgp_views, bgp->name ? bgp->name : "(null)", bgp->as, VTY_NEWLINE); } - + return CMD_SUCCESS; } @@ -6280,7 +6286,7 @@ DEFUN (show_bgp_vrfs, json_object_int_add(json, "totalVrfs", count); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -6302,14 +6308,14 @@ DEFUN (show_bgp_memory, { char memstrbuf[MTYPE_MEMSTR_LEN]; unsigned long count; - + /* RIB related usage stats */ count = mtype_stats_alloc (MTYPE_BGP_NODE); vty_out (vty, "%ld RIB nodes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_node)), VTY_NEWLINE); - + count = mtype_stats_alloc (MTYPE_BGP_ROUTE); vty_out (vty, "%ld BGP routes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), @@ -6320,7 +6326,7 @@ DEFUN (show_bgp_memory, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_info_extra)), VTY_NEWLINE); - + if ((count = mtype_stats_alloc (MTYPE_BGP_STATIC))) vty_out (vty, "%ld Static routes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), @@ -6332,7 +6338,7 @@ DEFUN (show_bgp_memory, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bpacket)), VTY_NEWLINE); - + /* Adj-In/Out */ if ((count = mtype_stats_alloc (MTYPE_BGP_ADJ_IN))) vty_out (vty, "%ld Adj-In entries, using %s of memory%s", count, @@ -6344,7 +6350,7 @@ DEFUN (show_bgp_memory, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_adj_out)), VTY_NEWLINE); - + if ((count = mtype_stats_alloc (MTYPE_BGP_NEXTHOP_CACHE))) vty_out (vty, "%ld Nexthop cache entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), @@ -6359,32 +6365,32 @@ DEFUN (show_bgp_memory, /* Attributes */ count = attr_count(); - vty_out (vty, "%ld BGP attributes, using %s of memory%s", count, - mtype_memstr (memstrbuf, sizeof (memstrbuf), - count * sizeof(struct attr)), + vty_out (vty, "%ld BGP attributes, using %s of memory%s", count, + mtype_memstr (memstrbuf, sizeof (memstrbuf), + count * sizeof(struct attr)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_ATTR_EXTRA))) - vty_out (vty, "%ld BGP extra attributes, using %s of memory%s", count, - mtype_memstr (memstrbuf, 
sizeof (memstrbuf), - count * sizeof(struct attr_extra)), + vty_out (vty, "%ld BGP extra attributes, using %s of memory%s", count, + mtype_memstr (memstrbuf, sizeof (memstrbuf), + count * sizeof(struct attr_extra)), VTY_NEWLINE); - + if ((count = attr_unknown_count())) vty_out (vty, "%ld unknown attributes%s", count, VTY_NEWLINE); - + /* AS_PATH attributes */ count = aspath_count (); vty_out (vty, "%ld BGP AS-PATH entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct aspath)), VTY_NEWLINE); - + count = mtype_stats_alloc (MTYPE_AS_SEG); vty_out (vty, "%ld BGP AS-PATH segments, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct assegment)), VTY_NEWLINE); - + /* Other attributes */ if ((count = community_count ())) vty_out (vty, "%ld BGP community entries, using %s of memory%s", count, @@ -6396,26 +6402,26 @@ DEFUN (show_bgp_memory, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct ecommunity)), VTY_NEWLINE); - + if ((count = mtype_stats_alloc (MTYPE_CLUSTER))) vty_out (vty, "%ld Cluster lists, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct cluster_list)), VTY_NEWLINE); - + /* Peer related usage */ count = mtype_stats_alloc (MTYPE_BGP_PEER); vty_out (vty, "%ld peers, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct peer)), VTY_NEWLINE); - + if ((count = mtype_stats_alloc (MTYPE_PEER_GROUP))) vty_out (vty, "%ld peer groups, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct peer_group)), VTY_NEWLINE); - + /* Other */ if ((count = mtype_stats_alloc (MTYPE_HASH))) vty_out (vty, "%ld hash tables, using %s of memory%s", count, @@ -6614,7 +6620,7 @@ bgp_show_summary (struct vty *vty, struct bgp *bgp, int afi, int safi, vty_out (vty, "%s%s", header, VTY_NEWLINE); } } - + count++; if (use_json) @@ -6724,7 +6730,7 @@ bgp_show_summary (struct vty *vty, struct bgp *bgp, int afi, int safi, json_object_int_add(json, "totalPeers", count); json_object_int_add(json, "dynamicPeers", dn_count); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -6976,8 +6982,6 @@ bgp_show_peer_afi (struct vty *vty, struct peer *p, afi_t afi, safi_t safi, { json_addr = json_object_new_object(); json_af = json_object_new_object(); - json_prefA = json_object_new_object(); - json_prefB = json_object_new_object(); filter = &p->filter[afi][safi]; if (peer_group_active(p)) @@ -6997,6 +7001,7 @@ bgp_show_peer_afi (struct vty *vty, struct peer *p, afi_t afi, safi_t safi, || CHECK_FLAG (p->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_RCV)) { json_object_int_add(json_af, "orfType", ORF_TYPE_PREFIX); + json_prefA = json_object_new_object(); bgp_show_peer_afi_orf_cap (vty, p, afi, safi, PEER_CAP_ORF_PREFIX_SM_ADV, PEER_CAP_ORF_PREFIX_RM_ADV, @@ -7011,6 +7016,7 @@ bgp_show_peer_afi (struct vty *vty, struct peer *p, afi_t afi, safi_t safi, || CHECK_FLAG (p->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_OLD_RCV)) { json_object_int_add(json_af, "orfOldType", ORF_TYPE_PREFIX_OLD); + json_prefB = json_object_new_object(); bgp_show_peer_afi_orf_cap (vty, p, afi, safi, PEER_CAP_ORF_PREFIX_SM_ADV, PEER_CAP_ORF_PREFIX_RM_ADV, @@ -7026,6 +7032,8 @@ bgp_show_peer_afi (struct vty *vty, struct peer *p, afi_t afi, safi_t safi, || CHECK_FLAG 
(p->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_RCV) || CHECK_FLAG (p->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_OLD_RCV)) json_object_object_add(json_addr, "afDependentCap", json_af); + else + json_object_free(json_af); sprintf (orf_pfx_name, "%s.%d.%d", p->host, afi, safi); orf_pfx_count = prefix_bgp_show_prefix_list (NULL, afi, orf_pfx_name, use_json); @@ -7633,7 +7641,7 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js if (CHECK_FLAG (bgp->config, BGP_CONFIG_CONFEDERATION) && bgp_confederation_peers_check (bgp, p->as)) vty_out (vty, " Neighbor under common administration%s", VTY_NEWLINE); - + /* Status. */ vty_out (vty, " BGP state = %s", LOOKUP (bgp_status_msg, p->status)); @@ -7665,7 +7673,7 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js } } /* Capability. */ - if (p->status == Established) + if (p->status == Established) { if (p->cap || p->afc_adv[AFI_IP][SAFI_UNICAST] @@ -7748,6 +7756,8 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js CHECK_FLAG (p->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_RX_ADV) || CHECK_FLAG (p->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_RX_RCV)) json_object_object_add(json_add, print_store, json_sub); + else + json_object_free(json_sub); } json_object_object_add(json_cap, "addPath", json_add); @@ -7772,7 +7782,6 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js json_object *json_nxt = NULL; const char *print_store; - json_nxt = json_object_new_object(); if (CHECK_FLAG (p->cap, PEER_CAP_ENHE_ADV) && CHECK_FLAG (p->cap, PEER_CAP_ENHE_RCV)) json_object_string_add(json_cap, "extendedNexthop", "advertisedAndReceived"); @@ -7783,6 +7792,8 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js if (CHECK_FLAG (p->cap, PEER_CAP_ENHE_RCV)) { + json_nxt = json_object_new_object(); + for (safi = SAFI_UNICAST ; safi < SAFI_MAX ; safi++) { if (CHECK_FLAG (p->af_cap[AFI_IP][safi], PEER_CAP_ENHE_AF_RCV)) @@ -7880,7 +7891,10 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js } } if (! 
restart_af_count) + { json_object_string_add(json_cap, "addressFamiliesByPeer", "none"); + json_object_free(json_restart); + } else json_object_object_add(json_cap, "addressFamiliesByPeer", json_restart); } @@ -8154,7 +8168,7 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js if (p->t_gr_restart) vty_out (vty, " The remaining time of restart timer is %ld%s", thread_timer_remain_second (p->t_gr_restart), VTY_NEWLINE); - + if (p->t_gr_stale) vty_out (vty, " The remaining time of stalepath timer is %ld%s", thread_timer_remain_second (p->t_gr_stale), VTY_NEWLINE); @@ -8215,11 +8229,6 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js else if (p->update_source) json_object_string_add(json_neigh, "updateSource", sockunion2str (p->update_source, buf1, SU_ADDRSTRLEN)); } - - /* Default weight */ - if (CHECK_FLAG (p->config, PEER_CONFIG_WEIGHT)) - json_object_int_add(json_neigh, "defaultWeight", p->weight); - } else { @@ -8238,10 +8247,6 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js vty_out (vty, "%s", VTY_NEWLINE); } - /* Default weight */ - if (CHECK_FLAG (p->config, PEER_CONFIG_WEIGHT)) - vty_out (vty, " Default weight %d%s", p->weight, VTY_NEWLINE); - vty_out (vty, "%s", VTY_NEWLINE); } @@ -8414,7 +8419,7 @@ bgp_show_peer (struct vty *vty, struct peer *p, u_char use_json, json_object *js ntohs (p->su_local->sin.sin_port), VTY_NEWLINE); } - + /* Remote address. */ if (p->su_remote) { @@ -8582,7 +8587,7 @@ bgp_show_neighbor (struct vty *vty, struct bgp *bgp, enum show_type type, if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -8674,7 +8679,7 @@ bgp_show_neighbor_vty (struct vty *vty, const char *name, if (use_json) { json_object_boolean_true_add(json, "bgpNoSuchInstance"); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -8817,7 +8822,7 @@ DEFUN (show_ip_bgp_community_info, { vty_out (vty, "Address Refcnt Community%s", VTY_NEWLINE); - hash_iterate (community_hash (), + hash_iterate (community_hash (), (void (*) (struct hash_backet *, void *)) community_show_all_iterator, vty); @@ -10062,7 +10067,7 @@ bgp_vty_init (void) install_default (BGP_VPNV6_NODE); install_default (BGP_ENCAP_NODE); install_default (BGP_ENCAPV6_NODE); - + /* "bgp multiple-instance" commands. 
*/ install_element (CONFIG_NODE, &bgp_multiple_instance_cmd); install_element (CONFIG_NODE, &no_bgp_multiple_instance_cmd); @@ -10155,7 +10160,7 @@ bgp_vty_init (void) /* "bgp always-compare-med" commands */ install_element (BGP_NODE, &bgp_always_compare_med_cmd); install_element (BGP_NODE, &no_bgp_always_compare_med_cmd); - + /* "bgp deterministic-med" commands */ install_element (BGP_NODE, &bgp_deterministic_med_cmd); install_element (BGP_NODE, &no_bgp_deterministic_med_cmd); @@ -10167,7 +10172,7 @@ bgp_vty_init (void) install_element (BGP_NODE, &no_bgp_graceful_restart_stalepath_time_cmd); install_element (BGP_NODE, &bgp_graceful_restart_restart_time_cmd); install_element (BGP_NODE, &no_bgp_graceful_restart_restart_time_cmd); - + /* "bgp fast-external-failover" commands */ install_element (BGP_NODE, &bgp_fast_external_failover_cmd); install_element (BGP_NODE, &no_bgp_fast_external_failover_cmd); @@ -10205,7 +10210,7 @@ bgp_vty_init (void) /* "no bgp default ipv4-unicast" commands. */ install_element (BGP_NODE, &no_bgp_default_ipv4_unicast_cmd); install_element (BGP_NODE, &bgp_default_ipv4_unicast_cmd); - + /* "bgp network import-check" commands. */ install_element (BGP_NODE, &bgp_network_import_check_cmd); install_element (BGP_NODE, &bgp_network_import_check_exact_cmd); @@ -10311,7 +10316,7 @@ bgp_vty_init (void) install_element (BGP_VPNV6_NODE, &no_neighbor_set_peer_group_cmd); install_element (BGP_ENCAP_NODE, &no_neighbor_set_peer_group_cmd); install_element (BGP_ENCAPV6_NODE, &no_neighbor_set_peer_group_cmd); - + /* "neighbor softreconfiguration inbound" commands.*/ install_element (BGP_NODE, &neighbor_soft_reconfiguration_cmd); install_element (BGP_NODE, &no_neighbor_soft_reconfiguration_cmd); @@ -10729,6 +10734,23 @@ bgp_vty_init (void) install_element (BGP_NODE, &neighbor_weight_cmd); install_element (BGP_NODE, &no_neighbor_weight_cmd); + install_element (BGP_IPV4_NODE, &neighbor_weight_cmd); + install_element (BGP_IPV4_NODE, &no_neighbor_weight_cmd); + install_element (BGP_IPV4M_NODE, &neighbor_weight_cmd); + install_element (BGP_IPV4M_NODE, &no_neighbor_weight_cmd); + install_element (BGP_IPV6_NODE, &neighbor_weight_cmd); + install_element (BGP_IPV6_NODE, &no_neighbor_weight_cmd); + install_element (BGP_IPV6M_NODE, &neighbor_weight_cmd); + install_element (BGP_IPV6M_NODE, &no_neighbor_weight_cmd); + install_element (BGP_VPNV4_NODE, &neighbor_weight_cmd); + install_element (BGP_VPNV4_NODE, &no_neighbor_weight_cmd); + install_element (BGP_VPNV6_NODE, &neighbor_weight_cmd); + install_element (BGP_VPNV6_NODE, &no_neighbor_weight_cmd); + install_element (BGP_ENCAP_NODE, &neighbor_weight_cmd); + install_element (BGP_ENCAP_NODE, &no_neighbor_weight_cmd); + install_element (BGP_ENCAPV6_NODE, &neighbor_weight_cmd); + install_element (BGP_ENCAPV6_NODE, &no_neighbor_weight_cmd); + /* "neighbor override-capability" commands. 
*/ install_element (BGP_NODE, &neighbor_override_capability_cmd); install_element (BGP_NODE, &no_neighbor_override_capability_cmd); @@ -10988,60 +11010,38 @@ bgp_vty_init (void) install_element (VIEW_NODE, &show_bgp_updgrps_adj_s_cmd); install_element (VIEW_NODE, &show_bgp_instance_updgrps_adj_s_cmd); install_element (VIEW_NODE, &show_bgp_updgrps_afi_adj_s_cmd); - install_element (RESTRICTED_NODE, &show_ip_bgp_summary_cmd); - install_element (RESTRICTED_NODE, &show_ip_bgp_updgrps_cmd); - install_element (RESTRICTED_NODE, &show_bgp_instance_all_ipv6_updgrps_cmd); - install_element (RESTRICTED_NODE, &show_ip_bgp_updgrps_adj_cmd); - install_element (RESTRICTED_NODE, &show_ip_bgp_instance_updgrps_adj_cmd); - install_element (RESTRICTED_NODE, &show_bgp_updgrps_adj_cmd); - install_element (RESTRICTED_NODE, &show_bgp_instance_updgrps_adj_cmd); - install_element (RESTRICTED_NODE, &show_bgp_updgrps_afi_adj_cmd); - install_element (RESTRICTED_NODE, &show_ip_bgp_updgrps_adj_s_cmd); - install_element (RESTRICTED_NODE, &show_ip_bgp_instance_updgrps_adj_s_cmd); - install_element (RESTRICTED_NODE, &show_bgp_updgrps_adj_s_cmd); - install_element (RESTRICTED_NODE, &show_bgp_instance_updgrps_adj_s_cmd); - install_element (RESTRICTED_NODE, &show_bgp_updgrps_afi_adj_s_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_summary_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_updgrps_cmd); - install_element (ENABLE_NODE, &show_bgp_instance_all_ipv6_updgrps_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_updgrps_adj_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_updgrps_adj_cmd); - install_element (ENABLE_NODE, &show_bgp_updgrps_adj_cmd); - install_element (ENABLE_NODE, &show_bgp_instance_updgrps_adj_cmd); - install_element (ENABLE_NODE, &show_bgp_updgrps_afi_adj_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_updgrps_adj_s_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_updgrps_adj_s_cmd); - install_element (ENABLE_NODE, &show_bgp_updgrps_adj_s_cmd); - install_element (ENABLE_NODE, &show_bgp_instance_updgrps_adj_s_cmd); - install_element (ENABLE_NODE, &show_bgp_updgrps_afi_adj_s_cmd); + install_element (VIEW_NODE, &show_ip_bgp_summary_cmd); + install_element (VIEW_NODE, &show_ip_bgp_updgrps_cmd); + install_element (VIEW_NODE, &show_bgp_instance_all_ipv6_updgrps_cmd); + install_element (VIEW_NODE, &show_ip_bgp_updgrps_adj_cmd); + install_element (VIEW_NODE, &show_ip_bgp_instance_updgrps_adj_cmd); + install_element (VIEW_NODE, &show_bgp_updgrps_adj_cmd); + install_element (VIEW_NODE, &show_bgp_instance_updgrps_adj_cmd); + install_element (VIEW_NODE, &show_bgp_updgrps_afi_adj_cmd); + install_element (VIEW_NODE, &show_ip_bgp_updgrps_adj_s_cmd); + install_element (VIEW_NODE, &show_ip_bgp_instance_updgrps_adj_s_cmd); + install_element (VIEW_NODE, &show_bgp_updgrps_adj_s_cmd); + install_element (VIEW_NODE, &show_bgp_instance_updgrps_adj_s_cmd); + install_element (VIEW_NODE, &show_bgp_updgrps_afi_adj_s_cmd); /* "show ip bgp neighbors" commands. */ install_element (VIEW_NODE, &show_ip_bgp_neighbors_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_neighbors_cmd); /* "show ip bgp peer-group" commands. 
*/ install_element (VIEW_NODE, &show_ip_bgp_peer_groups_cmd); install_element (VIEW_NODE, &show_ip_bgp_instance_peer_groups_cmd); install_element (VIEW_NODE, &show_ip_bgp_peer_group_cmd); install_element (VIEW_NODE, &show_ip_bgp_instance_peer_group_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_peer_groups_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_peer_groups_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_peer_group_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_instance_peer_group_cmd); /* "show ip bgp paths" commands. */ install_element (VIEW_NODE, &show_ip_bgp_paths_cmd); install_element (VIEW_NODE, &show_ip_bgp_ipv4_paths_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_paths_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_ipv4_paths_cmd); /* "show ip bgp community" commands. */ install_element (VIEW_NODE, &show_ip_bgp_community_info_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_community_info_cmd); /* "show ip bgp attribute-info" commands. */ install_element (VIEW_NODE, &show_ip_bgp_attr_info_cmd); - install_element (ENABLE_NODE, &show_ip_bgp_attr_info_cmd); /* "redistribute" commands. */ install_element (BGP_NODE, &bgp_redistribute_ipv4_cmd); @@ -11083,19 +11083,13 @@ bgp_vty_init (void) /* "show bgp memory" commands. */ install_element (VIEW_NODE, &show_bgp_memory_cmd); - install_element (RESTRICTED_NODE, &show_bgp_memory_cmd); - install_element (ENABLE_NODE, &show_bgp_memory_cmd); - + /* "show bgp views" commands. */ install_element (VIEW_NODE, &show_bgp_views_cmd); - install_element (RESTRICTED_NODE, &show_bgp_views_cmd); - install_element (ENABLE_NODE, &show_bgp_views_cmd); - + /* "show bgp vrfs" commands. */ install_element (VIEW_NODE, &show_bgp_vrfs_cmd); - install_element (RESTRICTED_NODE, &show_bgp_vrfs_cmd); - install_element (ENABLE_NODE, &show_bgp_vrfs_cmd); - + /* Community-list. */ community_list_vty (); } @@ -11219,7 +11213,7 @@ community_list_unset_vty (struct vty *vty, int argc, struct cmd_token **argv, if (argc > 1) { - // Check the list direct. + // Check the list direct. if (strncmp (argv[1], "p", 1) == 0) direct = COMMUNITY_PERMIT; else if (strncmp (argv[1], "d", 1) == 0) @@ -11396,7 +11390,7 @@ DEFUN (show_ip_community_list_arg, } static int -extcommunity_list_set_vty (struct vty *vty, int argc, struct cmd_token **argv, +extcommunity_list_set_vty (struct vty *vty, int argc, struct cmd_token **argv, int style) { /* CHECK ME dwalton finish this @@ -11416,7 +11410,7 @@ extcommunity_list_set_vty (struct vty *vty, int argc, struct cmd_token **argv, return CMD_WARNING; } - // All digit name check. + // All digit name check. if (reject_all_digit_name && all_digit (argv[0])) { vty_out (vty, "%% Community name cannot have all digits%s", VTY_NEWLINE); @@ -11431,7 +11425,7 @@ extcommunity_list_set_vty (struct vty *vty, int argc, struct cmd_token **argv, ret = extcommunity_list_set (bgp_clist, argv[0], str, direct, style); - // Free temporary community list string allocated by argv_concat(). + // Free temporary community list string allocated by argv_concat(). if (str) XFREE (MTYPE_TMP, str); @@ -11726,8 +11720,6 @@ community_list_vty (void) install_element (CONFIG_NODE, &no_ip_community_list_expanded_all_cmd); install_element (VIEW_NODE, &show_ip_community_list_cmd); install_element (VIEW_NODE, &show_ip_community_list_arg_cmd); - install_element (ENABLE_NODE, &show_ip_community_list_cmd); - install_element (ENABLE_NODE, &show_ip_community_list_arg_cmd); /* Extcommunity-list. 
*/ install_element (CONFIG_NODE, &ip_extcommunity_list_standard_cmd); @@ -11736,6 +11728,4 @@ community_list_vty (void) install_element (CONFIG_NODE, &no_ip_extcommunity_list_expanded_all_cmd); install_element (VIEW_NODE, &show_ip_extcommunity_list_cmd); install_element (VIEW_NODE, &show_ip_extcommunity_list_arg_cmd); - install_element (ENABLE_NODE, &show_ip_extcommunity_list_cmd); - install_element (ENABLE_NODE, &show_ip_extcommunity_list_arg_cmd); } diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index c2df521e79..789dba53f8 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -46,6 +46,10 @@ Boston, MA 02111-1307, USA. */ #include "bgpd/bgp_nexthop.h" #include "bgpd/bgp_nht.h" #include "bgpd/bgp_bfd.h" +#if ENABLE_BGP_VNC +# include "bgpd/rfapi/rfapi_backend.h" +# include "bgpd/rfapi/vnc_export_bgp.h" +#endif /* All information about zebra. */ struct zclient *zclient = NULL; @@ -601,13 +605,13 @@ zebra_read_ipv4 (int command, struct zclient *zclient, zebra_size_t length, /* Type, flags, message. */ api.type = stream_getc (s); api.instance = stream_getw (s); - api.flags = stream_getc (s); + api.flags = stream_getl (s); api.message = stream_getc (s); /* IPv4 prefix. */ memset (&p, 0, sizeof (struct prefix_ipv4)); p.family = AF_INET; - p.prefixlen = stream_getc (s); + p.prefixlen = MIN(IPV4_MAX_PREFIXLEN, stream_getc (s)); stream_get (&p.prefix, s, PSIZE (p.prefixlen)); /* Nexthop, ifindex, distance, metric. */ @@ -636,7 +640,7 @@ zebra_read_ipv4 (int command, struct zclient *zclient, zebra_size_t length, api.metric = 0; if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG)) - api.tag = stream_getw (s); + api.tag = stream_getl (s); else api.tag = 0; @@ -645,7 +649,7 @@ zebra_read_ipv4 (int command, struct zclient *zclient, zebra_size_t length, if (bgp_debug_zebra((struct prefix *)&p)) { char buf[2][INET_ADDRSTRLEN]; - zlog_debug("Rx IPv4 route add VRF %u %s[%d] %s/%d nexthop %s metric %u tag %d", + zlog_debug("Rx IPv4 route add VRF %u %s[%d] %s/%d nexthop %s metric %u tag %"ROUTE_TAG_PRI, vrf_id, zebra_route_string(api.type), api.instance, inet_ntop(AF_INET, &p.prefix, buf[0], sizeof(buf[0])), @@ -677,7 +681,7 @@ zebra_read_ipv4 (int command, struct zclient *zclient, zebra_size_t length, { char buf[2][INET_ADDRSTRLEN]; zlog_debug("Rx IPv4 route delete VRF %u %s[%d] %s/%d " - "nexthop %s metric %u tag %d", + "nexthop %s metric %u tag %"ROUTE_TAG_PRI, vrf_id, zebra_route_string(api.type), api.instance, inet_ntop(AF_INET, &p.prefix, buf[0], sizeof(buf[0])), @@ -716,13 +720,13 @@ zebra_read_ipv6 (int command, struct zclient *zclient, zebra_size_t length, /* Type, flags, message. */ api.type = stream_getc (s); api.instance = stream_getw (s); - api.flags = stream_getc (s); + api.flags = stream_getl (s); api.message = stream_getc (s); /* IPv6 prefix. */ memset (&p, 0, sizeof (struct prefix_ipv6)); p.family = AF_INET6; - p.prefixlen = stream_getc (s); + p.prefixlen = MIN(IPV6_MAX_PREFIXLEN, stream_getc (s)); stream_get (&p.prefix, s, PSIZE (p.prefixlen)); /* Nexthop, ifindex, distance, metric. 
*/ @@ -753,7 +757,7 @@ zebra_read_ipv6 (int command, struct zclient *zclient, zebra_size_t length, api.metric = 0; if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG)) - api.tag = stream_getw (s); + api.tag = stream_getl (s); else api.tag = 0; @@ -766,7 +770,7 @@ zebra_read_ipv6 (int command, struct zclient *zclient, zebra_size_t length, if (bgp_debug_zebra((struct prefix *)&p)) { char buf[2][INET6_ADDRSTRLEN]; - zlog_debug("Rx IPv6 route add VRF %u %s[%d] %s/%d nexthop %s metric %u tag %d", + zlog_debug("Rx IPv6 route add VRF %u %s[%d] %s/%d nexthop %s metric %u tag %"ROUTE_TAG_PRI, vrf_id, zebra_route_string(api.type), api.instance, inet_ntop(AF_INET6, &p.prefix, buf[0], sizeof(buf[0])), @@ -797,7 +801,7 @@ zebra_read_ipv6 (int command, struct zclient *zclient, zebra_size_t length, { char buf[2][INET6_ADDRSTRLEN]; zlog_debug("Rx IPv6 route delete VRF %u %s[%d] %s/%d " - "nexthop %s metric %u tag %d", + "nexthop %s metric %u tag %"ROUTE_TAG_PRI, vrf_id, zebra_route_string(api.type), api.instance, inet_ntop(AF_INET6, &p.prefix, buf[0], sizeof(buf[0])), @@ -1199,7 +1203,7 @@ void bgp_zebra_announce (struct prefix *p, struct bgp_info *info, struct bgp *bgp, afi_t afi, safi_t safi) { - int flags; + u_int32_t flags; u_char distance; struct peer *peer; struct bgp_info *mpinfo; @@ -1207,7 +1211,7 @@ bgp_zebra_announce (struct prefix *p, struct bgp_info *info, struct bgp *bgp, u_int32_t nhcount, metric; struct bgp_info local_info; struct bgp_info *info_cp = &local_info; - u_short tag; + route_tag_t tag; /* Don't try to install if we're not connected to Zebra or Zebra doesn't * know of this instance. @@ -1372,7 +1376,7 @@ bgp_zebra_announce (struct prefix *p, struct bgp_info *info, struct bgp *bgp, if (bgp_debug_zebra(p)) { int i; - zlog_debug("Tx IPv4 route %s VRF %u %s/%d metric %u tag %d" + zlog_debug("Tx IPv4 route %s VRF %u %s/%d metric %u tag %"ROUTE_TAG_PRI " count %d", (valid_nh_count ? "add":"delete"), bgp->vrf_id, inet_ntop(AF_INET, &p->u.prefix4, buf[0], sizeof(buf[0])), @@ -1553,7 +1557,7 @@ bgp_zebra_announce (struct prefix *p, struct bgp_info *info, struct bgp *bgp, if (bgp_debug_zebra(p)) { int i; - zlog_debug("Tx IPv4 route %s VRF %u %s/%d metric %u tag %d", + zlog_debug("Tx IPv4 route %s VRF %u %s/%d metric %u tag %"ROUTE_TAG_PRI, valid_nh_count ? "add" : "delete", bgp->vrf_id, inet_ntop(AF_INET, &p->u.prefix4, buf[0], sizeof(buf[0])), p->prefixlen, api.metric, api.tag); @@ -1575,7 +1579,7 @@ bgp_zebra_announce (struct prefix *p, struct bgp_info *info, struct bgp *bgp, if (bgp_debug_zebra(p)) { int i; - zlog_debug("Tx IPv6 route %s VRF %u %s/%d metric %u tag %d", + zlog_debug("Tx IPv6 route %s VRF %u %s/%d metric %u tag %"ROUTE_TAG_PRI, valid_nh_count ? 
"add" : "delete", bgp->vrf_id, inet_ntop(AF_INET6, &p->u.prefix6, buf[0], sizeof(buf[0])), p->prefixlen, api.metric, api.tag); @@ -1620,7 +1624,7 @@ bgp_zebra_announce_table (struct bgp *bgp, afi_t afi, safi_t safi) void bgp_zebra_withdraw (struct prefix *p, struct bgp_info *info, safi_t safi) { - int flags; + u_int32_t flags; struct peer *peer; peer = info->peer; @@ -1679,7 +1683,7 @@ bgp_zebra_withdraw (struct prefix *p, struct bgp_info *info, safi_t safi) if (bgp_debug_zebra(p)) { char buf[2][INET_ADDRSTRLEN]; - zlog_debug("Tx IPv4 route delete VRF %u %s/%d metric %u tag %d", + zlog_debug("Tx IPv4 route delete VRF %u %s/%d metric %u tag %"ROUTE_TAG_PRI, peer->bgp->vrf_id, inet_ntop(AF_INET, &p->u.prefix4, buf[0], sizeof(buf[0])), p->prefixlen, api.metric, api.tag); @@ -1719,7 +1723,7 @@ bgp_zebra_withdraw (struct prefix *p, struct bgp_info *info, safi_t safi) if (bgp_debug_zebra(p)) { char buf[2][INET6_ADDRSTRLEN]; - zlog_debug("Tx IPv6 route delete VRF %u %s/%d metric %u tag %d", + zlog_debug("Tx IPv6 route delete VRF %u %s/%d metric %u tag %"ROUTE_TAG_PRI, peer->bgp->vrf_id, inet_ntop(AF_INET6, &p->u.prefix6, buf[0], sizeof(buf[0])), p->prefixlen, api.metric, api.tag); @@ -1806,6 +1810,13 @@ bgp_redistribute_set (struct bgp *bgp, afi_t afi, int type, u_short instance) if (vrf_bitmap_check (zclient->redist[afi][type], bgp->vrf_id)) return CMD_WARNING; +#if ENABLE_BGP_VNC + if (bgp->vrf_id == VRF_DEFAULT && + type == ZEBRA_ROUTE_VNC_DIRECT) { + vnc_export_bgp_enable(bgp, afi); /* only enables if mode bits cfg'd */ + } +#endif + vrf_bitmap_set (zclient->redist[afi][type], bgp->vrf_id); } @@ -1933,6 +1944,13 @@ bgp_redistribute_unreg (struct bgp *bgp, afi_t afi, int type, u_short instance) vrf_bitmap_unset (zclient->redist[afi][type], bgp->vrf_id); } +#if ENABLE_BGP_VNC + if (bgp->vrf_id == VRF_DEFAULT && + type == ZEBRA_ROUTE_VNC_DIRECT) { + vnc_export_bgp_disable(bgp, afi); + } +#endif + if (bgp_install_info_to_zebra (bgp)) { /* Send distribute delete message to zebra. */ @@ -2103,14 +2121,10 @@ bgp_zebra_init (struct thread_master *master) zclient->interface_nbr_address_add = bgp_interface_nbr_address_add; zclient->interface_nbr_address_delete = bgp_interface_nbr_address_delete; zclient->interface_vrf_update = bgp_interface_vrf_update; - zclient->ipv4_route_add = zebra_read_ipv4; - zclient->ipv4_route_delete = zebra_read_ipv4; zclient->redistribute_route_ipv4_add = zebra_read_ipv4; zclient->redistribute_route_ipv4_del = zebra_read_ipv4; zclient->interface_up = bgp_interface_up; zclient->interface_down = bgp_interface_down; - zclient->ipv6_route_add = zebra_read_ipv6; - zclient->ipv6_route_delete = zebra_read_ipv6; zclient->redistribute_route_ipv6_add = zebra_read_ipv6; zclient->redistribute_route_ipv6_del = zebra_read_ipv6; zclient->nexthop_update = bgp_read_nexthop_update; diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 9a7b297fb3..eb8a50b267 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -63,6 +63,10 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "bgpd/bgp_damp.h" #include "bgpd/bgp_mplsvpn.h" #include "bgpd/bgp_encap.h" +#if ENABLE_BGP_VNC +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi_backend.h" +#endif #include "bgpd/bgp_advertise.h" #include "bgpd/bgp_network.h" #include "bgpd/bgp_vty.h" @@ -886,7 +890,6 @@ peer_global_config_reset (struct peer *peer) int v6only; - peer->weight = 0; peer->change_local_as = 0; peer->ttl = (peer_sort (peer) == BGP_PEER_IBGP ? 
MAXTTL : 1); if (peer->update_source) @@ -1127,7 +1130,7 @@ peer_unlock_with_caller (const char *name, struct peer *peer) } /* Allocate new peer object, implicitely locked. */ -static struct peer * +struct peer * peer_new (struct bgp *bgp) { afi_t afi; @@ -1153,7 +1156,6 @@ peer_new (struct bgp *bgp) peer->bgp = bgp; peer = peer_lock (peer); /* initial reference */ bgp_lock (bgp); - peer->weight = 0; peer->password = NULL; /* Set default flags. */ @@ -1249,6 +1251,7 @@ peer_xfer_config (struct peer *peer_dst, struct peer *peer_src) peer_dst->afc[afi][safi] = peer_src->afc[afi][safi]; peer_dst->af_flags[afi][safi] = peer_src->af_flags[afi][safi]; peer_dst->allowas_in[afi][safi] = peer_src->allowas_in[afi][safi]; + peer_dst->weight[afi][safi] = peer_src->weight[afi][safi]; } for (afidx = BGP_AF_START; afidx < BGP_AF_MAX; afidx++) @@ -2195,9 +2198,6 @@ peer_group2peer_config_copy (struct peer_group *group, struct peer *peer) /* GTSM hops */ peer->gtsm_hops = conf->gtsm_hops; - /* Weight */ - peer->weight = conf->weight; - /* this flag is per-neighbor and so has to be preserved */ v6only = CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY); @@ -2287,6 +2287,9 @@ peer_group2peer_config_copy_af (struct peer_group *group, struct peer *peer, /* allowas-in */ peer->allowas_in[afi][safi] = conf->allowas_in[afi][safi]; + /* weight */ + peer->weight[afi][safi] = conf->weight[afi][safi]; + /* default-originate route-map */ if (conf->default_rmap[afi][safi].name) { @@ -2885,6 +2888,12 @@ bgp_create (as_t *as, const char *name, enum bgp_instance_type inst_type) bgp->as = *as; +#if ENABLE_BGP_VNC + bgp->rfapi = bgp_rfapi_new(bgp); + assert(bgp->rfapi); + assert(bgp->rfapi_cfg); +#endif /* ENABLE_BGP_VNC */ + if (name) { bgp->name = XSTRDUP(MTYPE_BGP, name); @@ -3165,6 +3174,11 @@ bgp_delete (struct bgp *bgp) /* TODO - Other memory may need to be freed - e.g., NHT */ +#if ENABLE_BGP_VNC + rfapi_delete(bgp); + bgp_cleanup_routes(); /* rfapi cleanup can create route entries! */ +#endif + /* Remove visibility via the master list - there may however still be * routes to be processed still referencing the struct bgp. */ @@ -3663,6 +3677,7 @@ static const struct peer_flag_action peer_af_flag_action_list[] = { PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE,1, peer_change_reset_out }, { PEER_FLAG_ADDPATH_TX_ALL_PATHS, 1, peer_change_reset }, { PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS, 1, peer_change_reset }, + { PEER_FLAG_WEIGHT, 0, peer_change_reset_in }, { 0, 0, 0 } }; @@ -4498,15 +4513,47 @@ peer_port_unset (struct peer *peer) return 0; } +/* + * Helper function that is called after the name of the policy + * being used by a peer has changed (AF specific). Automatically + * initiates inbound or outbound processing as needed. + */ +static void +peer_on_policy_change (struct peer *peer, afi_t afi, safi_t safi, int outbound) +{ + if (outbound) + { + update_group_adjust_peer (peer_af_find (peer, afi, safi)); + if (peer->status == Established) + bgp_announce_route(peer, afi, safi); + } + else + { + if (peer->status != Established) + return; + + if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG)) + bgp_soft_reconfig_in (peer, afi, safi); + else if (CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_OLD_RCV) + || CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_NEW_RCV)) + bgp_route_refresh_send (peer, afi, safi, 0, 0, 0); + } +} + + /* neighbor weight. 
*/ int -peer_weight_set (struct peer *peer, u_int16_t weight) +peer_weight_set (struct peer *peer, afi_t afi, safi_t safi, u_int16_t weight) { struct peer_group *group; struct listnode *node, *nnode; - SET_FLAG (peer->config, PEER_CONFIG_WEIGHT); - peer->weight = weight; + if (peer->weight[afi][safi] != weight) + { + peer->weight[afi][safi] = weight; + SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_WEIGHT); + peer_on_policy_change (peer, afi, safi, 0); + } if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)) return 0; @@ -4515,35 +4562,71 @@ peer_weight_set (struct peer *peer, u_int16_t weight) group = peer->group; for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer)) { - peer->weight = group->conf->weight; + if (peer->weight[afi][safi] != weight) + { + peer->weight[afi][safi] = weight; + SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_WEIGHT); + peer_on_policy_change (peer, afi, safi, 0); } - return 1; + } + return 0; } int -peer_weight_unset (struct peer *peer) +peer_weight_unset (struct peer *peer, afi_t afi, safi_t safi) { struct peer_group *group; struct listnode *node, *nnode; - /* Set default weight. */ + /* not the peer-group itself but a peer in a peer-group */ if (peer_group_active (peer)) - peer->weight = peer->group->conf->weight; + { + group = peer->group; + + /* inherit weight from the peer-group */ + if (CHECK_FLAG (group->conf->af_flags[afi][safi], PEER_FLAG_WEIGHT)) + { + peer->weight[afi][safi] = group->conf->weight[afi][safi]; + peer_af_flag_set (peer, afi, safi, PEER_FLAG_WEIGHT); + peer_on_policy_change (peer, afi, safi, 0); + } + else + { + if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_WEIGHT)) + { + peer->weight[afi][safi] = 0; + peer_af_flag_unset (peer, afi, safi, PEER_FLAG_WEIGHT); + peer_on_policy_change (peer, afi, safi, 0); + } + } + } + else - peer->weight = 0; - - UNSET_FLAG (peer->config, PEER_CONFIG_WEIGHT); - - if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)) - return 0; + { + if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_WEIGHT)) + { + peer->weight[afi][safi] = 0; + peer_af_flag_unset (peer, afi, safi, PEER_FLAG_WEIGHT); + peer_on_policy_change (peer, afi, safi, 0); + } /* peer-group member updates. */ group = peer->group; + + if (group) + { for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer)) { - peer->weight = 0; + if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_WEIGHT)) + { + peer->weight[afi][safi] = 0; + peer_af_flag_unset (peer, afi, safi, PEER_FLAG_WEIGHT); + peer_on_policy_change (peer, afi, safi, 0); + } + } } - return 1; + } + return 0; } int @@ -4773,7 +4856,7 @@ peer_allowas_in_set (struct peer *peer, afi_t afi, safi_t safi, int allow_num) { peer->allowas_in[afi][safi] = allow_num; SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN); - peer_change_action (peer, afi, safi, peer_change_reset_in); + peer_on_policy_change (peer, afi, safi, 0); } if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)) @@ -4786,7 +4869,7 @@ peer_allowas_in_set (struct peer *peer, afi_t afi, safi_t safi, int allow_num) { peer->allowas_in[afi][safi] = allow_num; SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN); - peer_change_action (peer, afi, safi, peer_change_reset_in); + peer_on_policy_change (peer, afi, safi, 0); } } @@ -4803,6 +4886,7 @@ peer_allowas_in_unset (struct peer *peer, afi_t afi, safi_t safi) { peer->allowas_in[afi][safi] = 0; peer_af_flag_unset (peer, afi, safi, PEER_FLAG_ALLOWAS_IN); + peer_on_policy_change (peer, afi, safi, 0); } if (! 
CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)) @@ -4815,6 +4899,7 @@ peer_allowas_in_unset (struct peer *peer, afi_t afi, safi_t safi) { peer->allowas_in[afi][safi] = 0; peer_af_flag_unset (peer, afi, safi, PEER_FLAG_ALLOWAS_IN); + peer_on_policy_change (peer, afi, safi, 0); } } return 0; @@ -5049,33 +5134,6 @@ peer_password_unset (struct peer *peer) return 0; } -/* - * Helper function that is called after the name of the policy - * being used by a peer has changed (AF specific). Automatically - * initiates inbound or outbound processing as needed. - */ -static void -peer_on_policy_change (struct peer *peer, afi_t afi, safi_t safi, int outbound) -{ - if (outbound) - { - update_group_adjust_peer (peer_af_find (peer, afi, safi)); - if (peer->status == Established) - bgp_announce_route(peer, afi, safi); - } - else - { - if (peer->status != Established) - return; - - if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG)) - bgp_soft_reconfig_in (peer, afi, safi); - else if (CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_OLD_RCV) - || CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_NEW_RCV)) - bgp_route_refresh_send (peer, afi, safi, 0, 0, 0); - } -} - /* Set distribute list to the peer. */ int @@ -5233,6 +5291,9 @@ peer_distribute_update (struct access_list *access) } } } +#if ENABLE_BGP_VNC + vnc_prefix_list_update(bgp); +#endif } } @@ -6605,16 +6666,6 @@ bgp_config_write_peer_global (struct vty *vty, struct bgp *bgp, peer->connect, VTY_NEWLINE); } - /* weight */ - if (CHECK_FLAG (peer->config, PEER_CONFIG_WEIGHT)) - { - if (! peer_group_active (peer) || g_peer->weight != peer->weight) - { - vty_out (vty, " neighbor %s weight %d%s", addr, peer->weight, - VTY_NEWLINE); - } - } - /* capability dynamic */ if (CHECK_FLAG (peer->flags, PEER_FLAG_DYNAMIC_CAPABILITY)) { @@ -6977,6 +7028,20 @@ bgp_config_write_peer_af (struct vty *vty, struct bgp *bgp, } } + /* weight */ + if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_WEIGHT)) + if (! peer_group_active (peer) + || ! peer_af_flag_check (g_peer, afi, safi, PEER_FLAG_WEIGHT) + || peer->weight[afi][safi] != g_peer->weight[afi][safi]) + { + if (peer->weight[afi][safi]) + { + afi_header_vty_out (vty, afi, safi, write, + " neighbor %s weight %d%s", + addr, peer->weight[afi][safi], VTY_NEWLINE); + } + } + /* Filter. */ bgp_config_write_filter (vty, peer, afi, safi, write); @@ -7337,6 +7402,10 @@ bgp_config_write (struct vty *vty) /* ENCAPv6 configuration. */ write += bgp_config_write_family (vty, bgp, AFI_IP6, SAFI_ENCAP); +#if ENABLE_BGP_VNC + write += bgp_rfapi_cfg_write(vty, bgp); +#endif + write++; } return write; @@ -7407,6 +7476,10 @@ bgp_init (void) /* Init zebra. */ bgp_zebra_init(bm->master); +#if ENABLE_BGP_VNC + vnc_zebra_init (bm->master); +#endif + /* BGP VTY commands installation. */ bgp_vty_init (); @@ -7419,6 +7492,9 @@ bgp_init (void) bgp_scan_vty_init(); bgp_mplsvpn_init (); bgp_encap_init (); +#if ENABLE_BGP_VNC + rfapi_init (); +#endif /* Access list initialize. 
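With the weight handling reworked above, a neighbor's weight is no longer a single per-session value emitted by bgp_config_write_peer_global(); it is stored per address-family and written out by bgp_config_write_peer_af() inside the corresponding address-family section. A minimal configuration sketch of where the statement now ends up (the AS number, neighbor address and weight value are hypothetical, and the exact CLI definition lives in bgp_vty.c, outside these hunks):

router bgp 64512
 neighbor 2001:db8::2 remote-as 64512
 !
 address-family ipv6
 neighbor 2001:db8::2 activate
 neighbor 2001:db8::2 weight 100
 exit-address-family

Because PEER_FLAG_WEIGHT is registered with peer_change_reset_in and peer_weight_set() calls peer_on_policy_change() with outbound=0, changing the weight on an Established peer triggers an inbound soft reconfiguration or a route refresh rather than a session reset.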
*/ access_list_init (); diff --git a/bgpd/bgpd.conf.vnc.sample b/bgpd/bgpd.conf.vnc.sample new file mode 100644 index 0000000000..863abde3a6 --- /dev/null +++ b/bgpd/bgpd.conf.vnc.sample @@ -0,0 +1,89 @@ +hostname H192.1.1.1 +password zebra +#enable password zebra +log stdout notifications +log monitor notifications +#debug bgp + +line vty +exec-timeout 1000 +exit + + +router bgp 64512 + + # Must set a router-id if no zebra (default 0.0.0.0) + bgp router-id 192.1.1.1 + + neighbor 192.1.1.2 remote-as 64512 + neighbor 192.1.1.2 description H192.1.1.2 + neighbor 192.1.1.2 update-source 192.1.1.1 + neighbor 192.1.1.2 advertisement-interval 1 + no neighbor 192.1.1.2 activate + + neighbor 192.1.1.3 remote-as 64512 + neighbor 192.1.1.3 description H192.1.1.3 + neighbor 192.1.1.3 update-source 192.1.1.1 + neighbor 192.1.1.3 advertisement-interval 1 + no neighbor 192.1.1.3 activate + + address-family vpnv4 + neighbor 192.1.1.2 activate + neighbor 192.1.1.3 activate + exit-address-family + + address-family vpnv6 + neighbor 192.1.1.2 activate + neighbor 192.1.1.3 activate + exit-address-family + + vnc defaults + rd auto:vn:5226 + response-lifetime 45 + rt both 1000:1 1000:2 + exit-vnc + + vnc nve-group group1 + prefix vn 172.16.0.0/16 + exit-vnc + + vnc nve-group red + prefix vn 10.0.0.0/8 + rd auto:vn:10 + rt both 1000:10 + exit-vnc + + vnc nve-group blue + prefix vn 20.0.0.0/8 + rd auto:vn:20 + rt both 1000:20 + exit-vnc + + vnc nve-group green + prefix vn 30.0.0.0/8 + rd auto:vn:20 + rt both 1000:30 + exit-vnc + + vnc nve-group rfc4291v6c + prefix vn ::ac10:0/112 + rd auto:vn:5227 + rt both 2000:1 + exit-vnc + + vnc nve-group rfc4291v6m + prefix vn ::ffff:ac10:0/112 + rd auto:vn:5528 + rt both 3000:1 + exit-vnc + + vnc nve-group rfc6052v6 + prefix vn 64:ff9b::ac10:0/112 + rd auto:vn:5529 + rt both 4000:1 + exit-vnc + +exit + + + diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index a6d3b61e55..ee105201e8 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -351,6 +351,11 @@ struct bgp u_int32_t addpath_tx_id; int addpath_tx_used[AFI_MAX][SAFI_MAX]; + +#if ENABLE_BGP_VNC + struct rfapi_cfg *rfapi_cfg; + struct rfapi *rfapi; +#endif }; #define BGP_ROUTE_ADV_HOLD(bgp) (bgp->main_peers_update_hold) @@ -417,6 +422,8 @@ struct bgp_rd #define RMAP_OUT 1 #define RMAP_MAX 2 +#include "filter.h" + /* BGP filter structure. */ struct bgp_filter { @@ -657,6 +664,9 @@ struct peer #define PEER_FLAG_DYNAMIC_NEIGHBOR (1 << 12) /* dynamic neighbor */ #define PEER_FLAG_CAPABILITY_ENHE (1 << 13) /* Extended next-hop (rfc 5549)*/ #define PEER_FLAG_IFPEER_V6ONLY (1 << 14) /* if-based peer is v6 only */ +#if ENABLE_BGP_VNC +#define PEER_FLAG_IS_RFAPI_HD (1 << 15) /* attached to rfapi HD */ +#endif /* NSF mode (graceful restart) */ u_char nsf[AFI_MAX][SAFI_MAX]; @@ -687,6 +697,7 @@ struct peer #define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1 << 21) /* remove-private-as all replace-as */ #define PEER_FLAG_ADDPATH_TX_ALL_PATHS (1 << 22) /* addpath-tx-all-paths */ #define PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS (1 << 23) /* addpath-tx-bestpath-per-AS */ +#define PEER_FLAG_WEIGHT (1 << 24) /* weight */ /* MD5 password */ char *password; @@ -719,12 +730,10 @@ struct peer /* Default attribute value for the peer. */ u_int32_t config; -#define PEER_CONFIG_WEIGHT (1 << 0) /* Default weight. 
*/ -#define PEER_CONFIG_TIMER (1 << 1) /* keepalive & holdtime */ -#define PEER_CONFIG_CONNECT (1 << 2) /* connect */ -#define PEER_CONFIG_ROUTEADV (1 << 3) /* route advertise */ +#define PEER_CONFIG_TIMER (1 << 0) /* keepalive & holdtime */ +#define PEER_CONFIG_CONNECT (1 << 1) /* connect */ +#define PEER_CONFIG_ROUTEADV (1 << 2) /* route advertise */ - u_int32_t weight; u_int32_t holdtime; u_int32_t keepalive; u_int32_t connect; @@ -779,7 +788,8 @@ struct peer /* Syncronization list and time. */ struct bgp_synchronize *sync[AFI_MAX][SAFI_MAX]; time_t synctime; - time_t last_write; /* timestamp when the last UPDATE msg was written */ + time_t last_write; /* timestamp when the last msg was written */ + time_t last_update; /* timestamp when the last UPDATE msg was written */ /* Send prefix count. */ unsigned long scount[AFI_MAX][SAFI_MAX]; @@ -817,6 +827,9 @@ struct peer /* allowas-in. */ char allowas_in[AFI_MAX][SAFI_MAX]; + /* weight */ + unsigned long weight[AFI_MAX][SAFI_MAX]; + /* peer reset cause */ char last_reset; #define PEER_DOWN_RID_CHANGE 1 /* bgp router-id command */ @@ -940,6 +953,9 @@ struct bgp_nlri #define BGP_ATTR_AS4_AGGREGATOR 18 #define BGP_ATTR_AS_PATHLIMIT 21 #define BGP_ATTR_ENCAP 23 +#if ENABLE_BGP_VNC +#define BGP_ATTR_VNC 255 +#endif /* BGP update origin. */ #define BGP_ORIGIN_IGP 0 @@ -1054,6 +1070,7 @@ struct bgp_nlri /* RFC4364 */ #define SAFI_MPLS_LABELED_VPN 128 +#define BGP_SAFI_VPN 128 /* BGP uptime string length. */ #define BGP_UPTIME_LEN 25 @@ -1277,8 +1294,8 @@ extern int peer_default_originate_unset (struct peer *, afi_t, safi_t); extern int peer_port_set (struct peer *, u_int16_t); extern int peer_port_unset (struct peer *); -extern int peer_weight_set (struct peer *, u_int16_t); -extern int peer_weight_unset (struct peer *); +extern int peer_weight_set (struct peer *, afi_t, safi_t, u_int16_t); +extern int peer_weight_unset (struct peer *, afi_t, safi_t); extern int peer_timers_set (struct peer *, u_int32_t keepalive, u_int32_t holdtime); extern int peer_timers_unset (struct peer *); @@ -1506,4 +1523,8 @@ bgp_vrf_unlink (struct bgp *bgp, struct vrf *vrf) } extern void bgp_update_redist_vrf_bitmaps (struct bgp*, vrf_id_t); + +/* For benefit of rfapi */ +extern struct peer * peer_new (struct bgp *bgp); + #endif /* _QUAGGA_BGPD_H */ diff --git a/bgpd/rfapi/.gitignore b/bgpd/rfapi/.gitignore new file mode 100644 index 0000000000..0638d7514b --- /dev/null +++ b/bgpd/rfapi/.gitignore @@ -0,0 +1 @@ +.dirstamp diff --git a/bgpd/rfapi/bgp_rfapi_cfg.c b/bgpd/rfapi/bgp_rfapi_cfg.c new file mode 100644 index 0000000000..9e4eafa6cf --- /dev/null +++ b/bgpd/rfapi/bgp_rfapi_cfg.c @@ -0,0 +1,4643 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ *
+ */
+#include "lib/zebra.h"
+
+#include "lib/command.h"
+#include "lib/prefix.h"
+#include "lib/memory.h"
+#include "lib/linklist.h"
+#include "lib/table.h"
+#include "lib/plist.h"
+#include "lib/routemap.h"
+
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_attr.h"
+#include "bgpd/bgp_mplsvpn.h"
+#include "bgpd/bgp_route.h"
+
+#include "bgpd/bgp_ecommunity.h"
+#include "bgpd/rfapi/rfapi.h"
+#include "bgpd/rfapi/bgp_rfapi_cfg.h"
+#include "bgpd/rfapi/rfapi_backend.h"
+#include "bgpd/rfapi/rfapi_import.h"
+#include "bgpd/rfapi/rfapi_private.h"
+#include "bgpd/rfapi/rfapi_monitor.h"
+#include "bgpd/rfapi/vnc_zebra.h"
+#include "bgpd/rfapi/vnc_export_bgp.h"
+#include "bgpd/rfapi/vnc_export_bgp_p.h"
+#include "bgpd/rfapi/rfapi_vty.h"
+#include "bgpd/rfapi/vnc_import_bgp.h"
+
+#if ENABLE_BGP_VNC
+
+#undef BGP_VNC_DEBUG_MATCH_GROUP
+
+
+DEFINE_MGROUP(RFAPI, "rfapi")
+DEFINE_MTYPE(RFAPI, RFAPI_CFG, "NVE Configuration")
+DEFINE_MTYPE(RFAPI, RFAPI_GROUP_CFG, "NVE Group Configuration")
+DEFINE_MTYPE(RFAPI, RFAPI_L2_CFG, "RFAPI L2 Group Configuration")
+DEFINE_MTYPE(RFAPI, RFAPI_RFP_GROUP_CFG, "RFAPI RFP Group Configuration")
+DEFINE_MTYPE(RFAPI, RFAPI, "RFAPI Generic")
+DEFINE_MTYPE(RFAPI, RFAPI_DESC, "RFAPI Descriptor")
+DEFINE_MTYPE(RFAPI, RFAPI_IMPORTTABLE, "RFAPI Import Table")
+DEFINE_MTYPE(RFAPI, RFAPI_MONITOR, "RFAPI Monitor VPN")
+DEFINE_MTYPE(RFAPI, RFAPI_MONITOR_ENCAP, "RFAPI Monitor Encap")
+DEFINE_MTYPE(RFAPI, RFAPI_NEXTHOP, "RFAPI Next Hop")
+DEFINE_MTYPE(RFAPI, RFAPI_VN_OPTION, "RFAPI VN Option")
+DEFINE_MTYPE(RFAPI, RFAPI_UN_OPTION, "RFAPI UN Option")
+DEFINE_MTYPE(RFAPI, RFAPI_WITHDRAW, "RFAPI Withdraw")
+DEFINE_MTYPE(RFAPI, RFAPI_RFG_NAME, "RFAPI RFGName")
+DEFINE_MTYPE(RFAPI, RFAPI_ADB, "RFAPI Advertisement Data")
+DEFINE_MTYPE(RFAPI, RFAPI_ETI, "RFAPI Export Table Info")
+DEFINE_MTYPE(RFAPI, RFAPI_NVE_ADDR, "RFAPI NVE Address")
+DEFINE_MTYPE(RFAPI, RFAPI_PREFIX_BAG, "RFAPI Prefix Bag")
+DEFINE_MTYPE(RFAPI, RFAPI_IT_EXTRA, "RFAPI IT Extra")
+DEFINE_MTYPE(RFAPI, RFAPI_INFO, "RFAPI Info")
+DEFINE_MTYPE(RFAPI, RFAPI_ADDR, "RFAPI Addr")
+DEFINE_MTYPE(RFAPI, RFAPI_UPDATED_RESPONSE_QUEUE, "RFAPI Updated Rsp Queue")
+DEFINE_MTYPE(RFAPI, RFAPI_RECENT_DELETE, "RFAPI Recently Deleted Route")
+DEFINE_MTYPE(RFAPI, RFAPI_L2ADDR_OPT, "RFAPI L2 Address Option")
+DEFINE_MTYPE(RFAPI, RFAPI_AP, "RFAPI Advertised Prefix")
+DEFINE_MTYPE(RFAPI, RFAPI_MONITOR_ETH, "RFAPI Monitor Ethernet")
+
+DEFINE_QOBJ_TYPE(rfapi_nve_group_cfg)
+DEFINE_QOBJ_TYPE(rfapi_l2_group_cfg)
+/***********************************************************************
+ * RFAPI Support
+ ***********************************************************************/
+
+
+/*
+ * compatibility to old quagga_time call
+ * time_t value in terms of stabilised absolute time.
+ * replacement for POSIX time() + */ +time_t +rfapi_time (time_t *t) +{ + time_t clock = bgp_clock(); + if (t) + *t = clock; + return clock; +} + +void +nve_group_to_nve_list ( + struct rfapi_nve_group_cfg *rfg, + struct list **nves, + uint8_t family) /* AF_INET, AF_INET6 */ +{ + struct listnode *hln; + struct rfapi_descriptor *rfd; + + /* + * loop over nves in this grp, add to list + */ + for (ALL_LIST_ELEMENTS_RO (rfg->nves, hln, rfd)) + { + if (rfd->vn_addr.addr_family == family) + { + if (!*nves) + *nves = list_new (); + listnode_add (*nves, rfd); + } + } +} + + +struct rfapi_nve_group_cfg * +bgp_rfapi_cfg_match_group ( + struct rfapi_cfg *hc, + struct prefix *vn, + struct prefix *un) +{ + struct rfapi_nve_group_cfg *rfg_vn = NULL; + struct rfapi_nve_group_cfg *rfg_un = NULL; + + struct route_table *rt_vn; + struct route_table *rt_un; + struct route_node *rn_vn; + struct route_node *rn_un; + + struct rfapi_nve_group_cfg *rfg; + struct listnode *node, *nnode; + + switch (vn->family) + { + case AF_INET: + rt_vn = &(hc->nve_groups_vn[AFI_IP]); + break; + case AF_INET6: + rt_vn = &(hc->nve_groups_vn[AFI_IP6]); + break; + default: + return NULL; + } + + switch (un->family) + { + case AF_INET: + rt_un = &(hc->nve_groups_un[AFI_IP]); + break; + case AF_INET6: + rt_un = &(hc->nve_groups_un[AFI_IP6]); + break; + default: + return NULL; + } + + rn_vn = route_node_match (rt_vn, vn); /* NB locks node */ + if (rn_vn) + { + rfg_vn = rn_vn->info; + route_unlock_node (rn_vn); + } + + rn_un = route_node_match (rt_un, un); /* NB locks node */ + if (rn_un) + { + rfg_un = rn_un->info; + route_unlock_node (rn_un); + } + +#if BGP_VNC_DEBUG_MATCH_GROUP + { + char buf[BUFSIZ]; + + prefix2str (vn, buf, BUFSIZ); + zlog_debug ("%s: vn prefix: %s", __func__, buf); + + prefix2str (un, buf, BUFSIZ); + zlog_debug ("%s: un prefix: %s", __func__, buf); + + zlog_debug ("%s: rn_vn=%p, rn_un=%p, rfg_vn=%p, rfg_un=%p", + __func__, rn_vn, rn_un, rfg_vn, rfg_un); + } +#endif + + + if (rfg_un == rfg_vn) /* same group */ + return rfg_un; + if (!rfg_un) /* un doesn't match, return vn-matched grp */ + return rfg_vn; + if (!rfg_vn) /* vn doesn't match, return un-matched grp */ + return rfg_un; + + /* + * Two different nve groups match: the group configured earlier wins. + * For now, just walk the sequential list and pick the first one. + * If this approach is too slow, then store serial numbers in the + * nve group structures as they are defined and just compare + * serial numbers. 
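A concrete example of this tie-break, continuing the comment above: suppose two groups are configured so that an NVE's VN address longest-matches one group while its UN address longest-matches the other. The group names and prefixes below are illustrative, and the "prefix un" form is assumed to be accepted alongside "prefix vn":

 vnc nve-group red
  prefix vn 10.0.0.0/8
  rt both 1000:10
 exit-vnc
 !
 vnc nve-group blue
  prefix un 192.168.0.0/16
  rt both 1000:20
 exit-vnc

For an NVE registering with VN address 10.1.1.1 and UN address 192.168.5.5, the VN table matches red and the UN table matches blue; because red was configured first it is found first on nve_groups_sequential, so bgp_rfapi_cfg_match_group() returns red.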
+ */ + for (ALL_LIST_ELEMENTS (hc->nve_groups_sequential, node, nnode, rfg)) + { + if ((rfg == rfg_un) || (rfg == rfg_vn)) + { + return rfg; + } + } + zlog_debug ("%s: shouldn't happen, returning NULL when un and vn match", + __func__); + return NULL; /* shouldn't happen */ +} + +/*------------------------------------------ + * rfapi_get_rfp_start_val + * + * Returns value passed to rfapi on rfp_start + * + * input: + * void * bgp structure + * + * returns: + * void * + *------------------------------------------*/ +void * +rfapi_get_rfp_start_val (void *bgpv) +{ + struct bgp *bgp = bgpv; + if (bgp == NULL || bgp->rfapi == NULL) + return NULL; + return bgp->rfapi->rfp; +} + +/*------------------------------------------ + * bgp_rfapi_is_vnc_configured + * + * Returns if VNC (BGP VPN messaging /VPN & encap SAFIs) are configured + * + * input: + * bgp NULL (=use default instance) + * + * output: + * + * return value: If VNC is configured for the bgpd instance + * 0 Success + * ENXIO VNC not configured + --------------------------------------------*/ +int +bgp_rfapi_is_vnc_configured (struct bgp *bgp) +{ + if (bgp == NULL) + bgp = bgp_get_default (); + + if (bgp && bgp->rfapi_cfg) + { + struct peer *peer; + struct peer_group *group; + struct listnode *node, *nnode; + /* if have configured VPN neighbors, assume running VNC */ + for (ALL_LIST_ELEMENTS (bgp->group, node, nnode, group)) + { + if (group->conf->afc[AFI_IP][SAFI_MPLS_VPN] || + group->conf->afc[AFI_IP6][SAFI_MPLS_VPN]) + return 0; + } + for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer)) + { + if (peer->afc[AFI_IP][SAFI_MPLS_VPN] || + peer->afc[AFI_IP6][SAFI_MPLS_VPN]) + return 0; + } + } + return ENXIO; +} + +/*********************************************************************** + * VNC Configuration/CLI + ***********************************************************************/ + + +DEFUN (vnc_advertise_un_method, + vnc_advertise_un_method_cmd, + "vnc advertise-un-method (encap-safi|encap-attr)", + VNC_CONFIG_STR + "Method of advertising UN addresses\n" + "Via Encapsulation SAFI\n" + "Via Tunnel Encap attribute (in VPN SAFI)\n") +{ + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "VNC not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + + if (!strncmp (argv[0], "encap-safi", 7)) + { + bgp->rfapi_cfg->flags |= BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP; + } + else + { + bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP; + } + + return CMD_SUCCESS; +} + +/*------------------------------------------------------------------------- + * RFG defaults + *-----------------------------------------------------------------------*/ + + +DEFUN (vnc_defaults, + vnc_defaults_cmd, + "vnc defaults", VNC_CONFIG_STR "Configure default NVE group\n") +{ + vty->node = BGP_VNC_DEFAULTS_NODE; + return CMD_SUCCESS; +} + +static int +set_ecom_list ( + struct vty *vty, + int argc, + const char **argv, + struct ecommunity **list) +{ + struct ecommunity *ecom = NULL; + struct ecommunity *ecomadd; + + for (; argc; --argc, ++argv) + { + + ecomadd = ecommunity_str2com (*argv, ECOMMUNITY_ROUTE_TARGET, 0); + if (!ecomadd) + { + vty_out (vty, "Malformed community-list value%s", VTY_NEWLINE); + if (ecom) + ecommunity_free (&ecom); + return CMD_WARNING; + } + + if (ecom) + { + ecommunity_merge (ecom, ecomadd); + ecommunity_free (&ecomadd); + } + else + { + ecom = ecomadd; + } + } + + if (*list) + { + ecommunity_free 
(&*list); + } + *list = ecom; + + return CMD_SUCCESS; +} + +DEFUN (vnc_defaults_rt_import, + vnc_defaults_rt_import_cmd, + "rt import .RTLIST", + "Specify default route targets\n" + "Import filter\n" + "Space separated route target list (A.B.C.D:MN|EF:OPQR|GHJK:MN)\n") +{ + struct bgp *bgp = vty->index; + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + return set_ecom_list (vty, argc, argv, + &bgp->rfapi_cfg->default_rt_import_list); +} + +DEFUN (vnc_defaults_rt_export, + vnc_defaults_rt_export_cmd, + "rt export .RTLIST", + "Configure default route targets\n" + "Export filter\n" + "Space separated route target list (A.B.C.D:MN|EF:OPQR|GHJK:MN)\n") +{ + struct bgp *bgp = vty->index; + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + return set_ecom_list (vty, argc, argv, + &bgp->rfapi_cfg->default_rt_export_list); +} + +DEFUN (vnc_defaults_rt_both, + vnc_defaults_rt_both_cmd, + "rt both .RTLIST", + "Configure default route targets\n" + "Export+import filters\n" + "Space separated route target list (A.B.C.D:MN|EF:OPQR|GHJK:MN)\n") +{ + int rc; + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + rc = + set_ecom_list (vty, argc, argv, &bgp->rfapi_cfg->default_rt_import_list); + if (rc != CMD_SUCCESS) + return rc; + return set_ecom_list (vty, argc, argv, + &bgp->rfapi_cfg->default_rt_export_list); +} + +DEFUN (vnc_defaults_rd, + vnc_defaults_rd_cmd, + "rd ASN:nn_or_IP-address:nn", + "Specify default route distinguisher\n" + "Route Distinguisher (: | : | auto:vn: )\n") +{ + int ret; + struct prefix_rd prd; + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strncmp (argv[0], "auto:vn:", 8)) + { + /* + * use AF_UNIX to designate automatically-assigned RD + * auto:vn:nn where nn is a 2-octet quantity + */ + char *end = NULL; + uint32_t value32 = strtoul (argv[0] + 8, &end, 10); + uint16_t value = value32 & 0xffff; + + if (!*(argv[0] + 5) || *end) + { + vty_out (vty, "%% Malformed rd%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (value32 > 0xffff) + { + vty_out (vty, "%% Malformed rd (must be less than %u%s", + 0x0ffff, VTY_NEWLINE); + return CMD_WARNING; + } + + memset (&prd, 0, sizeof (prd)); + prd.family = AF_UNIX; + prd.prefixlen = 64; + prd.val[0] = (RD_TYPE_IP >> 8) & 0x0ff; + prd.val[1] = RD_TYPE_IP & 0x0ff; + prd.val[6] = (value >> 8) & 0x0ff; + prd.val[7] = value & 0x0ff; + + } + else + { + + ret = str2prefix_rd (argv[0], &prd); + if (!ret) + { + vty_out (vty, "%% Malformed rd%s", VTY_NEWLINE); + return CMD_WARNING; + } + } + + bgp->rfapi_cfg->default_rd = prd; + return CMD_SUCCESS; +} + +DEFUN (vnc_defaults_l2rd, + vnc_defaults_l2rd_cmd, + "l2rd (ID|auto:vn)", + "Specify default Local Nve ID value to use in RD for L2 routes\n" + "Fixed value 1-255\n" + "use the low-order octet of the NVE's VN address\n") +{ + struct bgp *bgp = vty->index; + uint8_t value = 0; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "auto:vn")) + { + value = 0; + } + else + { + char *end = NULL; + unsigned long value_l = strtoul (argv[0], &end, 10); + + value = value_l & 0xff; + if (!*(argv[0]) || *end) + { + vty_out (vty, "%% Malformed l2 nve ID \"%s\"%s", argv[0], + VTY_NEWLINE); + return CMD_WARNING; + } + if ((value_l < 1) || 
(value_l > 0xff)) + { + vty_out (vty, + "%% Malformed l2 nve id (must be greater than 0 and less than %u%s", + 0x100, VTY_NEWLINE); + return CMD_WARNING; + } + } + bgp->rfapi_cfg->flags |= BGP_VNC_CONFIG_L2RD; + bgp->rfapi_cfg->default_l2rd = value; + + return CMD_SUCCESS; +} + +DEFUN (vnc_defaults_no_l2rd, + vnc_defaults_no_l2rd_cmd, + "no l2rd", + NO_STR + "Specify default Local Nve ID value to use in RD for L2 routes\n") +{ + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + bgp->rfapi_cfg->default_l2rd = 0; + bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_L2RD; + + return CMD_SUCCESS; +} + +DEFUN (vnc_defaults_responselifetime, + vnc_defaults_responselifetime_cmd, + "response-lifetime (LIFETIME|infinite)", + "Specify default response lifetime\n" + "Response lifetime in seconds\n" "Infinite response lifetime\n") +{ + uint32_t rspint; + struct bgp *bgp = vty->index; + struct rfapi *h = NULL; + struct listnode *hdnode; + struct rfapi_descriptor *rfd; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + h = bgp->rfapi; + if (!h) + return CMD_WARNING; + + if (!strcmp (argv[0], "infinite")) + { + rspint = RFAPI_INFINITE_LIFETIME; + } + else + { + VTY_GET_INTEGER ("Response Lifetime", rspint, argv[0]); + if (rspint > INT32_MAX) + rspint = INT32_MAX; /* is really an int, not an unsigned int */ + } + + bgp->rfapi_cfg->default_response_lifetime = rspint; + + for (ALL_LIST_ELEMENTS_RO (&h->descriptors, hdnode, rfd)) + if (rfd->rfg && !(rfd->rfg->flags & RFAPI_RFG_RESPONSE_LIFETIME)) + rfd->response_lifetime = rfd->rfg->response_lifetime = rspint; + + return CMD_SUCCESS; +} + +static struct rfapi_nve_group_cfg * +rfapi_group_lookup_byname (struct bgp *bgp, const char *name) +{ + struct rfapi_nve_group_cfg *rfg; + struct listnode *node, *nnode; + + for (ALL_LIST_ELEMENTS + (bgp->rfapi_cfg->nve_groups_sequential, node, nnode, rfg)) + { + if (!strcmp (rfg->name, name)) + return rfg; + } + return NULL; +} + +static struct rfapi_nve_group_cfg * +rfapi_group_new () +{ + return XCALLOC (MTYPE_RFAPI_GROUP_CFG, sizeof (struct rfapi_nve_group_cfg)); +} + +static struct rfapi_l2_group_cfg * +rfapi_l2_group_lookup_byname (struct bgp *bgp, const char *name) +{ + struct rfapi_l2_group_cfg *rfg; + struct listnode *node, *nnode; + + if (bgp->rfapi_cfg->l2_groups == NULL) /* not the best place for this */ + bgp->rfapi_cfg->l2_groups = list_new (); + + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->l2_groups, node, nnode, rfg)) + { + if (!strcmp (rfg->name, name)) + return rfg; + } + return NULL; +} + +static struct rfapi_l2_group_cfg * +rfapi_l2_group_new () +{ + return XCALLOC (MTYPE_RFAPI_L2_CFG, sizeof (struct rfapi_l2_group_cfg)); +} + +static void +rfapi_l2_group_del (struct rfapi_l2_group_cfg *rfg) +{ + XFREE (MTYPE_RFAPI_L2_CFG, rfg); +} + +static int +rfapi_str2route_type ( + const char *l3str, + const char *pstr, + afi_t *afi, + int *type) +{ + if (!l3str || !pstr) + return EINVAL; + + if (!strcmp (l3str, "ipv4")) + { + *afi = AFI_IP; + } + else + { + if (!strcmp (l3str, "ipv6")) + *afi = AFI_IP6; + else + return ENOENT; + } + + if (!strcmp (pstr, "connected")) + *type = ZEBRA_ROUTE_CONNECT; + if (!strcmp (pstr, "kernel")) + *type = ZEBRA_ROUTE_KERNEL; + if (!strcmp (pstr, "static")) + *type = ZEBRA_ROUTE_STATIC; + if (!strcmp (pstr, "bgp")) + *type = ZEBRA_ROUTE_BGP; + if (!strcmp (pstr, "bgp-direct")) + *type = ZEBRA_ROUTE_BGP_DIRECT; + if (!strcmp (pstr, 
"bgp-direct-to-nve-groups")) + *type = ZEBRA_ROUTE_BGP_DIRECT_EXT; + + if (!strcmp (pstr, "rip")) + { + if (*afi == AFI_IP) + *type = ZEBRA_ROUTE_RIP; + else + *type = ZEBRA_ROUTE_RIPNG; + } + + if (!strcmp (pstr, "ripng")) + { + if (*afi == AFI_IP) + return EAFNOSUPPORT; + *type = ZEBRA_ROUTE_RIPNG; + } + + if (!strcmp (pstr, "ospf")) + { + if (*afi == AFI_IP) + *type = ZEBRA_ROUTE_OSPF; + else + *type = ZEBRA_ROUTE_OSPF6; + } + + if (!strcmp (pstr, "ospf6")) + { + if (*afi == AFI_IP) + return EAFNOSUPPORT; + *type = ZEBRA_ROUTE_OSPF6; + } + + return 0; +} + +/*------------------------------------------------------------------------- + * redistribute + *-----------------------------------------------------------------------*/ + +#define VNC_REDIST_ENABLE(bgp, afi, type) do { \ + switch (type) { \ + case ZEBRA_ROUTE_BGP_DIRECT: \ + vnc_import_bgp_redist_enable((bgp), (afi)); \ + break; \ + case ZEBRA_ROUTE_BGP_DIRECT_EXT: \ + vnc_import_bgp_exterior_redist_enable((bgp), (afi));\ + break; \ + default: \ + vnc_redistribute_set((bgp), (afi), (type)); \ + break; \ + } \ +} while (0) + +#define VNC_REDIST_DISABLE(bgp, afi, type) do { \ + switch (type) { \ + case ZEBRA_ROUTE_BGP_DIRECT: \ + vnc_import_bgp_redist_disable((bgp), (afi)); \ + break; \ + case ZEBRA_ROUTE_BGP_DIRECT_EXT: \ + vnc_import_bgp_exterior_redist_disable((bgp), (afi));\ + break; \ + default: \ + vnc_redistribute_unset((bgp), (afi), (type)); \ + break; \ + } \ +} while (0) + +static uint8_t redist_was_enabled[AFI_MAX][ZEBRA_ROUTE_MAX]; + +static void +vnc_redistribute_prechange (struct bgp *bgp) +{ + afi_t afi; + int type; + + zlog_debug ("%s: entry", __func__); + memset (redist_was_enabled, 0, sizeof (redist_was_enabled)); + + /* + * Look to see if we have any redistribution enabled. If so, flush + * the corresponding routes and turn off redistribution temporarily. + * We need to do it because the RD's used for the redistributed + * routes depend on the nve group. + */ + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + for (type = 0; type < ZEBRA_ROUTE_MAX; ++type) + { + if (bgp->rfapi_cfg->redist[afi][type]) + { + redist_was_enabled[afi][type] = 1; + VNC_REDIST_DISABLE (bgp, afi, type); + } + } + } + zlog_debug ("%s: return", __func__); +} + +static void +vnc_redistribute_postchange (struct bgp *bgp) +{ + afi_t afi; + int type; + + zlog_debug ("%s: entry", __func__); + /* + * If we turned off redistribution above, turn it back on. 
Doing so + * will tell zebra to resend the routes to us + */ + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + for (type = 0; type < ZEBRA_ROUTE_MAX; ++type) + { + if (redist_was_enabled[afi][type]) + { + VNC_REDIST_ENABLE (bgp, afi, type); + } + } + } + zlog_debug ("%s: return", __func__); +} + +DEFUN (vnc_redistribute_rh_roo_localadmin, + vnc_redistribute_rh_roo_localadmin_cmd, + "vnc redistribute resolve-nve roo-ec-local-admin <0-65535>", + VNC_CONFIG_STR + "Redistribute routes into VNC\n" + "Resolve-NVE mode\n" + "Route Origin Extended Community Local Admin Field\n" "Field value\n") +{ + struct bgp *bgp = vty->index; + uint32_t localadmin; + char *endptr; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (!bgp->rfapi_cfg) + { + vty_out (vty, "RFAPI not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + localadmin = strtoul (argv[0], &endptr, 0); + if (!*(argv[0]) || *endptr) + { + vty_out (vty, "%% Malformed value%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (localadmin > 0xffff) + { + vty_out (vty, "%% Value out of range (0-%d)%s", 0xffff, VTY_NEWLINE); + return CMD_WARNING; + } + + if (bgp->rfapi_cfg->resolve_nve_roo_local_admin == localadmin) + return CMD_SUCCESS; + + if ((bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS) == + BGP_VNC_CONFIG_EXPORT_BGP_MODE_CE) + { + + vnc_export_bgp_prechange (bgp); + } + vnc_redistribute_prechange (bgp); + + bgp->rfapi_cfg->resolve_nve_roo_local_admin = localadmin; + + if ((bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS) == + BGP_VNC_CONFIG_EXPORT_BGP_MODE_CE) + { + + vnc_export_bgp_postchange (bgp); + } + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + + +DEFUN (vnc_redistribute_mode, + vnc_redistribute_mode_cmd, + "vnc redistribute mode (nve-group|plain|resolve-nve)", + VNC_CONFIG_STR + "Redistribute routes into VNC\n" + "Redistribution mode\n" + "Based on redistribute nve-group\n" + "Unmodified\n" "Resolve each nexthop to connected NVEs\n") +{ + struct bgp *bgp = vty->index; + vnc_redist_mode_t newmode; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (!bgp->rfapi_cfg) + { + vty_out (vty, "RFAPI not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + + switch (*argv[0]) + { + case 'n': + newmode = VNC_REDIST_MODE_RFG; + break; + + case 'p': + newmode = VNC_REDIST_MODE_PLAIN; + break; + + case 'r': + newmode = VNC_REDIST_MODE_RESOLVE_NVE; + break; + + default: + vty_out (vty, "unknown redistribute mode%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (newmode != bgp->rfapi_cfg->redist_mode) + { + vnc_redistribute_prechange (bgp); + bgp->rfapi_cfg->redist_mode = newmode; + vnc_redistribute_postchange (bgp); + } + + return CMD_SUCCESS; +} + +DEFUN (vnc_redistribute_protocol, + vnc_redistribute_protocol_cmd, + "vnc redistribute (ipv4|ipv6) (bgp|bgp-direct|bgp-direct-to-nve-groups|connected|kernel|ospf|rip|static)", + VNC_CONFIG_STR + "Redistribute routes into VNC\n" + "IPv4 routes\n" + "IPv6 routes\n" + "From BGP\n" + "From BGP without Zebra\n" + "From BGP without Zebra, only to configured NVE groups\n" + "Connected interfaces\n" + "From kernel routes\n" + "From Open Shortest Path First (OSPF)\n" + "From Routing Information Protocol (RIP)\n" "From Static routes\n") +{ + int type = ZEBRA_ROUTE_MAX; /* init to bogus value */ + struct bgp *bgp = vty->index; + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return 
CMD_WARNING; + } + if (!bgp->rfapi_cfg) + { + vty_out (vty, "RFAPI not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (rfapi_str2route_type (argv[0], argv[1], &afi, &type)) + { + vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (type == ZEBRA_ROUTE_BGP_DIRECT_EXT) + { + if (bgp->rfapi_cfg->redist_bgp_exterior_view_name) + { + VNC_REDIST_DISABLE (bgp, afi, type); /* disabled view implicitly */ + free (bgp->rfapi_cfg->redist_bgp_exterior_view_name); + bgp->rfapi_cfg->redist_bgp_exterior_view_name = NULL; + } + bgp->rfapi_cfg->redist_bgp_exterior_view = bgp; + } + + VNC_REDIST_ENABLE (bgp, afi, type); + + return CMD_SUCCESS; +} + +DEFUN (vnc_no_redistribute_protocol, + vnc_no_redistribute_protocol_cmd, + "no vnc redistribute (ipv4|ipv6) (bgp|bgp-direct|bgp-direct-to-nve-groups|connected|kernel|ospf|rip|static)", + NO_STR + VNC_CONFIG_STR + "Redistribute from other protocol\n" + "IPv4 routes\n" + "IPv6 routes\n" + "From BGP\n" + "From BGP without Zebra\n" + "From BGP without Zebra, only to configured NVE groups\n" + "Connected interfaces\n" + "From kernel routes\n" + "From Open Shortest Path First (OSPF)\n" + "From Routing Information Protocol (RIP)\n" "From Static routes\n") +{ + int type; + struct bgp *bgp = vty->index; + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (!bgp->rfapi_cfg) + { + vty_out (vty, "RFAPI not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (rfapi_str2route_type (argv[0], argv[1], &afi, &type)) + { + vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE); + return CMD_WARNING; + } + + VNC_REDIST_DISABLE (bgp, afi, type); + + if (type == ZEBRA_ROUTE_BGP_DIRECT_EXT) + { + if (bgp->rfapi_cfg->redist_bgp_exterior_view_name) + { + free (bgp->rfapi_cfg->redist_bgp_exterior_view_name); + bgp->rfapi_cfg->redist_bgp_exterior_view_name = NULL; + } + bgp->rfapi_cfg->redist_bgp_exterior_view = NULL; + } + + return CMD_SUCCESS; +} + +DEFUN (vnc_redistribute_bgp_exterior, + vnc_redistribute_bgp_exterior_cmd, + "vnc redistribute (ipv4|ipv6) bgp-direct-to-nve-groups view NAME", + VNC_CONFIG_STR + "Redistribute routes into VNC\n" + "IPv4 routes\n" + "IPv6 routes\n" + "From BGP without Zebra, only to configured NVE groups\n" + "From BGP view\n" "BGP view name\n") +{ + int type; + struct bgp *bgp = vty->index; + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (!bgp->rfapi_cfg) + { + vty_out (vty, "RFAPI not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (rfapi_str2route_type (argv[0], "bgp-direct-to-nve-groups", &afi, &type)) + { + vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (bgp->rfapi_cfg->redist_bgp_exterior_view_name) + free (bgp->rfapi_cfg->redist_bgp_exterior_view_name); + bgp->rfapi_cfg->redist_bgp_exterior_view_name = strdup (argv[1]); + /* could be NULL if name is not defined yet */ + bgp->rfapi_cfg->redist_bgp_exterior_view = bgp_lookup_by_name (argv[1]); + + VNC_REDIST_ENABLE (bgp, afi, type); + + return CMD_SUCCESS; +} + +DEFUN (vnc_redistribute_nvegroup, + vnc_redistribute_nvegroup_cmd, + "vnc redistribute nve-group NAME", + VNC_CONFIG_STR + "Assign a NVE group to routes redistributed from another routing protocol\n" + "NVE group\n" "Group name\n") +{ + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if 
(!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + vnc_redistribute_prechange (bgp); + + /* + * OK if nve group doesn't exist yet; we'll set the pointer + * when the group is defined later + */ + bgp->rfapi_cfg->rfg_redist = rfapi_group_lookup_byname (bgp, argv[0]); + if (bgp->rfapi_cfg->rfg_redist_name) + free (bgp->rfapi_cfg->rfg_redist_name); + bgp->rfapi_cfg->rfg_redist_name = strdup (argv[0]); + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +DEFUN (vnc_redistribute_no_nvegroup, + vnc_redistribute_no_nvegroup_cmd, + "no vnc redistribute nve-group", + NO_STR + VNC_CONFIG_STR + "Redistribute from other protocol\n" + "Assign a NVE group to routes redistributed from another routing protocol\n") +{ + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + vnc_redistribute_prechange (bgp); + + bgp->rfapi_cfg->rfg_redist = NULL; + if (bgp->rfapi_cfg->rfg_redist_name) + free (bgp->rfapi_cfg->rfg_redist_name); + bgp->rfapi_cfg->rfg_redist_name = NULL; + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + + +DEFUN (vnc_redistribute_lifetime, + vnc_redistribute_lifetime_cmd, + "vnc redistribute lifetime (LIFETIME|infinite)", + VNC_CONFIG_STR + "Assign a lifetime to routes redistributed from another routing protocol\n" + "lifetime value (32 bit)\n") +{ + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + vnc_redistribute_prechange (bgp); + + if (!strcmp (argv[0], "infinite")) + { + bgp->rfapi_cfg->redist_lifetime = RFAPI_INFINITE_LIFETIME; + } + else + { + VTY_GET_INTEGER ("Response Lifetime", bgp->rfapi_cfg->redist_lifetime, + argv[0]); + } + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +/*-- redist policy, non-nvegroup start --*/ + +DEFUN (vnc_redist_bgpdirect_no_prefixlist, + vnc_redist_bgpdirect_no_prefixlist_cmd, + "no vnc redistribute (bgp-direct|bgp-direct-to-nve-groups) (ipv4|ipv6) prefix-list", + NO_STR + VNC_CONFIG_STR + "Redistribute from other protocol\n" + "Redistribute from BGP directly\n" + "Redistribute from BGP without Zebra, only to configured NVE groups\n" + "IPv4 routes\n" + "IPv6 routes\n" "Prefix-list for filtering redistributed routes\n") +{ + struct bgp *bgp = vty->index; + afi_t afi; + struct rfapi_cfg *hc; + uint8_t route_type = 0; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "bgp-direct")) + { + route_type = ZEBRA_ROUTE_BGP_DIRECT; + } + else + { + route_type = ZEBRA_ROUTE_BGP_DIRECT_EXT; + } + + if (!strcmp (argv[1], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + vnc_redistribute_prechange (bgp); + + if (hc->plist_redist_name[route_type][afi]) + free (hc->plist_redist_name[route_type][afi]); + hc->plist_redist_name[route_type][afi] = NULL; + hc->plist_redist[route_type][afi] = NULL; + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +DEFUN (vnc_redist_bgpdirect_prefixlist, + vnc_redist_bgpdirect_prefixlist_cmd, + "vnc 
redistribute (bgp-direct|bgp-direct-to-nve-groups) (ipv4|ipv6) prefix-list NAME", + VNC_CONFIG_STR + "Redistribute from other protocol\n" + "Redistribute from BGP directly\n" + "Redistribute from BGP without Zebra, only to configured NVE groups\n" + "IPv4 routes\n" + "IPv6 routes\n" + "Prefix-list for filtering redistributed routes\n" + "prefix list name\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_cfg *hc; + afi_t afi; + uint8_t route_type = 0; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "bgp-direct")) + { + route_type = ZEBRA_ROUTE_BGP_DIRECT; + } + else + { + route_type = ZEBRA_ROUTE_BGP_DIRECT_EXT; + } + + if (!strcmp (argv[1], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + vnc_redistribute_prechange (bgp); + + if (hc->plist_redist_name[route_type][afi]) + free (hc->plist_redist_name[route_type][afi]); + hc->plist_redist_name[route_type][afi] = strdup (argv[2]); + hc->plist_redist[route_type][afi] = prefix_list_lookup (afi, argv[2]); + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +DEFUN (vnc_redist_bgpdirect_no_routemap, + vnc_redist_bgpdirect_no_routemap_cmd, + "no vnc redistribute (bgp-direct|bgp-direct-to-nve-groups) route-map", + NO_STR + VNC_CONFIG_STR + "Redistribute from other protocols\n" + "Redistribute from BGP directly\n" + "Redistribute from BGP without Zebra, only to configured NVE groups\n" + "Route-map for filtering redistributed routes\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_cfg *hc; + uint8_t route_type = 0; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "bgp-direct")) + { + route_type = ZEBRA_ROUTE_BGP_DIRECT; + } + else + { + route_type = ZEBRA_ROUTE_BGP_DIRECT_EXT; + } + + vnc_redistribute_prechange (bgp); + + if (hc->routemap_redist_name[route_type]) + free (hc->routemap_redist_name[route_type]); + hc->routemap_redist_name[route_type] = NULL; + hc->routemap_redist[route_type] = NULL; + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +DEFUN (vnc_redist_bgpdirect_routemap, + vnc_redist_bgpdirect_routemap_cmd, + "vnc redistribute (bgp-direct|bgp-direct-to-nve-groups) route-map NAME", + VNC_CONFIG_STR + "Redistribute from other protocols\n" + "Redistribute from BGP directly\n" + "Redistribute from BGP without Zebra, only to configured NVE groups\n" + "Route-map for filtering exported routes\n" "route map name\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_cfg *hc; + uint8_t route_type = 0; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "bgp-direct")) + { + route_type = ZEBRA_ROUTE_BGP_DIRECT; + } + else + { + route_type = ZEBRA_ROUTE_BGP_DIRECT_EXT; + } + + vnc_redistribute_prechange (bgp); + + if (hc->routemap_redist_name[route_type]) + free (hc->routemap_redist_name[route_type]); + hc->routemap_redist_name[route_type] = strdup (argv[1]); + hc->routemap_redist[route_type] = route_map_lookup_by_name (argv[1]); + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; 
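Pulling the redistribution commands defined in this block together, a configuration under router bgp might look roughly like the following sketch; the filter names are placeholders and their prefix-list and route-map definitions are assumed to exist elsewhere in the configuration:

 vnc redistribute mode resolve-nve
 vnc redistribute ipv4 bgp-direct
 vnc redistribute lifetime 300
 vnc redistribute bgp-direct ipv4 prefix-list VNC-REDIST-V4
 vnc redistribute bgp-direct route-map VNC-REDIST-RM

The policy-changing handlers (mode, lifetime, prefix-list, route-map) bracket their change with vnc_redistribute_prechange() and vnc_redistribute_postchange(), which temporarily disable redistribution and flush the redistributed routes so that they are re-learned with RDs derived from the (possibly different) NVE group.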
+} + +/*-- redist policy, non-nvegroup end --*/ + +/*-- redist policy, nvegroup start --*/ + +DEFUN (vnc_nve_group_redist_bgpdirect_no_prefixlist, + vnc_nve_group_redist_bgpdirect_no_prefixlist_cmd, + "no redistribute bgp-direct (ipv4|ipv6) prefix-list", + NO_STR + "Redistribute from other protocol\n" + "Redistribute from BGP directly\n" + "Disable redistribute filter\n" + "IPv4 routes\n" + "IPv6 routes\n" "Prefix-list for filtering redistributed routes\n") +{ + struct bgp *bgp = vty->index; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg) + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + vnc_redistribute_prechange (bgp); + + if (rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi]) + free (rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi]); + rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi] = NULL; + rfg->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi] = NULL; + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_redist_bgpdirect_prefixlist, + vnc_nve_group_redist_bgpdirect_prefixlist_cmd, + "redistribute bgp-direct (ipv4|ipv6) prefix-list NAME", + "Redistribute from other protocol\n" + "Redistribute from BGP directly\n" + "IPv4 routes\n" + "IPv6 routes\n" + "Prefix-list for filtering redistributed routes\n" + "prefix list name\n") +{ + struct bgp *bgp = vty->index; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + vnc_redistribute_prechange (bgp); + + if (rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi]) + free (rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi]); + rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi] = strdup (argv[1]); + rfg->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi] = + prefix_list_lookup (afi, argv[1]); + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_redist_bgpdirect_no_routemap, + vnc_nve_group_redist_bgpdirect_no_routemap_cmd, + "no redistribute bgp-direct route-map", + NO_STR + "Redistribute from other protocols\n" + "Redistribute from BGP directly\n" + "Disable redistribute filter\n" + "Route-map for filtering redistributed routes\n") +{ + struct bgp *bgp = vty->index; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup 
(bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + vnc_redistribute_prechange (bgp); + + if (rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT]) + free (rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT]); + rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT] = NULL; + rfg->routemap_redist[ZEBRA_ROUTE_BGP_DIRECT] = NULL; + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_redist_bgpdirect_routemap, + vnc_nve_group_redist_bgpdirect_routemap_cmd, + "redistribute bgp-direct route-map NAME", + "Redistribute from other protocols\n" + "Redistribute from BGP directly\n" + "Route-map for filtering exported routes\n" "route map name\n") +{ + struct bgp *bgp = vty->index; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + vnc_redistribute_prechange (bgp); + + if (rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT]) + free (rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT]); + rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT] = strdup (argv[0]); + rfg->routemap_redist[ZEBRA_ROUTE_BGP_DIRECT] = + route_map_lookup_by_name (argv[0]); + + vnc_redistribute_postchange (bgp); + + return CMD_SUCCESS; +} + +/*-- redist policy, nvegroup end --*/ + +/*------------------------------------------------------------------------- + * export + *-----------------------------------------------------------------------*/ + +DEFUN (vnc_export_mode, + vnc_export_mode_cmd, + "vnc export (bgp|zebra) mode (group-nve|ce|none|registering-nve)", + VNC_CONFIG_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "Select export mode\n" + "Export routes with nve-group next-hops\n" + "Export routes with NVE connected router next-hops\n" + "Disable export\n" "Export routes with registering NVE as next-hop\n") +{ + struct bgp *bgp = vty->index; + uint32_t oldmode = 0; + uint32_t newmode = 0; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "VNC not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (*argv[0] == 'b') + { + oldmode = bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS; + switch (*argv[1]) + { + case 'g': + newmode = BGP_VNC_CONFIG_EXPORT_BGP_MODE_GRP; + break; + case 'c': + newmode = BGP_VNC_CONFIG_EXPORT_BGP_MODE_CE; + break; + case 'n': + newmode = 0; + break; + case 'r': + newmode = BGP_VNC_CONFIG_EXPORT_BGP_MODE_RH; + break; + default: + vty_out (vty, "Invalid mode specified%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (newmode == oldmode) + { + vty_out (vty, "Mode unchanged%s", VTY_NEWLINE); + return CMD_SUCCESS; + } + + vnc_export_bgp_prechange (bgp); + + bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS; + bgp->rfapi_cfg->flags |= newmode; + + vnc_export_bgp_postchange (bgp); + + + } + else + { + /* + * export to zebra with RH mode is not yet implemented + */ + vty_out (vty, "Changing modes for zebra export not implemented 
yet%s", + VTY_NEWLINE); + return CMD_WARNING; + + oldmode = bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_BITS; + bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_BITS; + switch (*argv[1]) + { + case 'g': + if (oldmode == BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_RH) + { + /* TBD */ + } + bgp->rfapi_cfg->flags |= BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_GRP; + if (oldmode != BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_GRP) + { + /* TBD */ + } + break; + case 'n': + if (oldmode == BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_RH) + { + /* TBD */ + } + if (oldmode == BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_GRP) + { + /* TBD */ + } + break; + case 'r': + if (oldmode == BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_GRP) + { + /* TBD */ + } + bgp->rfapi_cfg->flags |= BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_RH; + if (oldmode != BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_RH) + { + /* TBD */ + } + break; + default: + vty_out (vty, "Invalid mode%s", VTY_NEWLINE); + return CMD_WARNING; + } + } + + return CMD_SUCCESS; +} + +static struct rfapi_rfg_name * +rfgn_new () +{ + return XCALLOC (MTYPE_RFAPI_RFG_NAME, sizeof (struct rfapi_rfg_name)); +} + +static void +rfgn_free (struct rfapi_rfg_name *rfgn) +{ + XFREE (MTYPE_RFAPI_RFG_NAME, rfgn); +} + +DEFUN (vnc_export_nvegroup, + vnc_export_nvegroup_cmd, + "vnc export (bgp|zebra) group-nve group NAME", + VNC_CONFIG_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "NVE group, used in 'group-nve' export mode\n" + "NVE group\n" "Group name\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_nve_group_cfg *rfg_new; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + rfg_new = rfapi_group_lookup_byname (bgp, argv[1]); + + if (*argv[0] == 'b') + { + + struct listnode *node; + struct rfapi_rfg_name *rfgn; + + /* + * Set group for export to BGP Direct + */ + + /* see if group is already included in export list */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, rfgn)) + { + + if (!strcmp (rfgn->name, argv[1])) + { + /* already in the list: we're done */ + return CMD_SUCCESS; + } + } + + rfgn = rfgn_new (); + rfgn->name = strdup (argv[1]); + rfgn->rfg = rfg_new; /* OK if not set yet */ + + listnode_add (bgp->rfapi_cfg->rfg_export_direct_bgp_l, rfgn); + + zlog_debug ("%s: testing rfg_new", __func__); + if (rfg_new) + { + zlog_debug ("%s: testing bgp grp mode enabled", __func__); + if (VNC_EXPORT_BGP_GRP_ENABLED (bgp->rfapi_cfg)) + zlog_debug ("%s: calling vnc_direct_bgp_add_group", __func__); + vnc_direct_bgp_add_group (bgp, rfg_new); + } + + } + else + { + + struct listnode *node; + struct rfapi_rfg_name *rfgn; + + /* + * Set group for export to Zebra + */ + + /* see if group is already included in export list */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_zebra_l, + node, rfgn)) + { + + if (!strcmp (rfgn->name, argv[1])) + { + /* already in the list: we're done */ + return CMD_SUCCESS; + } + } + + rfgn = rfgn_new (); + rfgn->name = strdup (argv[1]); + rfgn->rfg = rfg_new; /* OK if not set yet */ + + listnode_add (bgp->rfapi_cfg->rfg_export_zebra_l, rfgn); + + if (rfg_new) + { + if (VNC_EXPORT_ZEBRA_GRP_ENABLED (bgp->rfapi_cfg)) + vnc_zebra_add_group (bgp, rfg_new); + } + } + + return CMD_SUCCESS; +} + +/* + * This command applies to routes exported from VNC to BGP directly + * without going though zebra + */ +DEFUN (vnc_no_export_nvegroup, + 
vnc_no_export_nvegroup_cmd, + "vnc export (bgp|zebra) group-nve no group NAME", + VNC_CONFIG_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "NVE group, used in 'group-nve' export mode\n" + "Disable export of VNC routes\n" "NVE group\n" "Group name\n") +{ + struct bgp *bgp = vty->index; + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (*argv[0] == 'b') + { + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, nnode, rfgn)) + { + + if (rfgn->name && !strcmp (rfgn->name, argv[1])) + { + zlog_debug ("%s: matched \"%s\"", __func__, rfgn->name); + if (rfgn->rfg) + vnc_direct_bgp_del_group (bgp, rfgn->rfg); + free (rfgn->name); + list_delete_node (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node); + rfgn_free (rfgn); + break; + } + } + } + else + { + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_zebra_l, + node, nnode, rfgn)) + { + + zlog_debug ("does rfg \"%s\" match?", rfgn->name); + if (rfgn->name && !strcmp (rfgn->name, argv[1])) + { + if (rfgn->rfg) + vnc_zebra_del_group (bgp, rfgn->rfg); + free (rfgn->name); + list_delete_node (bgp->rfapi_cfg->rfg_export_zebra_l, node); + rfgn_free (rfgn); + break; + } + } + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_export_no_prefixlist, + vnc_nve_group_export_no_prefixlist_cmd, + "no export (bgp|zebra) (ipv4|ipv6) prefix-list [NAME]", + NO_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "IPv4 routes\n" + "IPv6 routes\n" + "Prefix-list for filtering exported routes\n" "prefix list name\n") +{ + struct bgp *bgp = vty->index; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[1], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + if (*argv[0] == 'b') + { + if (((argc >= 3) && !strcmp (argv[2], rfg->plist_export_bgp_name[afi])) + || (argc < 3)) + { + + if (rfg->plist_export_bgp_name[afi]) + free (rfg->plist_export_bgp_name[afi]); + rfg->plist_export_bgp_name[afi] = NULL; + rfg->plist_export_bgp[afi] = NULL; + + vnc_direct_bgp_reexport_group_afi (bgp, rfg, afi); + } + } + else + { + if (((argc >= 3) + && !strcmp (argv[2], rfg->plist_export_zebra_name[afi])) + || (argc < 3)) + { + if (rfg->plist_export_zebra_name[afi]) + free (rfg->plist_export_zebra_name[afi]); + rfg->plist_export_zebra_name[afi] = NULL; + rfg->plist_export_zebra[afi] = NULL; + + vnc_zebra_reexport_group_afi (bgp, rfg, afi); + } + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_export_prefixlist, + vnc_nve_group_export_prefixlist_cmd, + "export (bgp|zebra) (ipv4|ipv6) prefix-list NAME", + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "IPv4 routes\n" + "IPv6 routes\n" + "Prefix-list for filtering exported routes\n" "prefix list name\n") +{ + struct bgp *bgp = vty->index; + 
VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[1], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + if (*argv[0] == 'b') + { + if (rfg->plist_export_bgp_name[afi]) + free (rfg->plist_export_bgp_name[afi]); + rfg->plist_export_bgp_name[afi] = strdup (argv[2]); + rfg->plist_export_bgp[afi] = prefix_list_lookup (afi, argv[2]); + + vnc_direct_bgp_reexport_group_afi (bgp, rfg, afi); + + } + else + { + if (rfg->plist_export_zebra_name[afi]) + free (rfg->plist_export_zebra_name[afi]); + rfg->plist_export_zebra_name[afi] = strdup (argv[2]); + rfg->plist_export_zebra[afi] = prefix_list_lookup (afi, argv[2]); + + vnc_zebra_reexport_group_afi (bgp, rfg, afi); + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_export_no_routemap, + vnc_nve_group_export_no_routemap_cmd, + "no export (bgp|zebra) route-map [NAME]", + NO_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "Route-map for filtering exported routes\n" "route map name\n") +{ + struct bgp *bgp = vty->index; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (*argv[0] == 'b') + { + if (((argc >= 2) && !strcmp (argv[1], rfg->routemap_export_bgp_name)) || + (argc < 2)) + { + + if (rfg->routemap_export_bgp_name) + free (rfg->routemap_export_bgp_name); + rfg->routemap_export_bgp_name = NULL; + rfg->routemap_export_bgp = NULL; + + vnc_direct_bgp_reexport_group_afi (bgp, rfg, AFI_IP); + vnc_direct_bgp_reexport_group_afi (bgp, rfg, AFI_IP6); + } + } + else + { + if (((argc >= 2) && !strcmp (argv[1], rfg->routemap_export_zebra_name)) + || (argc < 2)) + { + if (rfg->routemap_export_zebra_name) + free (rfg->routemap_export_zebra_name); + rfg->routemap_export_zebra_name = NULL; + rfg->routemap_export_zebra = NULL; + + vnc_zebra_reexport_group_afi (bgp, rfg, AFI_IP); + vnc_zebra_reexport_group_afi (bgp, rfg, AFI_IP6); + } + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_export_routemap, + vnc_nve_group_export_routemap_cmd, + "export (bgp|zebra) route-map NAME", + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "Route-map for filtering exported routes\n" "route map name\n") +{ + struct bgp *bgp = vty->index; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!bgp->rfapi_cfg) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore 
*/ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (*argv[0] == 'b') + { + if (rfg->routemap_export_bgp_name) + free (rfg->routemap_export_bgp_name); + rfg->routemap_export_bgp_name = strdup (argv[1]); + rfg->routemap_export_bgp = route_map_lookup_by_name (argv[1]); + vnc_direct_bgp_reexport_group_afi (bgp, rfg, AFI_IP); + vnc_direct_bgp_reexport_group_afi (bgp, rfg, AFI_IP6); + } + else + { + if (rfg->routemap_export_zebra_name) + free (rfg->routemap_export_zebra_name); + rfg->routemap_export_zebra_name = strdup (argv[1]); + rfg->routemap_export_zebra = route_map_lookup_by_name (argv[1]); + vnc_zebra_reexport_group_afi (bgp, rfg, AFI_IP); + vnc_zebra_reexport_group_afi (bgp, rfg, AFI_IP6); + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_export_no_prefixlist, + vnc_nve_export_no_prefixlist_cmd, + "no vnc export (bgp|zebra) (ipv4|ipv6) prefix-list [NAME]", + NO_STR + VNC_CONFIG_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "IPv4 prefixes\n" + "IPv6 prefixes\n" + "Prefix-list for filtering exported routes\n" "Prefix list name\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_cfg *hc; + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[1], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + if (*argv[0] == 'b') + { + if (((argc >= 3) && !strcmp (argv[2], hc->plist_export_bgp_name[afi])) + || (argc < 3)) + { + + if (hc->plist_export_bgp_name[afi]) + free (hc->plist_export_bgp_name[afi]); + hc->plist_export_bgp_name[afi] = NULL; + hc->plist_export_bgp[afi] = NULL; + vnc_direct_bgp_reexport (bgp, afi); + } + } + else + { + if (((argc >= 3) && !strcmp (argv[2], hc->plist_export_zebra_name[afi])) + || (argc < 3)) + { + + if (hc->plist_export_zebra_name[afi]) + free (hc->plist_export_zebra_name[afi]); + hc->plist_export_zebra_name[afi] = NULL; + hc->plist_export_zebra[afi] = NULL; + /* TBD vnc_zebra_rh_reexport(bgp, afi); */ + } + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_export_prefixlist, + vnc_nve_export_prefixlist_cmd, + "vnc export (bgp|zebra) (ipv4|ipv6) prefix-list NAME", + VNC_CONFIG_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "Filters, used in 'registering-nve' export mode\n" + "IPv4 prefixes\n" + "IPv6 prefixes\n" + "Prefix-list for filtering exported routes\n" "Prefix list name\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_cfg *hc; + afi_t afi; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[1], "ipv4")) + { + afi = AFI_IP; + } + else + { + afi = AFI_IP6; + } + + if (*argv[0] == 'b') + { + if (hc->plist_export_bgp_name[afi]) + free (hc->plist_export_bgp_name[afi]); + hc->plist_export_bgp_name[afi] = strdup (argv[2]); + hc->plist_export_bgp[afi] = prefix_list_lookup (afi, argv[2]); + vnc_direct_bgp_reexport (bgp, afi); + } + else + { + if (hc->plist_export_zebra_name[afi]) + free (hc->plist_export_zebra_name[afi]); + hc->plist_export_zebra_name[afi] = strdup (argv[2]); + hc->plist_export_zebra[afi] = prefix_list_lookup (afi, argv[2]); + /* TBD vnc_zebra_rh_reexport(bgp, afi); */ + } + return CMD_SUCCESS; 
+} + +DEFUN (vnc_nve_export_no_routemap, + vnc_nve_export_no_routemap_cmd, + "no vnc export (bgp|zebra) route-map [NAME]", + NO_STR + VNC_CONFIG_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "Route-map for filtering exported routes\n" "Route map name\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_cfg *hc; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (*argv[0] == 'b') + { + if (((argc >= 2) && !strcmp (argv[1], hc->routemap_export_bgp_name)) || + (argc < 2)) + { + + if (hc->routemap_export_bgp_name) + free (hc->routemap_export_bgp_name); + hc->routemap_export_bgp_name = NULL; + hc->routemap_export_bgp = NULL; + vnc_direct_bgp_reexport (bgp, AFI_IP); + vnc_direct_bgp_reexport (bgp, AFI_IP6); + } + } + else + { + if (((argc >= 2) && !strcmp (argv[1], hc->routemap_export_zebra_name)) + || (argc < 2)) + { + + if (hc->routemap_export_zebra_name) + free (hc->routemap_export_zebra_name); + hc->routemap_export_zebra_name = NULL; + hc->routemap_export_zebra = NULL; + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP); */ + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP6); */ + } + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_export_routemap, + vnc_nve_export_routemap_cmd, + "vnc export (bgp|zebra) route-map NAME", + VNC_CONFIG_STR + "Export to other protocols\n" + "Export to BGP\n" + "Export to Zebra (experimental)\n" + "Filters, used in 'registering-nve' export mode\n" + "Route-map for filtering exported routes\n" "Route map name\n") +{ + struct bgp *bgp = vty->index; + struct rfapi_cfg *hc; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!(hc = bgp->rfapi_cfg)) + { + vty_out (vty, "rfapi not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (*argv[0] == 'b') + { + if (hc->routemap_export_bgp_name) + free (hc->routemap_export_bgp_name); + hc->routemap_export_bgp_name = strdup (argv[1]); + hc->routemap_export_bgp = route_map_lookup_by_name (argv[1]); + vnc_direct_bgp_reexport (bgp, AFI_IP); + vnc_direct_bgp_reexport (bgp, AFI_IP6); + } + else + { + if (hc->routemap_export_zebra_name) + free (hc->routemap_export_zebra_name); + hc->routemap_export_zebra_name = strdup (argv[1]); + hc->routemap_export_zebra = route_map_lookup_by_name (argv[1]); + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP); */ + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP6); */ + } + return CMD_SUCCESS; +} + + +/* + * respond to changes in the global prefix list configuration + */ +void +vnc_prefix_list_update (struct bgp *bgp) +{ + afi_t afi; + struct listnode *n; + struct rfapi_nve_group_cfg *rfg; + struct rfapi_cfg *hc; + int i; + + if (!bgp) + { + zlog_debug ("%s: No BGP process is configured", __func__); + return; + } + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: rfapi not configured", __func__); + return; + } + + for (afi = AFI_IP; afi < AFI_MAX; afi++) + { + /* + * Loop over nve groups + */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->nve_groups_sequential, + n, rfg)) + { + + if (rfg->plist_export_bgp_name[afi]) + { + rfg->plist_export_bgp[afi] = + prefix_list_lookup (afi, rfg->plist_export_bgp_name[afi]); + } + if (rfg->plist_export_zebra_name[afi]) + { + rfg->plist_export_zebra[afi] = + prefix_list_lookup (afi, rfg->plist_export_zebra_name[afi]); + } + for (i = 0; i < ZEBRA_ROUTE_MAX; ++i) + { + if 
(rfg->plist_redist_name[i][afi]) + { + rfg->plist_redist[i][afi] = + prefix_list_lookup (afi, rfg->plist_redist_name[i][afi]); + } + } + + vnc_direct_bgp_reexport_group_afi (bgp, rfg, afi); + /* TBD vnc_zebra_reexport_group_afi(bgp, rfg, afi); */ + } + + /* + * RH config, too + */ + if (hc->plist_export_bgp_name[afi]) + { + hc->plist_export_bgp[afi] = + prefix_list_lookup (afi, hc->plist_export_bgp_name[afi]); + } + if (hc->plist_export_zebra_name[afi]) + { + hc->plist_export_zebra[afi] = + prefix_list_lookup (afi, hc->plist_export_zebra_name[afi]); + } + + for (i = 0; i < ZEBRA_ROUTE_MAX; ++i) + { + if (hc->plist_redist_name[i][afi]) + { + hc->plist_redist[i][afi] = + prefix_list_lookup (afi, hc->plist_redist_name[i][afi]); + } + } + + } + + vnc_direct_bgp_reexport (bgp, AFI_IP); + vnc_direct_bgp_reexport (bgp, AFI_IP6); + + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP); */ + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP6); */ + + vnc_redistribute_prechange (bgp); + vnc_redistribute_postchange (bgp); +} + +/* + * respond to changes in the global route map configuration + */ +void +vnc_routemap_update (struct bgp *bgp, const char *unused) +{ + struct listnode *n; + struct rfapi_nve_group_cfg *rfg; + struct rfapi_cfg *hc; + int i; + + zlog_debug ("%s(arg=%s)", __func__, unused); + + if (!bgp) + { + zlog_debug ("%s: No BGP process is configured", __func__); + return; + } + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: rfapi not configured", __func__); + return; + } + + /* + * Loop over nve groups + */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->nve_groups_sequential, n, rfg)) + { + + if (rfg->routemap_export_bgp_name) + { + rfg->routemap_export_bgp = + route_map_lookup_by_name (rfg->routemap_export_bgp_name); + } + if (rfg->routemap_export_zebra_name) + { + rfg->routemap_export_zebra = + route_map_lookup_by_name (rfg->routemap_export_zebra_name); + } + for (i = 0; i < ZEBRA_ROUTE_MAX; ++i) + { + if (rfg->routemap_redist_name[i]) + { + rfg->routemap_redist[i] = + route_map_lookup_by_name (rfg->routemap_redist_name[i]); + } + } + + vnc_direct_bgp_reexport_group_afi (bgp, rfg, AFI_IP); + vnc_direct_bgp_reexport_group_afi (bgp, rfg, AFI_IP6); + /* TBD vnc_zebra_reexport_group_afi(bgp, rfg, afi); */ + } + + /* + * RH config, too + */ + if (hc->routemap_export_bgp_name) + { + hc->routemap_export_bgp = + route_map_lookup_by_name (hc->routemap_export_bgp_name); + } + if (hc->routemap_export_zebra_name) + { + hc->routemap_export_zebra = + route_map_lookup_by_name (hc->routemap_export_zebra_name); + } + for (i = 0; i < ZEBRA_ROUTE_MAX; ++i) + { + if (hc->routemap_redist_name[i]) + { + hc->routemap_redist[i] = + route_map_lookup_by_name (hc->routemap_redist_name[i]); + } + } + + vnc_direct_bgp_reexport (bgp, AFI_IP); + vnc_direct_bgp_reexport (bgp, AFI_IP6); + + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP); */ + /* TBD vnc_zebra_rh_reexport(bgp, AFI_IP6); */ + + vnc_redistribute_prechange (bgp); + vnc_redistribute_postchange (bgp); + + zlog_debug ("%s done", __func__); +} + +static void +vnc_routemap_event (route_map_event_t type, /* ignored */ + const char *rmap_name) /* ignored */ +{ + struct listnode *mnode, *mnnode; + struct bgp *bgp; + + zlog_debug ("%s(event type=%d)", __func__, type); + if (bm->bgp == NULL) /* may be called during cleanup */ + return; + + for (ALL_LIST_ELEMENTS (bm->bgp, mnode, mnnode, bgp)) + vnc_routemap_update (bgp, rmap_name); + + zlog_debug ("%s: done", __func__); +} + +/*------------------------------------------------------------------------- + * nve-group + 
*-----------------------------------------------------------------------*/ + + +DEFUN (vnc_nve_group, + vnc_nve_group_cmd, + "vnc nve-group NAME", + VNC_CONFIG_STR "Configure a NVE group\n" "Group name\n") +{ + struct rfapi_nve_group_cfg *rfg; + struct bgp *bgp = vty->index; + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* Search for name */ + rfg = rfapi_group_lookup_byname (bgp, argv[0]); + + if (!rfg) + { + rfg = rfapi_group_new (); + if (!rfg) + { + /* Error out of memory */ + vty_out (vty, "Can't allocate memory for NVE group%s", VTY_NEWLINE); + return CMD_WARNING; + } + rfg->name = strdup (argv[0]); + /* add to tail of list */ + listnode_add (bgp->rfapi_cfg->nve_groups_sequential, rfg); + + /* Copy defaults from struct rfapi_cfg */ + rfg->rd = bgp->rfapi_cfg->default_rd; + if (bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_L2RD) + { + rfg->l2rd = bgp->rfapi_cfg->default_l2rd; + rfg->flags |= RFAPI_RFG_L2RD; + } + rfg->rd = bgp->rfapi_cfg->default_rd; + rfg->response_lifetime = bgp->rfapi_cfg->default_response_lifetime; + + if (bgp->rfapi_cfg->default_rt_export_list) + { + rfg->rt_export_list = + ecommunity_dup (bgp->rfapi_cfg->default_rt_export_list); + } + + if (bgp->rfapi_cfg->default_rt_import_list) + { + rfg->rt_import_list = + ecommunity_dup (bgp->rfapi_cfg->default_rt_import_list); + rfg->rfapi_import_table = + rfapiImportTableRefAdd (bgp, rfg->rt_import_list); + } + + /* + * If a redist nve group was named but the group was not defined, + * make the linkage now + */ + if (!bgp->rfapi_cfg->rfg_redist) + { + if (bgp->rfapi_cfg->rfg_redist_name && + !strcmp (bgp->rfapi_cfg->rfg_redist_name, rfg->name)) + { + + vnc_redistribute_prechange (bgp); + bgp->rfapi_cfg->rfg_redist = rfg; + vnc_redistribute_postchange (bgp); + + } + } + + /* + * Same treatment for bgp-direct export group + */ + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, nnode, rfgn)) + { + + if (!strcmp (rfgn->name, rfg->name)) + { + rfgn->rfg = rfg; + vnc_direct_bgp_add_group (bgp, rfg); + break; + } + } + + /* + * Same treatment for zebra export group + */ + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_zebra_l, + node, nnode, rfgn)) + { + + zlog_debug ("%s: ezport zebra: checking if \"%s\" == \"%s\"", + __func__, rfgn->name, rfg->name); + if (!strcmp (rfgn->name, rfg->name)) + { + rfgn->rfg = rfg; + vnc_zebra_add_group (bgp, rfg); + break; + } + } + } + + /* + * XXX subsequent calls will need to make sure this item is still + * in the linked list and has the same name + */ + VTY_PUSH_CONTEXT_SUB (BGP_VNC_NVE_GROUP_NODE, rfg); + + return CMD_SUCCESS; +} + +static void +bgp_rfapi_delete_nve_group ( + struct vty *vty, /* NULL = no output */ + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg) +{ + struct list *orphaned_nves = NULL; + struct listnode *node, *nnode; + + /* + * If there are currently-open NVEs that belong to this group, + * zero out their references to this group structure. 
+ */ + if (rfg->nves) + { + struct rfapi_descriptor *rfd; + orphaned_nves = list_new (); + while ((rfd = listnode_head (rfg->nves))) + { + rfd->rfg = NULL; + listnode_delete (rfg->nves, rfd); + listnode_add (orphaned_nves, rfd); + } + list_delete (rfg->nves); + rfg->nves = NULL; + } + + /* delete it */ + free (rfg->name); + if (rfg->rfapi_import_table) + rfapiImportTableRefDelByIt (bgp, rfg->rfapi_import_table); + if (rfg->rt_import_list) + ecommunity_free (&rfg->rt_import_list); + if (rfg->rt_export_list) + ecommunity_free (&rfg->rt_export_list); + + if (rfg->vn_node) + { + rfg->vn_node->info = NULL; + route_unlock_node (rfg->vn_node); /* frees */ + } + if (rfg->un_node) + { + rfg->un_node->info = NULL; + route_unlock_node (rfg->un_node); /* frees */ + } + if (rfg->rfp_cfg) + XFREE (MTYPE_RFAPI_RFP_GROUP_CFG, rfg->rfp_cfg); + listnode_delete (bgp->rfapi_cfg->nve_groups_sequential, rfg); + + XFREE (MTYPE_RFAPI_GROUP_CFG, rfg); + + /* + * Attempt to reassign the orphaned nves to a new group. If + * a NVE can not be reassigned, its rfd->rfg will remain NULL + * and it will become a zombie until released by rfapi_close(). + */ + if (orphaned_nves) + { + struct rfapi_descriptor *rfd; + + for (ALL_LIST_ELEMENTS (orphaned_nves, node, nnode, rfd)) + { + /* + * 1. rfapi_close() equivalent except: + * a. don't free original descriptor + * b. remember query list + * c. remember advertised route list + * 2. rfapi_open() equivalent except: + * a. reuse original descriptor + * 3. rfapi_register() on remembered advertised route list + * 4. rfapi_query on rememebred query list + */ + + int rc; + + rc = rfapi_reopen (rfd, bgp); + + if (!rc) + { + list_delete_node (orphaned_nves, node); + if (vty) + vty_out (vty, "WARNING: reassigned NVE vn="); + rfapiPrintRfapiIpAddr (vty, &rfd->vn_addr); + if (vty) + vty_out (vty, " un="); + rfapiPrintRfapiIpAddr (vty, &rfd->un_addr); + if (vty) + vty_out (vty, " to new group \"%s\"%s", rfd->rfg->name, + VTY_NEWLINE); + + } + } + + for (ALL_LIST_ELEMENTS_RO (orphaned_nves, node, rfd)) + { + if (vty) + vty_out (vty, "WARNING: orphaned NVE vn="); + rfapiPrintRfapiIpAddr (vty, &rfd->vn_addr); + if (vty) + vty_out (vty, " un="); + rfapiPrintRfapiIpAddr (vty, &rfd->un_addr); + if (vty) + vty_out (vty, "%s", VTY_NEWLINE); + } + list_delete (orphaned_nves); + } +} + +static int +bgp_rfapi_delete_named_nve_group ( + struct vty *vty, /* NULL = no output */ + struct bgp *bgp, + const char *rfg_name) /* NULL = any */ +{ + struct rfapi_nve_group_cfg *rfg = NULL; + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + + /* Search for name */ + if (rfg_name) + { + rfg = rfapi_group_lookup_byname (bgp, rfg_name); + if (!rfg) + { + if (vty) + vty_out (vty, "No NVE group named \"%s\"%s", rfg_name, + VTY_NEWLINE); + return CMD_WARNING; + } + } + + /* + * If this group is the redist nve group, unlink it + */ + if (rfg_name == NULL || bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_prechange (bgp); + bgp->rfapi_cfg->rfg_redist = NULL; + vnc_redistribute_postchange (bgp); + } + + + /* + * remove reference from bgp direct export list + */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, rfgn)) + { + if (rfg_name == NULL || !strcmp (rfgn->name, rfg_name)) + { + rfgn->rfg = NULL; + /* remove exported routes from this group */ + vnc_direct_bgp_del_group (bgp, rfg); + break; + } + } + + /* + * remove reference from zebra export list + */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_zebra_l, node, rfgn)) + { + + if (rfg_name == NULL || 
!strcmp (rfgn->name, rfg_name)) + { + rfgn->rfg = NULL; + /* remove exported routes from this group */ + vnc_zebra_del_group (bgp, rfg); + break; + } + } + if (rfg) + bgp_rfapi_delete_nve_group (vty, bgp, rfg); + else /* must be delete all */ + for (ALL_LIST_ELEMENTS + (bgp->rfapi_cfg->nve_groups_sequential, node, nnode, rfg)) + bgp_rfapi_delete_nve_group (vty, bgp, rfg); + return CMD_SUCCESS; +} + +DEFUN (vnc_no_nve_group, + vnc_no_nve_group_cmd, + "no vnc nve-group NAME", + NO_STR + VNC_CONFIG_STR + "Configure a NVE group\n" + "Group name\n") +{ + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + return bgp_rfapi_delete_named_nve_group (vty, bgp, argv[0]); +} + +DEFUN (vnc_nve_group_prefix, + vnc_nve_group_prefix_cmd, + "prefix (vn|un) (A.B.C.D/M|X:X::X:X/M)", + "Specify prefixes matching NVE VN or UN interfaces\n" + "VN prefix\n" + "UN prefix\n" + "IPv4 prefix\n" + "IPv6 prefix\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct prefix p; + int afi; + struct route_table *rt; + struct route_node *rn; + int is_un_prefix = 0; + + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!str2prefix (argv[1], &p)) + { + vty_out (vty, "Malformed prefix \"%s\"%s", argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + afi = family2afi (p.family); + if (!afi) + { + vty_out (vty, "Unsupported address family%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (*(argv[0]) == 'u') + { + rt = &(bgp->rfapi_cfg->nve_groups_un[afi]); + is_un_prefix = 1; + } + else + { + rt = &(bgp->rfapi_cfg->nve_groups_vn[afi]); + } + + rn = route_node_get (rt, &p); /* NB locks node */ + if (rn->info) + { + /* + * There is already a group with this prefix + */ + route_unlock_node (rn); + if (rn->info != rfg) + { + /* + * different group name: fail + */ + vty_out (vty, "nve group \"%s\" already has \"%s\" prefix %s%s", + ((struct rfapi_nve_group_cfg *) (rn->info))->name, + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + else + { + /* + * same group name: it's already in the correct place + * in the table, so we're done. + * + * Implies rfg->(vn|un)_prefix is already correct. 
+ */ + return CMD_SUCCESS; + } + } + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_prechange (bgp); + } + + /* New prefix, new node */ + + if (is_un_prefix) + { + + /* detach rfg from previous route table location */ + if (rfg->un_node) + { + rfg->un_node->info = NULL; + route_unlock_node (rfg->un_node); /* frees */ + } + rfg->un_node = rn; /* back ref */ + rfg->un_prefix = p; + + } + else + { + + /* detach rfg from previous route table location */ + if (rfg->vn_node) + { + rfg->vn_node->info = NULL; + route_unlock_node (rfg->vn_node); /* frees */ + } + rfg->vn_node = rn; /* back ref */ + rfg->vn_prefix = p; + } + + /* attach */ + rn->info = rfg; + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_postchange (bgp); + } + + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_rt_import, + vnc_nve_group_rt_import_cmd, + "rt import .RTLIST", + "Specify route targets\n" + "Import filter\n" + "Space separated route target list (A.B.C.D:MN|EF:OPQR|GHJK:MN)\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct bgp *bgp = vty->index; + int rc; + struct listnode *node; + struct rfapi_rfg_name *rfgn; + int is_export_bgp = 0; + int is_export_zebra = 0; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + rc = set_ecom_list (vty, argc, argv, &rfg->rt_import_list); + if (rc != CMD_SUCCESS) + return rc; + + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, rfgn)) + { + + if (rfgn->rfg == rfg) + { + is_export_bgp = 1; + break; + } + } + + if (is_export_bgp) + vnc_direct_bgp_del_group (bgp, rfg); + + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_zebra_l, node, rfgn)) + { + + if (rfgn->rfg == rfg) + { + is_export_zebra = 1; + break; + } + } + + if (is_export_zebra) + vnc_zebra_del_group (bgp, rfg); + + /* + * stop referencing old import table, now reference new one + */ + if (rfg->rfapi_import_table) + rfapiImportTableRefDelByIt (bgp, rfg->rfapi_import_table); + rfg->rfapi_import_table = rfapiImportTableRefAdd (bgp, rfg->rt_import_list); + + if (is_export_bgp) + vnc_direct_bgp_add_group (bgp, rfg); + + if (is_export_zebra) + vnc_zebra_add_group (bgp, rfg); + + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_rt_export, + vnc_nve_group_rt_export_cmd, + "rt export .RTLIST", + "Specify route targets\n" + "Export filter\n" + "Space separated route target list (A.B.C.D:MN|EF:OPQR|GHJK:MN)\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct bgp *bgp = vty->index; + int rc; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_prechange (bgp); + } + + rc = set_ecom_list (vty, argc, argv, &rfg->rt_export_list); + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_postchange (bgp); + } + + return rc; +} + +DEFUN (vnc_nve_group_rt_both, + vnc_nve_group_rt_both_cmd, + "rt both .RTLIST", + "Specify route targets\n" + "Export+import filters\n" + "Space separated route target list 
(A.B.C.D:MN|EF:OPQR|GHJK:MN)\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct bgp *bgp = vty->index; + int rc; + int is_export_bgp = 0; + int is_export_zebra = 0; + struct listnode *node; + struct rfapi_rfg_name *rfgn; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + rc = set_ecom_list (vty, argc, argv, &rfg->rt_import_list); + if (rc != CMD_SUCCESS) + return rc; + + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, rfgn)) + { + + if (rfgn->rfg == rfg) + { + is_export_bgp = 1; + break; + } + } + + if (is_export_bgp) + vnc_direct_bgp_del_group (bgp, rfg); + + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_zebra_l, node, rfgn)) + { + + if (rfgn->rfg == rfg) + { + is_export_zebra = 1; + break; + } + } + + if (is_export_zebra) + { + zlog_debug ("%s: is_export_zebra", __func__); + vnc_zebra_del_group (bgp, rfg); + } + + /* + * stop referencing old import table, now reference new one + */ + if (rfg->rfapi_import_table) + rfapiImportTableRefDelByIt (bgp, rfg->rfapi_import_table); + rfg->rfapi_import_table = rfapiImportTableRefAdd (bgp, rfg->rt_import_list); + + if (is_export_bgp) + vnc_direct_bgp_add_group (bgp, rfg); + + if (is_export_zebra) + vnc_zebra_add_group (bgp, rfg); + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_prechange (bgp); + } + + rc = set_ecom_list (vty, argc, argv, &rfg->rt_export_list); + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_postchange (bgp); + } + + return rc; + +} + +DEFUN (vnc_nve_group_l2rd, + vnc_nve_group_l2rd_cmd, + "l2rd (ID|auto:vn)", + "Specify default Local Nve ID value to use in RD for L2 routes\n" + "Fixed value 1-255\n" + "use the low-order octet of the NVE's VN address\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "auto:vn")) + { + rfg->l2rd = 0; + } + else + { + char *end = NULL; + unsigned long value_l = strtoul (argv[0], &end, 10); + uint8_t value = value_l & 0xff; + + if (!*(argv[0]) || *end) + { + vty_out (vty, "%% Malformed l2 nve ID \"%s\"%s", argv[0], + VTY_NEWLINE); + return CMD_WARNING; + } + if ((value_l < 1) || (value_l > 0xff)) + { + vty_out (vty, + "%% Malformed l2 nve id (must be greater than 0 and less than %u%s", + 0x100, VTY_NEWLINE); + return CMD_WARNING; + } + + rfg->l2rd = value; + } + rfg->flags |= RFAPI_RFG_L2RD; + + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_no_l2rd, + vnc_nve_group_no_l2rd_cmd, + "no l2rd", + NO_STR + "Specify default Local Nve ID value to use in RD for L2 routes\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, 
"Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + rfg->l2rd = 0; + rfg->flags &= ~RFAPI_RFG_L2RD; + + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_rd, + vnc_nve_group_rd_cmd, + "rd ASN:nn_or_IP-address:nn", + "Specify route distinguisher\n" + "Route Distinguisher (: | : | auto:vn: )\n") +{ + int ret; + struct prefix_rd prd; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strncmp (argv[0], "auto:vn:", 8)) + { + /* + * use AF_UNIX to designate automatically-assigned RD + * auto:vn:nn where nn is a 2-octet quantity + */ + char *end = NULL; + uint32_t value32 = strtoul (argv[0] + 8, &end, 10); + uint16_t value = value32 & 0xffff; + + if (!*(argv[0] + 5) || *end) + { + vty_out (vty, "%% Malformed rd%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (value32 > 0xffff) + { + vty_out (vty, "%% Malformed rd (must be less than %u%s", + 0x0ffff, VTY_NEWLINE); + return CMD_WARNING; + } + + memset (&prd, 0, sizeof (prd)); + prd.family = AF_UNIX; + prd.prefixlen = 64; + prd.val[0] = (RD_TYPE_IP >> 8) & 0x0ff; + prd.val[1] = RD_TYPE_IP & 0x0ff; + prd.val[6] = (value >> 8) & 0x0ff; + prd.val[7] = value & 0x0ff; + + } + else + { + + ret = str2prefix_rd (argv[0], &prd); + if (!ret) + { + vty_out (vty, "%% Malformed rd%s", VTY_NEWLINE); + return CMD_WARNING; + } + } + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_prechange (bgp); + } + + rfg->rd = prd; + + if (bgp->rfapi_cfg->rfg_redist == rfg) + { + vnc_redistribute_postchange (bgp); + } + return CMD_SUCCESS; +} + +DEFUN (vnc_nve_group_responselifetime, + vnc_nve_group_responselifetime_cmd, + "response-lifetime (LIFETIME|infinite)", + "Specify response lifetime\n" + "Response lifetime in seconds\n" "Infinite response lifetime\n") +{ + unsigned int rspint; + VTY_DECLVAR_CONTEXT_SUB(rfapi_nve_group_cfg, rfg); + struct bgp *bgp = vty->index; + struct rfapi_descriptor *rfd; + struct listnode *hdnode; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "infinite")) + { + rspint = RFAPI_INFINITE_LIFETIME; + } + else + { + VTY_GET_INTEGER ("Response Lifetime", rspint, argv[0]); + } + + rfg->response_lifetime = rspint; + rfg->flags |= RFAPI_RFG_RESPONSE_LIFETIME; + if (rfg->nves) + for (ALL_LIST_ELEMENTS_RO (rfg->nves, hdnode, rfd)) + rfd->response_lifetime = rspint; + return CMD_SUCCESS; +} + +/* + * Sigh. This command, like exit-address-family, is a hack to deal + * with the lack of rigorous level control in the command handler. + * TBD fix command handler. 
+ */ +DEFUN (exit_vnc, + exit_vnc_cmd, + "exit-vnc", + "Exit VNC configuration mode\n") +{ + if (vty->node == BGP_VNC_DEFAULTS_NODE || + vty->node == BGP_VNC_NVE_GROUP_NODE || + vty->node == BGP_VNC_L2_GROUP_NODE) + { + + vty->node = BGP_NODE; + } + return CMD_SUCCESS; +} + +static struct cmd_node bgp_vnc_defaults_node = { + BGP_VNC_DEFAULTS_NODE, + "%s(config-router-vnc-defaults)# ", + 1 +}; + +static struct cmd_node bgp_vnc_nve_group_node = { + BGP_VNC_NVE_GROUP_NODE, + "%s(config-router-vnc-nve-group)# ", + 1 +}; + +/*------------------------------------------------------------------------- + * vnc-l2-group + *-----------------------------------------------------------------------*/ + + +DEFUN (vnc_l2_group, + vnc_l2_group_cmd, + "vnc l2-group NAME", + VNC_CONFIG_STR "Configure a L2 group\n" "Group name\n") +{ + struct rfapi_l2_group_cfg *rfg; + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* Search for name */ + rfg = rfapi_l2_group_lookup_byname (bgp, argv[0]); + + if (!rfg) + { + rfg = rfapi_l2_group_new (); + if (!rfg) + { + /* Error out of memory */ + vty_out (vty, "Can't allocate memory for L2 group%s", VTY_NEWLINE); + return CMD_WARNING; + } + rfg->name = strdup (argv[0]); + /* add to tail of list */ + listnode_add (bgp->rfapi_cfg->l2_groups, rfg); + } + + /* + * XXX subsequent calls will need to make sure this item is still + * in the linked list and has the same name + */ + VTY_PUSH_CONTEXT_SUB (BGP_VNC_L2_GROUP_NODE, rfg); + return CMD_SUCCESS; +} + +static void +bgp_rfapi_delete_l2_group ( + struct vty *vty, /* NULL = no output */ + struct bgp *bgp, + struct rfapi_l2_group_cfg *rfg) +{ + /* delete it */ + free (rfg->name); + if (rfg->rt_import_list) + ecommunity_free (&rfg->rt_import_list); + if (rfg->rt_export_list) + ecommunity_free (&rfg->rt_export_list); + if (rfg->labels) + list_delete (rfg->labels); + if (rfg->rfp_cfg) + XFREE (MTYPE_RFAPI_RFP_GROUP_CFG, rfg->rfp_cfg); + listnode_delete (bgp->rfapi_cfg->l2_groups, rfg); + + rfapi_l2_group_del (rfg); +} + +static int +bgp_rfapi_delete_named_l2_group ( + struct vty *vty, /* NULL = no output */ + struct bgp *bgp, + const char *rfg_name) /* NULL = any */ +{ + struct rfapi_l2_group_cfg *rfg = NULL; + struct listnode *node, *nnode; + + /* Search for name */ + if (rfg_name) + { + rfg = rfapi_l2_group_lookup_byname (bgp, rfg_name); + if (!rfg) + { + if (vty) + vty_out (vty, "No L2 group named \"%s\"%s", rfg_name, + VTY_NEWLINE); + return CMD_WARNING; + } + } + + if (rfg) + bgp_rfapi_delete_l2_group (vty, bgp, rfg); + else /* must be delete all */ + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->l2_groups, node, nnode, rfg)) + bgp_rfapi_delete_l2_group (vty, bgp, rfg); + return CMD_SUCCESS; +} + +DEFUN (vnc_no_l2_group, + vnc_no_l2_group_cmd, + "no vnc l2-group NAME", + NO_STR + VNC_CONFIG_STR + "Configure a L2 group\n" + "Group name\n") +{ + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + return bgp_rfapi_delete_named_l2_group (vty, bgp, argv[0]); +} + + +DEFUN (vnc_l2_group_lni, + vnc_l2_group_lni_cmd, + "logical-network-id <0-4294967295>", + "Specify Logical Network ID associated with group\n" + "value\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_l2_group_cfg, rfg); + struct bgp *bgp = vty->index; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if 
(!listnode_lookup (bgp->rfapi_cfg->l2_groups, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current L2 group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + VTY_GET_INTEGER ("logical-network-id", rfg->logical_net_id, argv[0]); + + return CMD_SUCCESS; +} + +DEFUN (vnc_l2_group_labels, + vnc_l2_group_labels_cmd, + "labels .LABELLIST", + "Specify label values associated with group\n" + "Space separated list of label values <0-1048575>\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_l2_group_cfg, rfg); + struct bgp *bgp = vty->index; + struct list *ll; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->l2_groups, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current L2 group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + ll = rfg->labels; + if (ll == NULL) + { + ll = list_new (); + rfg->labels = ll; + } + for (; argc; --argc, ++argv) + { + uint32_t label; + VTY_GET_INTEGER_RANGE ("Label value", label, argv[0], 0, 1048575); + if (!listnode_lookup (ll, (void *) (uintptr_t) label)) + listnode_add (ll, (void *) (uintptr_t) label); + } + + return CMD_SUCCESS; +} + +DEFUN (vnc_l2_group_no_labels, + vnc_l2_group_no_labels_cmd, + "no labels .LABELLIST", + NO_STR + "Remove label values associated with L2 group\n" + "Specify label values associated with L2 group\n" + "Space separated list of label values <0-1048575>\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_l2_group_cfg, rfg); + struct bgp *bgp = vty->index; + struct list *ll; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->l2_groups, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current L2 group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + ll = rfg->labels; + if (ll == NULL) + { + vty_out (vty, "Label no longer associated with group%s", VTY_NEWLINE); + return CMD_WARNING; + } + + for (; argc; --argc, ++argv) + { + uint32_t label; + VTY_GET_INTEGER_RANGE ("Label value", label, argv[0], 0, 1048575); + listnode_delete (ll, (void *) (uintptr_t) label); + } + + return CMD_SUCCESS; +} + +DEFUN (vnc_l2_group_rt, + vnc_l2_group_rt_cmd, + "rt (both|export|import) ASN:nn_or_IP-address:nn", + "Specify route targets\n" + "Export+import filters\n" + "Export filters\n" + "Import filters\n" + "A route target\n") +{ + VTY_DECLVAR_CONTEXT_SUB(rfapi_l2_group_cfg, rfg); + struct bgp *bgp = vty->index; + int rc = CMD_SUCCESS; + int do_import = 0; + int do_export = 0; + + switch (argv[0][0]) + { + case 'b': + do_export = 1; /* fall through */ + case 'i': + do_import = 1; + break; + case 'e': + do_export = 1; + break; + default: + vty_out (vty, "Unknown option, %s%s", argv[0], VTY_NEWLINE); + return CMD_ERR_NO_MATCH; + } + argc--; + argv++; + if (argc < 1) + return CMD_ERR_INCOMPLETE; + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* make sure it's still in list */ + if (!listnode_lookup (bgp->rfapi_cfg->l2_groups, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current L2 group no longer exists%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (do_import) + rc = set_ecom_list (vty, argc, argv, &rfg->rt_import_list); + if (rc == CMD_SUCCESS && do_export) + rc = set_ecom_list (vty, argc, argv, &rfg->rt_export_list); + return rc; +} + + +static struct cmd_node 
bgp_vnc_l2_group_node = { + BGP_VNC_L2_GROUP_NODE, + "%s(config-router-vnc-l2-group)# ", + 1 +}; + +static struct rfapi_l2_group_cfg * +bgp_rfapi_get_group_by_lni_label ( + struct bgp *bgp, + uint32_t logical_net_id, + uint32_t label) +{ + struct rfapi_l2_group_cfg *rfg; + struct listnode *node; + + if (bgp->rfapi_cfg->l2_groups == NULL) /* not the best place for this */ + return NULL; + + label = label & 0xfffff; /* label is 20 bits! */ + + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->l2_groups, node, rfg)) + { + if (rfg->logical_net_id == logical_net_id) + { + struct listnode *lnode; + void *data; + for (ALL_LIST_ELEMENTS_RO (rfg->labels, lnode, data)) + if (((uint32_t) ((uintptr_t) data)) == label) + { /* match! */ + return rfg; + } + } + } + return NULL; +} + +struct list * +bgp_rfapi_get_labellist_by_lni_label ( + struct bgp *bgp, + uint32_t logical_net_id, + uint32_t label) +{ + struct rfapi_l2_group_cfg *rfg; + rfg = bgp_rfapi_get_group_by_lni_label (bgp, logical_net_id, label); + if (rfg) + { + return rfg->labels; + } + return NULL; +} + +struct ecommunity * +bgp_rfapi_get_ecommunity_by_lni_label ( + struct bgp *bgp, + uint32_t is_import, + uint32_t logical_net_id, + uint32_t label) +{ + struct rfapi_l2_group_cfg *rfg; + rfg = bgp_rfapi_get_group_by_lni_label (bgp, logical_net_id, label); + if (rfg) + { + if (is_import) + return rfg->rt_import_list; + else + return rfg->rt_export_list; + } + return NULL; +} + +void +bgp_rfapi_cfg_init (void) +{ + /* main bgpd code does not use this hook, but vnc does */ + route_map_event_hook (vnc_routemap_event); + + install_node (&bgp_vnc_defaults_node, NULL); + install_node (&bgp_vnc_nve_group_node, NULL); + install_node (&bgp_vnc_l2_group_node, NULL); + install_default (BGP_VNC_DEFAULTS_NODE); + install_default (BGP_VNC_NVE_GROUP_NODE); + install_default (BGP_VNC_L2_GROUP_NODE); + + /* + * Add commands + */ + install_element (BGP_NODE, &vnc_defaults_cmd); + install_element (BGP_NODE, &vnc_nve_group_cmd); + install_element (BGP_NODE, &vnc_no_nve_group_cmd); + install_element (BGP_NODE, &vnc_l2_group_cmd); + install_element (BGP_NODE, &vnc_no_l2_group_cmd); + install_element (BGP_NODE, &vnc_advertise_un_method_cmd); + install_element (BGP_NODE, &vnc_export_mode_cmd); + + install_element (BGP_VNC_DEFAULTS_NODE, &vnc_defaults_rt_import_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vnc_defaults_rt_export_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vnc_defaults_rt_both_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vnc_defaults_rd_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vnc_defaults_l2rd_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vnc_defaults_no_l2rd_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vnc_defaults_responselifetime_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &exit_vnc_cmd); + + install_element (BGP_NODE, &vnc_redistribute_protocol_cmd); + install_element (BGP_NODE, &vnc_no_redistribute_protocol_cmd); + install_element (BGP_NODE, &vnc_redistribute_nvegroup_cmd); + install_element (BGP_NODE, &vnc_redistribute_no_nvegroup_cmd); + install_element (BGP_NODE, &vnc_redistribute_lifetime_cmd); + install_element (BGP_NODE, &vnc_redistribute_rh_roo_localadmin_cmd); + install_element (BGP_NODE, &vnc_redistribute_mode_cmd); + install_element (BGP_NODE, &vnc_redistribute_bgp_exterior_cmd); + + install_element (BGP_NODE, &vnc_redist_bgpdirect_no_prefixlist_cmd); + install_element (BGP_NODE, &vnc_redist_bgpdirect_prefixlist_cmd); + install_element (BGP_NODE, &vnc_redist_bgpdirect_no_routemap_cmd); + install_element (BGP_NODE, 
&vnc_redist_bgpdirect_routemap_cmd); + + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_redist_bgpdirect_no_prefixlist_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_redist_bgpdirect_prefixlist_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_redist_bgpdirect_no_routemap_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_redist_bgpdirect_routemap_cmd); + + install_element (BGP_NODE, &vnc_export_nvegroup_cmd); + install_element (BGP_NODE, &vnc_no_export_nvegroup_cmd); + install_element (BGP_NODE, &vnc_nve_export_prefixlist_cmd); + install_element (BGP_NODE, &vnc_nve_export_routemap_cmd); + install_element (BGP_NODE, &vnc_nve_export_no_prefixlist_cmd); + install_element (BGP_NODE, &vnc_nve_export_no_routemap_cmd); + + install_element (BGP_VNC_NVE_GROUP_NODE, &vnc_nve_group_l2rd_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vnc_nve_group_no_l2rd_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vnc_nve_group_prefix_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vnc_nve_group_rt_import_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vnc_nve_group_rt_export_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vnc_nve_group_rt_both_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vnc_nve_group_rd_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_responselifetime_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_export_prefixlist_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_export_routemap_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_export_no_prefixlist_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, + &vnc_nve_group_export_no_routemap_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &exit_vnc_cmd); + + install_element (BGP_VNC_L2_GROUP_NODE, &vnc_l2_group_lni_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &vnc_l2_group_labels_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &vnc_l2_group_no_labels_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &vnc_l2_group_rt_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &exit_vnc_cmd); +} + +struct rfapi_cfg * +bgp_rfapi_cfg_new (struct rfapi_rfp_cfg *cfg) +{ + struct rfapi_cfg *h; + int afi; + + h = + (struct rfapi_cfg *) XCALLOC (MTYPE_RFAPI_CFG, sizeof (struct rfapi_cfg)); + assert (h); + + h->nve_groups_sequential = list_new (); + assert (h->nve_groups_sequential); + + for (afi = AFI_IP; afi < AFI_MAX; afi++) + { + /* ugly, to deal with addition of delegates, part of 0.99.24.1 merge */ + h->nve_groups_vn[afi].delegate = route_table_get_default_delegate (); + h->nve_groups_un[afi].delegate = route_table_get_default_delegate (); + } + h->default_response_lifetime = BGP_VNC_DEFAULT_RESPONSE_LIFETIME_DEFAULT; + h->rfg_export_direct_bgp_l = list_new (); + h->rfg_export_zebra_l = list_new (); + h->resolve_nve_roo_local_admin = + BGP_VNC_CONFIG_RESOLVE_NVE_ROO_LOCAL_ADMIN_DEFAULT; + + SET_FLAG (h->flags, BGP_VNC_CONFIG_FLAGS_DEFAULT); + + if (cfg == NULL) + { + h->rfp_cfg.download_type = RFAPI_RFP_DOWNLOAD_PARTIAL; + h->rfp_cfg.ftd_advertisement_interval = + RFAPI_RFP_CFG_DEFAULT_FTD_ADVERTISEMENT_INTERVAL; + h->rfp_cfg.holddown_factor = RFAPI_RFP_CFG_DEFAULT_HOLDDOWN_FACTOR; + h->rfp_cfg.use_updated_response = 0; + h->rfp_cfg.use_removes = 0; + } + else + { + h->rfp_cfg.download_type = cfg->download_type; + h->rfp_cfg.ftd_advertisement_interval = cfg->ftd_advertisement_interval; + h->rfp_cfg.holddown_factor = cfg->holddown_factor; + h->rfp_cfg.use_updated_response = cfg->use_updated_response; + h->rfp_cfg.use_removes 
= cfg->use_removes; + if (cfg->use_updated_response) + h->flags &= ~BGP_VNC_CONFIG_CALLBACK_DISABLE; + else + h->flags |= BGP_VNC_CONFIG_CALLBACK_DISABLE; + if (cfg->use_removes) + h->flags &= ~BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE; + else + h->flags |= BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE; + } + return h; +} + +void +bgp_rfapi_cfg_destroy (struct bgp *bgp, struct rfapi_cfg *h) +{ + if (h == NULL) + return; + + bgp_rfapi_delete_named_nve_group (NULL, bgp, NULL); + bgp_rfapi_delete_named_l2_group (NULL, bgp, NULL); + if (h->l2_groups != NULL) + list_delete (h->l2_groups); + list_delete (h->nve_groups_sequential); + list_delete (h->rfg_export_direct_bgp_l); + list_delete (h->rfg_export_zebra_l); + if (h->default_rt_export_list) + ecommunity_free (&h->default_rt_export_list); + if (h->default_rt_import_list) + ecommunity_free (&h->default_rt_import_list); + if (h->default_rfp_cfg) + XFREE (MTYPE_RFAPI_RFP_GROUP_CFG, h->default_rfp_cfg); + XFREE (MTYPE_RFAPI_CFG, h); + +} + +int +bgp_rfapi_cfg_write (struct vty *vty, struct bgp *bgp) +{ + struct listnode *node, *nnode; + struct rfapi_nve_group_cfg *rfg; + struct rfapi_cfg *hc = bgp->rfapi_cfg; + struct rfapi_rfg_name *rfgn; + int write = 0; + afi_t afi; + int type; + + if (hc->flags & BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP) + { + vty_out (vty, " vnc advertise-un-method encap-safi%s", VTY_NEWLINE); + write++; + } + + { /* was based on listen ports */ + /* for now allow both old and new */ + if (bgp->rfapi->rfp_methods.cfg_cb) + write += (bgp->rfapi->rfp_methods.cfg_cb) (vty, bgp->rfapi->rfp); + + if (write) + vty_out (vty, "!%s", VTY_NEWLINE); + + if (hc->l2_groups) + { + struct rfapi_l2_group_cfg *rfg = NULL; + struct listnode *gnode; + for (ALL_LIST_ELEMENTS_RO (hc->l2_groups, gnode, rfg)) + { + struct listnode *lnode; + void *data; + ++write; + vty_out (vty, " vnc l2-group %s%s", rfg->name, VTY_NEWLINE); + if (rfg->logical_net_id != 0) + vty_out (vty, " logical-network-id %u%s", rfg->logical_net_id, + VTY_NEWLINE); + if (rfg->labels != NULL && listhead (rfg->labels) != NULL) + { + vty_out (vty, " labels "); + for (ALL_LIST_ELEMENTS_RO (rfg->labels, lnode, data)) + { + vty_out (vty, "%hu ", (uint16_t) ((uintptr_t) data)); + } + vty_out (vty, "%s", VTY_NEWLINE); + } + + if (rfg->rt_import_list && rfg->rt_export_list && + ecommunity_cmp (rfg->rt_import_list, rfg->rt_export_list)) + { + char *b = ecommunity_ecom2str (rfg->rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt both %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + else + { + if (rfg->rt_import_list) + { + char *b = ecommunity_ecom2str (rfg->rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt import %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + if (rfg->rt_export_list) + { + char *b = ecommunity_ecom2str (rfg->rt_export_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt export %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + } + if (bgp->rfapi->rfp_methods.cfg_group_cb) + write += + (bgp->rfapi->rfp_methods.cfg_group_cb) (vty, + bgp->rfapi->rfp, + RFAPI_RFP_CFG_GROUP_L2, + rfg->name, + rfg->rfp_cfg); + vty_out (vty, " exit-vnc%s", VTY_NEWLINE); + vty_out (vty, "!%s", VTY_NEWLINE); + } + } + + if (hc->default_rd.family || + hc->default_response_lifetime || + hc->default_rt_import_list || + hc->default_rt_export_list || hc->nve_groups_sequential->count) + { + + + ++write; + vty_out (vty, " vnc defaults%s", VTY_NEWLINE); + + if (hc->default_rd.prefixlen) + { + char buf[BUFSIZ]; + buf[0] = 
buf[BUFSIZ - 1] = 0; + + if (AF_UNIX == hc->default_rd.family) + { + uint16_t value = 0; + + value = ((hc->default_rd.val[6] << 8) & 0x0ff00) | + (hc->default_rd.val[7] & 0x0ff); + + vty_out (vty, " rd auto:vn:%d%s", value, VTY_NEWLINE); + + } + else + { + + if (!prefix_rd2str (&hc->default_rd, buf, BUFSIZ) || + !buf[0] || buf[BUFSIZ - 1]) + { + + vty_out (vty, "!Error: Can't convert rd%s", VTY_NEWLINE); + } + else + { + vty_out (vty, " rd %s%s", buf, VTY_NEWLINE); + } + } + } + if (hc->default_response_lifetime) + { + vty_out (vty, " response-lifetime "); + if (hc->default_response_lifetime != UINT32_MAX) + vty_out (vty, "%d", hc->default_response_lifetime); + else + vty_out (vty, "infinite"); + vty_out (vty, "%s", VTY_NEWLINE); + } + if (hc->default_rt_import_list && hc->default_rt_export_list && + ecommunity_cmp (hc->default_rt_import_list, + hc->default_rt_export_list)) + { + char *b = ecommunity_ecom2str (hc->default_rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt both %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + else + { + if (hc->default_rt_import_list) + { + char *b = ecommunity_ecom2str (hc->default_rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt import %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + if (hc->default_rt_export_list) + { + char *b = ecommunity_ecom2str (hc->default_rt_export_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt export %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + } + if (bgp->rfapi->rfp_methods.cfg_group_cb) + write += + (bgp->rfapi->rfp_methods.cfg_group_cb) (vty, + bgp->rfapi->rfp, + RFAPI_RFP_CFG_GROUP_DEFAULT, + NULL, + bgp->rfapi_cfg->default_rfp_cfg); + vty_out (vty, " exit-vnc%s", VTY_NEWLINE); + vty_out (vty, "!%s", VTY_NEWLINE); + } + + for (ALL_LIST_ELEMENTS (hc->nve_groups_sequential, node, nnode, rfg)) + { + ++write; + vty_out (vty, " vnc nve-group %s%s", rfg->name, VTY_NEWLINE); + + if (rfg->vn_prefix.family && rfg->vn_node) + { + char buf[BUFSIZ]; + buf[0] = buf[BUFSIZ - 1] = 0; + + prefix2str (&rfg->vn_prefix, buf, BUFSIZ); + if (!buf[0] || buf[BUFSIZ - 1]) + { + vty_out (vty, "!Error: Can't convert prefix%s", VTY_NEWLINE); + } + else + { + vty_out (vty, " prefix %s %s%s", "vn", buf, VTY_NEWLINE); + } + } + + if (rfg->un_prefix.family && rfg->un_node) + { + char buf[BUFSIZ]; + buf[0] = buf[BUFSIZ - 1] = 0; + prefix2str (&rfg->un_prefix, buf, BUFSIZ); + if (!buf[0] || buf[BUFSIZ - 1]) + { + vty_out (vty, "!Error: Can't convert prefix%s", VTY_NEWLINE); + } + else + { + vty_out (vty, " prefix %s %s%s", "un", buf, VTY_NEWLINE); + } + } + + + if (rfg->rd.prefixlen) + { + char buf[BUFSIZ]; + buf[0] = buf[BUFSIZ - 1] = 0; + + if (AF_UNIX == rfg->rd.family) + { + + uint16_t value = 0; + + value = ((rfg->rd.val[6] << 8) & 0x0ff00) | + (rfg->rd.val[7] & 0x0ff); + + vty_out (vty, " rd auto:vn:%d%s", value, VTY_NEWLINE); + + } + else + { + + if (!prefix_rd2str (&rfg->rd, buf, BUFSIZ) || + !buf[0] || buf[BUFSIZ - 1]) + { + + vty_out (vty, "!Error: Can't convert rd%s", VTY_NEWLINE); + } + else + { + vty_out (vty, " rd %s%s", buf, VTY_NEWLINE); + } + } + } + if (rfg->flags & RFAPI_RFG_RESPONSE_LIFETIME) + { + vty_out (vty, " response-lifetime "); + if (rfg->response_lifetime != UINT32_MAX) + vty_out (vty, "%d", rfg->response_lifetime); + else + vty_out (vty, "infinite"); + vty_out (vty, "%s", VTY_NEWLINE); + } + + if (rfg->rt_import_list && rfg->rt_export_list && + ecommunity_cmp (rfg->rt_import_list, rfg->rt_export_list)) + { + char *b = 
ecommunity_ecom2str (rfg->rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt both %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + else + { + if (rfg->rt_import_list) + { + char *b = ecommunity_ecom2str (rfg->rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt import %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + if (rfg->rt_export_list) + { + char *b = ecommunity_ecom2str (rfg->rt_export_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " rt export %s%s", b, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, b); + } + } + + /* + * route filtering: prefix-lists and route-maps + */ + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + const char *afistr = (afi == AFI_IP) ? "ipv4" : "ipv6"; + + if (rfg->plist_export_bgp_name[afi]) + { + vty_out (vty, " export bgp %s prefix-list %s%s", + afistr, rfg->plist_export_bgp_name[afi], + VTY_NEWLINE); + } + if (rfg->plist_export_zebra_name[afi]) + { + vty_out (vty, " export zebra %s prefix-list %s%s", + afistr, rfg->plist_export_zebra_name[afi], + VTY_NEWLINE); + } + /* + * currently we only support redist plists for bgp-direct. + * If we later add plist support for redistributing other + * protocols, we'll need to loop over protocols here + */ + if (rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi]) + { + vty_out (vty, " redistribute bgp-direct %s prefix-list %s%s", + afistr, + rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi], + VTY_NEWLINE); + } + if (rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT_EXT][afi]) + { + vty_out (vty, + " redistribute bgp-direct-to-nve-groups %s prefix-list %s%s", + afistr, + rfg->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT_EXT] + [afi], VTY_NEWLINE); + } + } + + if (rfg->routemap_export_bgp_name) + { + vty_out (vty, " export bgp route-map %s%s", + rfg->routemap_export_bgp_name, VTY_NEWLINE); + } + if (rfg->routemap_export_zebra_name) + { + vty_out (vty, " export zebra route-map %s%s", + rfg->routemap_export_zebra_name, VTY_NEWLINE); + } + if (rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT]) + { + vty_out (vty, " redistribute bgp-direct route-map %s%s", + rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT], + VTY_NEWLINE); + } + if (rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + vty_out (vty, + " redistribute bgp-direct-to-nve-groups route-map %s%s", + rfg->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT_EXT], + VTY_NEWLINE); + } + if (bgp->rfapi->rfp_methods.cfg_group_cb) + write += + (bgp->rfapi->rfp_methods.cfg_group_cb) (vty, + bgp->rfapi->rfp, + RFAPI_RFP_CFG_GROUP_NVE, + rfg->name, rfg->rfp_cfg); + vty_out (vty, " exit-vnc%s", VTY_NEWLINE); + vty_out (vty, "!%s", VTY_NEWLINE); + } + } /* have listen ports */ + + /* + * route export to other protocols + */ + if (VNC_EXPORT_BGP_GRP_ENABLED (hc)) + { + vty_out (vty, " vnc export bgp mode group-nve%s", VTY_NEWLINE); + } + else if (VNC_EXPORT_BGP_RH_ENABLED (hc)) + { + vty_out (vty, " vnc export bgp mode registering-nve%s", VTY_NEWLINE); + } + else if (VNC_EXPORT_BGP_CE_ENABLED (hc)) + { + vty_out (vty, " vnc export bgp mode ce%s", VTY_NEWLINE); + } + + if (VNC_EXPORT_ZEBRA_GRP_ENABLED (hc)) + { + vty_out (vty, " vnc export zebra mode group-nve%s", VTY_NEWLINE); + } + else if (VNC_EXPORT_ZEBRA_RH_ENABLED (hc)) + { + vty_out (vty, " vnc export zebra mode registering-nve%s", VTY_NEWLINE); + } + + if (hc->rfg_export_direct_bgp_l) + { + for (ALL_LIST_ELEMENTS (hc->rfg_export_direct_bgp_l, node, nnode, rfgn)) + { + + vty_out (vty, " vnc export bgp group-nve group %s%s", + rfgn->name, VTY_NEWLINE); + } 
+ } + + if (hc->rfg_export_zebra_l) + { + for (ALL_LIST_ELEMENTS (hc->rfg_export_zebra_l, node, nnode, rfgn)) + { + + vty_out (vty, " vnc export zebra group-nve group %s%s", + rfgn->name, VTY_NEWLINE); + } + } + + + if (hc->rfg_redist_name) + { + vty_out (vty, " vnc redistribute nve-group %s%s", + hc->rfg_redist_name, VTY_NEWLINE); + } + if (hc->redist_lifetime) + { + vty_out (vty, " vnc redistribute lifetime %d%s", + hc->redist_lifetime, VTY_NEWLINE); + } + if (hc->resolve_nve_roo_local_admin != + BGP_VNC_CONFIG_RESOLVE_NVE_ROO_LOCAL_ADMIN_DEFAULT) + { + + vty_out (vty, " vnc redistribute resolve-nve roo-ec-local-admin %d%s", + hc->resolve_nve_roo_local_admin, VTY_NEWLINE); + } + + if (hc->redist_mode) /* ! default */ + { + const char *s = ""; + + switch (hc->redist_mode) + { + case VNC_REDIST_MODE_PLAIN: + s = "plain"; + break; + case VNC_REDIST_MODE_RFG: + s = "nve-group"; + break; + case VNC_REDIST_MODE_RESOLVE_NVE: + s = "resolve-nve"; + break; + } + if (s) + { + vty_out (vty, " vnc redistribute mode %s%s", s, VTY_NEWLINE); + } + } + + /* + * route filtering: prefix-lists and route-maps + */ + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + const char *afistr = (afi == AFI_IP) ? "ipv4" : "ipv6"; + + if (hc->plist_export_bgp_name[afi]) + { + vty_out (vty, " vnc export bgp %s prefix-list %s%s", + afistr, hc->plist_export_bgp_name[afi], VTY_NEWLINE); + } + if (hc->plist_export_zebra_name[afi]) + { + vty_out (vty, " vnc export zebra %s prefix-list %s%s", + afistr, hc->plist_export_zebra_name[afi], VTY_NEWLINE); + } + if (hc->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi]) + { + vty_out (vty, " vnc redistribute bgp-direct %s prefix-list %s%s", + afistr, hc->plist_redist_name[ZEBRA_ROUTE_BGP_DIRECT][afi], + VTY_NEWLINE); + } + } + + if (hc->routemap_export_bgp_name) + { + vty_out (vty, " vnc export bgp route-map %s%s", + hc->routemap_export_bgp_name, VTY_NEWLINE); + } + if (hc->routemap_export_zebra_name) + { + vty_out (vty, " vnc export zebra route-map %s%s", + hc->routemap_export_zebra_name, VTY_NEWLINE); + } + if (hc->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT]) + { + vty_out (vty, " vnc redistribute bgp-direct route-map %s%s", + hc->routemap_redist_name[ZEBRA_ROUTE_BGP_DIRECT], VTY_NEWLINE); + } + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + for (type = 0; type < ZEBRA_ROUTE_MAX; ++type) + { + if (hc->redist[afi][type]) + { + if (type == ZEBRA_ROUTE_BGP_DIRECT_EXT && + hc->redist_bgp_exterior_view_name) + { + vty_out (vty, " vnc redistribute %s %s view %s%s", + ((afi == AFI_IP) ? "ipv4" : "ipv6"), + zebra_route_string (type), + hc->redist_bgp_exterior_view_name, VTY_NEWLINE); + } + else + { + vty_out (vty, " vnc redistribute %s %s%s", + ((afi == AFI_IP) ? "ipv4" : "ipv6"), + zebra_route_string (type), VTY_NEWLINE); + } + } + } + } + return write; +} + +void +bgp_rfapi_show_summary (struct bgp *bgp, struct vty *vty) +{ + struct rfapi_cfg *hc = bgp->rfapi_cfg; + int afi, type, redist = 0; + char tmp[40]; + if (hc == NULL) + return; + + vty_out (vty, "%-39s %-19s %s%s", "VNC Advertise method:", + (hc->flags & BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP + ? "Encapsulation SAFI" : "Tunnel Encap attribute"), + ((hc->flags & BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP) == + (BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP & + BGP_VNC_CONFIG_FLAGS_DEFAULT) ? 
"(default)" : ""), VTY_NEWLINE); + /* export */ + vty_out (vty, "%-39s ", "Export from VNC:"); + /* + * route export to other protocols + */ + if (VNC_EXPORT_BGP_GRP_ENABLED (hc)) + { + redist++; + vty_out (vty, "ToBGP Groups={"); + if (hc->rfg_export_direct_bgp_l) + { + int cnt = 0; + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + for (ALL_LIST_ELEMENTS (hc->rfg_export_direct_bgp_l, + node, nnode, rfgn)) + { + if (cnt++ != 0) + vty_out (vty, ","); + + vty_out (vty, "%s", rfgn->name); + } + } + vty_out (vty, "}"); + } + else if (VNC_EXPORT_BGP_RH_ENABLED (hc)) + { + redist++; + vty_out (vty, "ToBGP {Registering NVE}"); + /* note filters, route-maps not shown */ + } + else if (VNC_EXPORT_BGP_CE_ENABLED (hc)) + { + redist++; + vty_out (vty, "ToBGP {NVE connected router:%d}", + hc->resolve_nve_roo_local_admin); + /* note filters, route-maps not shown */ + } + + if (VNC_EXPORT_ZEBRA_GRP_ENABLED (hc)) + { + redist++; + vty_out (vty, "%sToZebra Groups={", (redist == 1 ? "" : " ")); + if (hc->rfg_export_direct_bgp_l) + { + int cnt = 0; + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + for (ALL_LIST_ELEMENTS (hc->rfg_export_zebra_l, node, nnode, rfgn)) + { + if (cnt++ != 0) + vty_out (vty, ","); + vty_out (vty, "%s", rfgn->name); + } + } + vty_out (vty, "}"); + } + else if (VNC_EXPORT_ZEBRA_RH_ENABLED (hc)) + { + redist++; + vty_out (vty, "%sToZebra {Registering NVE}", (redist == 1 ? "" : " ")); + /* note filters, route-maps not shown */ + } + vty_out (vty, "%-19s %s%s", (redist ? "" : "Off"), + (redist ? "" : "(default)"), VTY_NEWLINE); + + /* Redistribution */ + redist = 0; + vty_out (vty, "%-39s ", "Redistribution into VNC:"); + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + for (type = 0; type < ZEBRA_ROUTE_MAX; ++type) + { + if (hc->redist[afi][type]) + { + vty_out (vty, "{%s,%s} ", + ((afi == AFI_IP) ? "ipv4" : "ipv6"), + zebra_route_string (type)); + redist++; + } + } + } + vty_out (vty, "%-19s %s%s", (redist ? "" : "Off"), + (redist ? "" : "(default)"), VTY_NEWLINE); + + vty_out (vty, "%-39s %3u%-16s %s%s", "RFP Registration Hold-Down Factor:", + hc->rfp_cfg.holddown_factor, "%", + (hc->rfp_cfg.holddown_factor == + RFAPI_RFP_CFG_DEFAULT_HOLDDOWN_FACTOR ? "(default)" : ""), + VTY_NEWLINE); + vty_out (vty, "%-39s %-19s %s%s", "RFP Updated responses:", + (hc->rfp_cfg.use_updated_response == 0 ? "Off" : "On"), + (hc->rfp_cfg.use_updated_response == 0 ? "(default)" : ""), + VTY_NEWLINE); + vty_out (vty, "%-39s %-19s %s%s", "RFP Removal responses:", + (hc->rfp_cfg.use_removes == 0 ? "Off" : "On"), + (hc->rfp_cfg.use_removes == 0 ? "(default)" : ""), VTY_NEWLINE); + vty_out (vty, "%-39s %-19s %s%s", "RFP Full table download:", + (hc->rfp_cfg.download_type == + RFAPI_RFP_DOWNLOAD_FULL ? "On" : "Off"), + (hc->rfp_cfg.download_type == + RFAPI_RFP_DOWNLOAD_PARTIAL ? "(default)" : ""), VTY_NEWLINE); + sprintf (tmp, "%u seconds", hc->rfp_cfg.ftd_advertisement_interval); + vty_out (vty, "%-39s %-19s %s%s", " Advertisement Interval:", tmp, + (hc->rfp_cfg.ftd_advertisement_interval == + RFAPI_RFP_CFG_DEFAULT_FTD_ADVERTISEMENT_INTERVAL + ? 
"(default)" : ""), VTY_NEWLINE); + vty_out (vty, "%-39s %d seconds%s", "Default RFP response lifetime:", + hc->default_response_lifetime, VTY_NEWLINE); + vty_out (vty, "%s", VTY_NEWLINE); + return; +} + +struct rfapi_cfg * +bgp_rfapi_get_config (struct bgp *bgp) +{ + struct rfapi_cfg *hc = NULL; + if (bgp == NULL) + bgp = bgp_get_default (); + if (bgp != NULL) + hc = bgp->rfapi_cfg; + return hc; +} + +#endif /* ENABLE_BGP_VNC */ diff --git a/bgpd/rfapi/bgp_rfapi_cfg.h b/bgpd/rfapi/bgp_rfapi_cfg.h new file mode 100644 index 0000000000..50ab3e27aa --- /dev/null +++ b/bgpd/rfapi/bgp_rfapi_cfg.h @@ -0,0 +1,318 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _QUAGGA_BGP_RFAPI_CFG_H +#define _QUAGGA_BGP_RFAPI_CFG_H + +#include "lib/table.h" +#include "lib/routemap.h" + +#if ENABLE_BGP_VNC +#include "rfapi.h" + +struct rfapi_l2_group_cfg +{ + char *name; + uint32_t logical_net_id; + struct list *labels; /* list of uint32_t */ + struct ecommunity *rt_import_list; + struct ecommunity *rt_export_list; + void *rfp_cfg; /* rfp owned group config */ + + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(rfapi_l2_group_cfg) + +struct rfapi_nve_group_cfg +{ + struct route_node *vn_node; /* backref */ + struct route_node *un_node; /* backref */ + + char *name; + struct prefix vn_prefix; + struct prefix un_prefix; + + struct prefix_rd rd; + uint8_t l2rd; /* 0 = VN addr LSB */ + uint32_t response_lifetime; + uint32_t flags; +#define RFAPI_RFG_RESPONSE_LIFETIME 0x1 +#define RFAPI_RFG_L2RD 0x02 + struct ecommunity *rt_import_list; + struct ecommunity *rt_export_list; + struct rfapi_import_table *rfapi_import_table; + + void *rfp_cfg; /* rfp owned group config */ + /* + * List of NVE descriptors that are assigned to this NVE group + * + * Currently (Mar 2010) this list is used only by the route + * export code to generate per-NVE nexthops for each route. + * + * The nve descriptors listed here have pointers back to + * this nve group config structure to enable them to delete + * their own list entries when they are closed. Consequently, + * if an instance of this nve group config structure is deleted, + * we must first set the nve descriptor references to it to NULL. 
+ */ + struct list *nves; + + /* + * Route filtering + * + * Prefix lists are segregated by afi (part of the base plist code) + * Route-maps are not segregated + */ + char *plist_export_bgp_name[AFI_MAX]; + struct prefix_list *plist_export_bgp[AFI_MAX]; + + char *plist_export_zebra_name[AFI_MAX]; + struct prefix_list *plist_export_zebra[AFI_MAX]; + + char *plist_redist_name[ZEBRA_ROUTE_MAX][AFI_MAX]; + struct prefix_list *plist_redist[ZEBRA_ROUTE_MAX][AFI_MAX]; + + char *routemap_export_bgp_name; + struct route_map *routemap_export_bgp; + + char *routemap_export_zebra_name; + struct route_map *routemap_export_zebra; + + char *routemap_redist_name[ZEBRA_ROUTE_MAX]; + struct route_map *routemap_redist[ZEBRA_ROUTE_MAX]; + + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(rfapi_nve_group_cfg) + +struct rfapi_rfg_name +{ + struct rfapi_nve_group_cfg *rfg; + char *name; +}; + +typedef enum +{ + VNC_REDIST_MODE_PLAIN = 0, /* 0 = default */ + VNC_REDIST_MODE_RFG, + VNC_REDIST_MODE_RESOLVE_NVE +} vnc_redist_mode_t; + +struct rfapi_cfg +{ + struct prefix_rd default_rd; + uint8_t default_l2rd; + struct ecommunity *default_rt_import_list; + struct ecommunity *default_rt_export_list; + uint32_t default_response_lifetime; +#define BGP_VNC_DEFAULT_RESPONSE_LIFETIME_DEFAULT 3600 + void *default_rfp_cfg; /* rfp owned group config */ + + struct list *l2_groups; /* rfapi_l2_group_cfg list */ + /* three views into the same collection of rfapi_nve_group_cfg */ + struct list *nve_groups_sequential; + struct route_table nve_groups_vn[AFI_MAX]; + struct route_table nve_groups_un[AFI_MAX]; + + /* + * For Single VRF export to ordinary routing protocols. This is + * the nve-group that the ordinary protocols belong to. We use it + * to set the RD when sending unicast Zebra routes to VNC + */ + uint8_t redist[AFI_MAX][ZEBRA_ROUTE_MAX]; + uint32_t redist_lifetime; + vnc_redist_mode_t redist_mode; + + /* + * view name of BGP unicast instance that holds + * exterior routes + */ + char *redist_bgp_exterior_view_name; + struct bgp *redist_bgp_exterior_view; + + /* + * nve group for redistribution of routes from zebra to VNC + * (which is probably not useful for production networks) + */ + char *rfg_redist_name; + struct rfapi_nve_group_cfg *rfg_redist; + + /* + * List of NVE groups on whose behalf we will export VNC + * routes to zebra. ((NB: it's actually a list of ) + * This list is used when BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_BITS is + * BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_GRP + */ + struct list *rfg_export_zebra_l; + + /* + * List of NVE groups on whose behalf we will export VNC + * routes directly to the bgp unicast RIB. 
(NB: it's actually + * a list of ) + * This list is used when BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS is + * BGP_VNC_CONFIG_EXPORT_BGP_MODE_GRP + */ + struct list *rfg_export_direct_bgp_l; + + /* + * Exported Route filtering + * + * Prefix lists are segregated by afi (part of the base plist code) + * Route-maps are not segregated + */ + char *plist_export_bgp_name[AFI_MAX]; + struct prefix_list *plist_export_bgp[AFI_MAX]; + + char *plist_export_zebra_name[AFI_MAX]; + struct prefix_list *plist_export_zebra[AFI_MAX]; + + char *routemap_export_bgp_name; + struct route_map *routemap_export_bgp; + + char *routemap_export_zebra_name; + struct route_map *routemap_export_zebra; + + /* + * Redistributed route filtering (routes from other + * protocols into VNC) + */ + char *plist_redist_name[ZEBRA_ROUTE_MAX][AFI_MAX]; + struct prefix_list *plist_redist[ZEBRA_ROUTE_MAX][AFI_MAX]; + + char *routemap_redist_name[ZEBRA_ROUTE_MAX]; + struct route_map *routemap_redist[ZEBRA_ROUTE_MAX]; + + /* + * For importing bgp unicast routes to VNC, we encode the CE + * (route nexthop) in a Route Origin extended community. The + * local part (16-bit) is user-configurable. + */ + uint16_t resolve_nve_roo_local_admin; +#define BGP_VNC_CONFIG_RESOLVE_NVE_ROO_LOCAL_ADMIN_DEFAULT 5226 + + uint32_t flags; +#define BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP 0x00000001 +#define BGP_VNC_CONFIG_CALLBACK_DISABLE 0x00000002 +#define BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE 0x00000004 + +#define BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS 0x000000f0 +#define BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_BITS 0x00000f00 + +#define BGP_VNC_CONFIG_EXPORT_BGP_MODE_NONE 0x00000000 +#define BGP_VNC_CONFIG_EXPORT_BGP_MODE_GRP 0x00000010 +#define BGP_VNC_CONFIG_EXPORT_BGP_MODE_RH 0x00000020 /* registerd nve */ +#define BGP_VNC_CONFIG_EXPORT_BGP_MODE_CE 0x00000040 + +#define BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_NONE 0x00000000 +#define BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_GRP 0x00000100 +#define BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_RH 0x00000200 + +#define BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP 0x00001000 +#define BGP_VNC_CONFIG_L2RD 0x00002000 + +/* Use new NVE RIB to filter callback routes */ +/* Filter querying NVE's registrations from responses */ +/* Default to updated-responses off */ +/* Default to removal-responses off */ +#define BGP_VNC_CONFIG_FLAGS_DEFAULT \ + (BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP |\ + BGP_VNC_CONFIG_CALLBACK_DISABLE |\ + BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE) + + struct rfapi_rfp_cfg rfp_cfg; /* rfp related configuration */ +}; + +#define VNC_EXPORT_ZEBRA_GRP_ENABLED(hc) \ + (((hc)->flags & BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_BITS) == \ + BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_GRP) + +#define VNC_EXPORT_ZEBRA_RH_ENABLED(hc) \ + (((hc)->flags & BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_BITS) == \ + BGP_VNC_CONFIG_EXPORT_ZEBRA_MODE_RH) + +#define VNC_EXPORT_BGP_GRP_ENABLED(hc) \ + (((hc)->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS) == \ + BGP_VNC_CONFIG_EXPORT_BGP_MODE_GRP) + +#define VNC_EXPORT_BGP_RH_ENABLED(hc) \ + (((hc)->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS) == \ + BGP_VNC_CONFIG_EXPORT_BGP_MODE_RH) + +#define VNC_EXPORT_BGP_CE_ENABLED(hc) \ + (((hc)->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS) == \ + BGP_VNC_CONFIG_EXPORT_BGP_MODE_CE) + + +void +bgp_rfapi_cfg_init (void); + +struct rfapi_cfg * +bgp_rfapi_cfg_new (struct rfapi_rfp_cfg *cfg); + +void +bgp_rfapi_cfg_destroy (struct bgp *bgp, struct rfapi_cfg *h); + +int +bgp_rfapi_cfg_write (struct vty *vty, struct bgp *bgp); + +extern int +bgp_rfapi_is_vnc_configured (struct bgp *bgp); + +extern void 
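[Editor's note, not part of the patch] The two export-mode fields above are 4-bit slices of rfapi_cfg->flags, and the VNC_EXPORT_*_ENABLED() macros test the masked field for an exact mode value. A minimal sketch of the intended manipulation, using a hypothetical helper name and a struct rfapi_cfg pointer "cfg":

static void
example_select_bgp_group_export (struct rfapi_cfg *cfg)
{
  /* clear the whole BGP export-mode field ... */
  cfg->flags &= ~BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS;
  /* ... then select exactly one mode value */
  cfg->flags |= BGP_VNC_CONFIG_EXPORT_BGP_MODE_GRP;

  /* VNC_EXPORT_BGP_GRP_ENABLED (cfg) is now true; the RH and CE
   * predicates stay false because they compare the same masked
   * field against different mode values. */
}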
+nve_group_to_nve_list ( + struct rfapi_nve_group_cfg *rfg, + struct list **nves, + uint8_t family); /* AF_INET, AF_INET6 */ + +struct rfapi_nve_group_cfg * +bgp_rfapi_cfg_match_group ( + struct rfapi_cfg *hc, + struct prefix *vn, + struct prefix *un); + +extern void +vnc_prefix_list_update (struct bgp *bgp); + +extern void +vnc_routemap_update (struct bgp *bgp, const char *unused); + +extern void +bgp_rfapi_show_summary (struct bgp *bgp, struct vty *vty); + +extern struct rfapi_cfg * +bgp_rfapi_get_config (struct bgp *bgp); + +extern struct ecommunity * +bgp_rfapi_get_ecommunity_by_lni_label ( + struct bgp *bgp, + uint32_t is_import, + uint32_t logical_net_id, + uint32_t label); /* note, 20bit label! */ + +extern struct list * +bgp_rfapi_get_labellist_by_lni_label ( + struct bgp *bgp, + uint32_t logical_net_id, + uint32_t label); /* note, 20bit label! */ + +#endif /* ENABLE_BGP_VNC */ + +#endif /* _QUAGGA_BGP_RFAPI_CFG_H */ diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c new file mode 100644 index 0000000000..985bcaf942 --- /dev/null +++ b/bgpd/rfapi/rfapi.c @@ -0,0 +1,4386 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + + +#include <errno.h> + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/routemap.h" +#include "lib/log.h" +#include "lib/linklist.h" +#include "lib/command.h" +#include "lib/stream.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_attr.h" +#include "bgpd/bgp_mplsvpn.h" + +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_backend.h" + +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_aspath.h" +#include "bgpd/bgp_advertise.h" +#include "bgpd/bgp_vnc_types.h" +#include "bgpd/bgp_zebra.h" + +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/vnc_export_bgp.h" +#include "bgpd/rfapi/vnc_export_bgp_p.h" +#include "bgpd/rfapi/vnc_zebra.h" +#include "bgpd/rfapi/vnc_import_bgp.h" +#include "bgpd/rfapi/rfapi_rib.h" +#include "bgpd/rfapi/rfapi_ap.h" +#include "bgpd/rfapi/rfapi_encap_tlv.h" +#include "bgpd/rfapi/vnc_debug.h" + +#ifdef HAVE_GLIBC_BACKTRACE +/* for backtrace and friends */ +#include <execinfo.h> +#endif /* HAVE_GLIBC_BACKTRACE */ + +struct ethaddr rfapi_ethaddr0 = { {0} }; + +#define DEBUG_RFAPI_STR "RF API debugging/testing command\n" + +const char * +rfapi_error_str (int code) +{ + switch (code) + { + case 0: + return "Success"; + case ENXIO: + return "BGP or VNC not configured"; + case ENOENT: + return "No match"; + case EEXIST: + return "Handle already open"; + case ENOMSG: + return "Incomplete configuration"; + case EAFNOSUPPORT: + return "Invalid address family"; + case EDEADLK: + return "Called from within a callback procedure"; + case EBADF: + return "Invalid handle"; + case EINVAL: + return "Invalid argument"; + case ESTALE: + return "Stale descriptor"; + default: + return "Unknown error"; + } +} + +/*------------------------------------------ + * rfapi_get_response_lifetime_default + * + * Returns the default lifetime for a response. + * rfp_start_val value returned by rfp_start or + * NULL (=use default instance) + * + * input: + * None + * + * output: + * + * return value: The bgp instance default lifetime for a response. 
+ --------------------------------------------*/ +int +rfapi_get_response_lifetime_default (void *rfp_start_val) +{ + struct bgp *bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + if (bgp) + return bgp->rfapi_cfg->default_response_lifetime; + return BGP_VNC_DEFAULT_RESPONSE_LIFETIME_DEFAULT; +} + +/*------------------------------------------ + * rfapi_is_vnc_configured + * + * Returns if VNC (BGP VPN messaging /VPN & encap SAFIs) are configured + * + * input: + * rfp_start_val value returned by rfp_start or + * NULL (=use default instance) + * + * output: + * + * return value: If VNC is configured for the bgpd instance + * 0 Success + * ENXIO VNC not configured + --------------------------------------------*/ +int +rfapi_is_vnc_configured (void *rfp_start_val) +{ + struct bgp *bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + return bgp_rfapi_is_vnc_configured (bgp); +} + + +/*------------------------------------------ + * rfapi_get_vn_addr + * + * Get the virtual network address used by an NVE based on it's RFD + * + * input: + * rfd: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * vn NVE virtual network address + *------------------------------------------*/ +struct rfapi_ip_addr * +rfapi_get_vn_addr (void *rfd) +{ + struct rfapi_descriptor *rrfd = (struct rfapi_descriptor *) rfd; + return &rrfd->vn_addr; +} + +/*------------------------------------------ + * rfapi_get_un_addr + * + * Get the underlay network address used by an NVE based on it's RFD + * + * input: + * rfd: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * un NVE underlay network address + *------------------------------------------*/ +struct rfapi_ip_addr * +rfapi_get_un_addr (void *rfd) +{ + struct rfapi_descriptor *rrfd = (struct rfapi_descriptor *) rfd; + return &rrfd->un_addr; +} + +int +rfapi_ip_addr_cmp (struct rfapi_ip_addr *a1, struct rfapi_ip_addr *a2) +{ + if (a1->addr_family != a2->addr_family) + return a1->addr_family - a2->addr_family; + + if (a1->addr_family == AF_INET) + { + return IPV4_ADDR_CMP (&a1->addr.v4, &a2->addr.v4); + } + + if (a1->addr_family == AF_INET6) + { + return IPV6_ADDR_CMP (&a1->addr.v6, &a2->addr.v6); + } + + assert (1); + /* NOTREACHED */ + return 1; +} + +static int +rfapi_find_node ( + struct bgp *bgp, + struct rfapi_ip_addr *vn_addr, + struct rfapi_ip_addr *un_addr, + struct route_node **node) +{ + struct rfapi *h; + struct prefix p; + struct route_node *rn; + int rc; + int afi; + + if (!bgp) + { + return ENXIO; + } + + h = bgp->rfapi; + if (!h) + { + return ENXIO; + } + + afi = family2afi (un_addr->addr_family); + if (!afi) + { + return EAFNOSUPPORT; + } + + if ((rc = rfapiRaddr2Qprefix (un_addr, &p))) + return rc; + + rn = route_node_lookup (&h->un[afi], &p); + + if (!rn) + return ENOENT; + + route_unlock_node (rn); + + *node = rn; + + return 0; +} + + +int +rfapi_find_rfd ( + struct bgp *bgp, + struct rfapi_ip_addr *vn_addr, + struct rfapi_ip_addr *un_addr, + struct rfapi_descriptor **rfd) +{ + struct route_node *rn; + int rc; + + rc = rfapi_find_node (bgp, vn_addr, un_addr, &rn); + + if (rc) + return rc; + + for (*rfd = (struct rfapi_descriptor *) (rn->info); *rfd; + *rfd = (*rfd)->next) + { + if (!rfapi_ip_addr_cmp (&(*rfd)->vn_addr, vn_addr)) + break; + } + + if (!*rfd) + return ENOENT; + + return 0; +} + +/*------------------------------------------ + * rfapi_find_handle + * + * input: + * un underlay network address + * vn virtual network address + * + * output: + 
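[Editor's note, not part of the patch] A hedged sketch of how an RFP client might combine rfapi_is_vnc_configured() with rfapi_error_str() above; the helper name and the zlog_err() reporting are illustrative assumptions:

static int
example_require_vnc (void *rfp_start_val)  /* value returned by rfp_start */
{
  int rc = rfapi_is_vnc_configured (rfp_start_val);

  if (rc)  /* non-zero is an errno-style code, e.g. ENXIO */
    zlog_err ("VNC unavailable: %s", rfapi_error_str (rc));

  return rc;
}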
* pHandle pointer to location to store handle + * + * return value: + * 0 Success + * ENOENT no matching handle + * ENXIO BGP or VNC not configured + *------------------------------------------*/ +static int +rfapi_find_handle ( + struct bgp *bgp, + struct rfapi_ip_addr *vn_addr, + struct rfapi_ip_addr *un_addr, + rfapi_handle *handle) +{ + struct rfapi_descriptor **rfd; + + rfd = (struct rfapi_descriptor **) handle; + + return rfapi_find_rfd (bgp, vn_addr, un_addr, rfd); +} + +static int +rfapi_find_handle_vty ( + struct vty *vty, + struct rfapi_ip_addr *vn_addr, + struct rfapi_ip_addr *un_addr, + rfapi_handle *handle) +{ + struct bgp *bgp; + struct rfapi_descriptor **rfd; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + + rfd = (struct rfapi_descriptor **) handle; + + return rfapi_find_rfd (bgp, vn_addr, un_addr, rfd); +} + +static int +is_valid_rfd (struct rfapi_descriptor *rfd) +{ + rfapi_handle hh; + + if (!rfd || rfd->bgp == NULL) + return 0; + + if (rfapi_find_handle (rfd->bgp, &rfd->vn_addr, &rfd->un_addr, &hh)) + return 0; + + if (rfd != hh) + return 0; + + return 1; +} + +/* + * check status of descriptor + */ +int +rfapi_check (void *handle) +{ + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + rfapi_handle hh; + int rc; + + if (!rfd || rfd->bgp == NULL) + return EINVAL; + + if ((rc = rfapi_find_handle (rfd->bgp, &rfd->vn_addr, &rfd->un_addr, &hh))) + return rc; + + if (rfd != hh) + return ENOENT; + + if (!rfd->rfg) + return ESTALE; + + return 0; +} + + + +void +del_vnc_route ( + struct rfapi_descriptor *rfd, + struct peer *peer, /* rfd->peer for RFP regs */ + struct bgp *bgp, + safi_t safi, + struct prefix *p, + struct prefix_rd *prd, + uint8_t type, + uint8_t sub_type, + struct rfapi_nexthop *lnh, + int kill) +{ + afi_t afi; /* of the VN address */ + struct bgp_node *bn; + struct bgp_info *bi; + char buf[BUFSIZ]; + char buf2[BUFSIZ]; + struct prefix_rd prd0; + + prefix2str (p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + + prefix_rd2str (prd, buf2, BUFSIZ); + buf2[BUFSIZ - 1] = 0; + + afi = family2afi (p->family); + assert (afi == AFI_IP || afi == AFI_IP6); + + if (safi == SAFI_ENCAP) + { + memset (&prd0, 0, sizeof (prd0)); + prd0.family = AF_UNSPEC; + prd0.prefixlen = 64; + prd = &prd0; + } + bn = bgp_afi_node_get (bgp->rib[afi][safi], afi, safi, p, prd); + + zlog_debug + ("%s: peer=%p, prefix=%s, prd=%s afi=%d, safi=%d bn=%p, bn->info=%p", + __func__, peer, buf, buf2, afi, safi, bn, (bn ? bn->info : NULL)); + + for (bi = (bn ? bn->info : NULL); bi; bi = bi->next) + { + + zlog_debug + ("%s: trying bi=%p, bi->peer=%p, bi->type=%d, bi->sub_type=%d, bi->extra->vnc.export.rfapi_handle=%p", + __func__, bi, bi->peer, bi->type, bi->sub_type, + (bi->extra ? bi->extra->vnc.export.rfapi_handle : NULL)); + + if (bi->peer == peer && + bi->type == type && + bi->sub_type == sub_type && + bi->extra && bi->extra->vnc.export.rfapi_handle == (void *) rfd) + { + + zlog_debug ("%s: matched it", __func__); + + break; + } + } + + if (lnh) + { + /* + * lnh set means to JUST delete the local nexthop from this + * route. Leave the route itself in place. 
+ * TBD add return code reporting of success/failure + */ + if (!bi || !bi->extra || !bi->extra->vnc.export.local_nexthops) + { + /* + * no local nexthops + */ + zlog_debug ("%s: lnh list already empty at prefix %s", + __func__, buf); + goto done; + } + + /* + * look for it + */ + struct listnode *node; + struct rfapi_nexthop *pLnh = NULL; + + for (ALL_LIST_ELEMENTS_RO (bi->extra->vnc.export.local_nexthops, + node, pLnh)) + { + + if (prefix_same (&pLnh->addr, &lnh->addr)) + { + break; + } + } + + if (pLnh) + { + listnode_delete (bi->extra->vnc.export.local_nexthops, pLnh); + + /* silly rabbit, listnode_delete doesn't invoke list->del on data */ + rfapi_nexthop_free (pLnh); + } + else + { + zlog_debug ("%s: desired lnh not found %s", __func__, buf); + } + goto done; + } + + /* + * loop back to import tables + * Do this before removing from BGP RIB because rfapiProcessWithdraw + * might refer to it + */ + rfapiProcessWithdraw (peer, rfd, p, prd, NULL, afi, safi, type, kill); + + if (bi) + { + char buf[BUFSIZ]; + + prefix2str (p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + + zlog_debug ("%s: Found route (safi=%d) to delete at prefix %s", + __func__, safi, buf); + + if (safi == SAFI_MPLS_VPN) + { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get (bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) + { + table = (struct bgp_table *) (prn->info); + + vnc_import_bgp_del_vnc_host_route_mode_resolve_nve (bgp, + prd, + table, + p, bi); + } + bgp_unlock_node (prn); + } + + /* + * Delete local_nexthops list + */ + if (bi->extra && bi->extra->vnc.export.local_nexthops) + { + list_delete (bi->extra->vnc.export.local_nexthops); + } + + bgp_aggregate_decrement (bgp, p, bi, afi, safi); + bgp_info_delete (bn, bi); + bgp_process (bgp, bn, afi, safi); + } + else + { + zlog_debug ("%s: Couldn't find route (safi=%d) at prefix %s", + __func__, safi, buf); + } +done: + bgp_unlock_node (bn); +} + +struct rfapi_nexthop * +rfapi_nexthop_new (struct rfapi_nexthop *copyme) +{ + struct rfapi_nexthop *new = + XCALLOC (MTYPE_RFAPI_NEXTHOP, sizeof (struct rfapi_nexthop)); + if (copyme) + *new = *copyme; + return new; +} + +void +rfapi_nexthop_free (void *p) +{ + struct rfapi_nexthop *goner = p; + XFREE (MTYPE_RFAPI_NEXTHOP, goner); +} + +struct rfapi_vn_option * +rfapi_vn_options_dup (struct rfapi_vn_option *existing) +{ + struct rfapi_vn_option *p; + struct rfapi_vn_option *head = NULL; + struct rfapi_vn_option *tail = NULL; + + for (p = existing; p; p = p->next) + { + struct rfapi_vn_option *new; + + new = XCALLOC (MTYPE_RFAPI_VN_OPTION, sizeof (struct rfapi_vn_option)); + *new = *p; + new->next = NULL; + if (tail) + (tail)->next = new; + tail = new; + if (!head) + { + head = new; + } + } + return head; +} + +void +rfapi_un_options_free (struct rfapi_un_option *p) +{ + struct rfapi_un_option *next; + + while (p) + { + next = p->next; + XFREE (MTYPE_RFAPI_UN_OPTION, p); + p = next; + } +} + +void +rfapi_vn_options_free (struct rfapi_vn_option *p) +{ + struct rfapi_vn_option *next; + + while (p) + { + next = p->next; + XFREE (MTYPE_RFAPI_VN_OPTION, p); + p = next; + } +} + +/* Based on bgp_redistribute_add() */ +void +add_vnc_route ( + struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */ + struct bgp *bgp, + int safi, + struct prefix *p, + struct prefix_rd *prd, + struct rfapi_ip_addr *nexthop, + uint32_t *local_pref, + uint32_t *lifetime, /* NULL => dont send lifetime */ + struct bgp_tea_options *rfp_options, + struct rfapi_un_option 
*options_un, + struct rfapi_vn_option *options_vn, + struct ecommunity *rt_export_list,/* Copied, not consumed */ + uint32_t *med, /* NULL => don't set med */ + uint32_t *label, /* low order 3 bytes */ + uint8_t type, + uint8_t sub_type, /* RFP, NORMAL or REDIST */ + int flags) +{ + afi_t afi; /* of the VN address */ + struct bgp_info *new; + struct bgp_info *bi; + struct bgp_node *bn; + + struct attr attr = { 0 }; + struct attr *new_attr; + uint32_t label_val; + + struct bgp_attr_encap_subtlv *encaptlv; + char buf[BUFSIZ]; + char buf2[BUFSIZ]; +#if 0 /* unused? */ + struct prefix pfx_buf; +#endif + + struct rfapi_nexthop *lnh = NULL; /* local nexthop */ + struct rfapi_vn_option *vo; + struct rfapi_l2address_option *l2o = NULL; + struct rfapi_ip_addr *un_addr = &rfd->un_addr; + + bgp_encap_types TunnelType = BGP_ENCAP_TYPE_RESERVED; + struct bgp_redist *red; + + if (safi == SAFI_ENCAP && + !(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP)) + { + + /* + * Encap mode not enabled. UN addresses will be communicated + * via VNC Tunnel subtlv instead. + */ + zlog_debug ("%s: encap mode not enabled, not adding SAFI_ENCAP route", + __func__); + return; + } + +#if 0 /* unused? */ + if ((safi == SAFI_MPLS_VPN) && (flags & RFAPI_AHR_SET_PFX_TO_NEXTHOP)) + { + + if (rfapiRaddr2Qprefix (nexthop, &pfx_buf)) + { + zlog_debug + ("%s: can't set pfx to vn addr, not adding SAFI_MPLS_VPN route", + __func__); + return; + } + p = &pfx_buf; + } +#endif + for (vo = options_vn; vo; vo = vo->next) + { + if (RFAPI_VN_OPTION_TYPE_L2ADDR == vo->type) + { + l2o = &vo->v.l2addr; + if (RFAPI_0_ETHERADDR (&l2o->macaddr)) + l2o = NULL; /* not MAC resolution */ + } + if (RFAPI_VN_OPTION_TYPE_LOCAL_NEXTHOP == vo->type) + { + lnh = &vo->v.local_nexthop; + } + } + + if (label) + label_val = *label; + else + label_val = MPLS_LABEL_IMPLICIT_NULL; + + prefix_rd2str (prd, buf2, BUFSIZ); + buf2[BUFSIZ - 1] = 0; + + + afi = family2afi (p->family); + assert (afi == AFI_IP || afi == AFI_IP6); + + zlog_debug ("%s: afi=%s, safi=%s", __func__, afi2str (afi), + safi2str (safi)); + + /* Make default attribute. Produces already-interned attr.aspath */ + /* Cripes, the memory management of attributes is byzantine */ + + bgp_attr_default_set (&attr, BGP_ORIGIN_INCOMPLETE); + assert (attr.extra); + + /* + * At this point: + * attr: static + * extra: dynamically allocated, owned by attr + * aspath: points to interned hash from aspath hash table + */ + + + /* + * Route-specific un_options get added to the VPN SAFI + * advertisement tunnel encap attribute. (the per-NVE + * "default" un_options are put into the 1-per-NVE ENCAP + * SAFI advertisement). The VPN SAFI also gets the + * default un_options if there are no route-specific options. + */ + if (options_un) + { + struct rfapi_un_option *uo; + + for (uo = options_un; uo; uo = uo->next) + { + if (RFAPI_UN_OPTION_TYPE_TUNNELTYPE == uo->type) + { + TunnelType = rfapi_tunneltype_option_to_tlv ( + bgp, un_addr, &uo->v.tunnel, &attr, l2o != NULL); + } + } + } + else + { + /* + * Add encap attr + * These are the NVE-specific "default" un_options which are + * put into the 1-per-NVE ENCAP advertisement. 
+ */ + if (rfd->default_tunneltype_option.type) + { + TunnelType = rfapi_tunneltype_option_to_tlv ( + bgp, un_addr, &rfd->default_tunneltype_option, &attr, + l2o != NULL); + } + else + TunnelType = rfapi_tunneltype_option_to_tlv ( + bgp, un_addr, NULL, + /* create one to carry un_addr */ &attr, l2o != NULL); + } + + if (TunnelType == BGP_ENCAP_TYPE_MPLS) + { + if (safi == SAFI_ENCAP) + { + /* Encap SAFI not used with MPLS */ + zlog_debug ("%s: mpls tunnel type, encap safi omitted", __func__); + aspath_unintern (&attr.aspath); /* Unintern original. */ + bgp_attr_extra_free (&attr); + return; + } + nexthop = un_addr; /* UN used as MPLS NLRI nexthop */ + } + + if (local_pref) + { + attr.local_pref = *local_pref; + attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF); + } + + if (med) + { + attr.med = *med; + attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC); + } + + /* override default weight assigned by bgp_attr_default_set() */ + attr.extra->weight = rfd->peer ? rfd->peer->weight[afi][safi] : 0; + + /* + * NB: ticket 81: do not reset attr.aspath here because it would + * cause iBGP peers to drop route + */ + + /* + * Set originator ID for routes imported from BGP directly. + * These routes could be synthetic, and therefore could + * reuse the peer pointers of the routes they are derived + * from. Setting the originator ID to "us" prevents the + * wrong originator ID from being sent when this route is + * sent from a route reflector. + */ + if (type == ZEBRA_ROUTE_BGP_DIRECT || type == ZEBRA_ROUTE_BGP_DIRECT_EXT) + { + attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_ORIGINATOR_ID); + attr.extra->originator_id = bgp->router_id; + } + + + /* Set up vnc attribute (sub-tlv for Prefix Lifetime) */ + if (lifetime && *lifetime != RFAPI_INFINITE_LIFETIME) + { + uint32_t lt; + + encaptlv = + XCALLOC (MTYPE_ENCAP_TLV, + sizeof (struct bgp_attr_encap_subtlv) - 1 + 4); + assert (encaptlv); + encaptlv->type = BGP_VNC_SUBTLV_TYPE_LIFETIME; /* prefix lifetime */ + encaptlv->length = 4; + lt = htonl (*lifetime); + memcpy (encaptlv->value, &lt, 4); + attr.extra->vnc_subtlvs = encaptlv; + zlog_debug ("%s: set Encap Attr Prefix Lifetime to %d", + __func__, *lifetime); + } + + /* add rfp options to vnc attr */ + if (rfp_options) + { + + if (flags & RFAPI_AHR_RFPOPT_IS_VNCTLV) + { + + /* + * this flag means we're passing a pointer to an + * existing encap tlv chain which we should copy. 
+ * It's a hack to avoid adding yet another argument + * to add_vnc_route() + */ + encaptlv = + encap_tlv_dup ((struct bgp_attr_encap_subtlv *) rfp_options); + if (attr.extra->vnc_subtlvs) + { + attr.extra->vnc_subtlvs->next = encaptlv; + } + else + { + attr.extra->vnc_subtlvs = encaptlv; + } + + } + else + { + struct bgp_tea_options *hop; + /* XXX max of one tlv present so far from above code */ + struct bgp_attr_encap_subtlv *tail = attr.extra->vnc_subtlvs; + + for (hop = rfp_options; hop; hop = hop->next) + { + + /* + * Construct subtlv + */ + encaptlv = XCALLOC (MTYPE_ENCAP_TLV, + sizeof (struct bgp_attr_encap_subtlv) - 1 + + 2 + hop->length); + assert (encaptlv); + encaptlv->type = BGP_VNC_SUBTLV_TYPE_RFPOPTION; /* RFP option */ + encaptlv->length = 2 + hop->length; + *((uint8_t *) (encaptlv->value) + 0) = hop->type; + *((uint8_t *) (encaptlv->value) + 1) = hop->length; + memcpy (((uint8_t *) encaptlv->value) + 2, hop->value, + hop->length); + + /* + * add to end of subtlv chain + */ + if (tail) + { + tail->next = encaptlv; + } + else + { + attr.extra->vnc_subtlvs = encaptlv; + } + tail = encaptlv; + } + } + } + + /* + * At this point: + * attr: static + * extra: dynamically allocated, owned by attr + * vnc_subtlvs: dynamic chain, length 1 + * aspath: points to interned hash from aspath hash table + */ + + + attr.extra->ecommunity = ecommunity_new (); + assert (attr.extra->ecommunity); + + if (TunnelType != BGP_ENCAP_TYPE_MPLS && + TunnelType != BGP_ENCAP_TYPE_RESERVED) + { + /* + * Add BGP Encapsulation Extended Community. Format described in + * section 4.5 of RFC 5512. + * Always include when not MPLS type, to disambiguate this case. + */ + struct ecommunity_val beec; + + memset (&beec, 0, sizeof (beec)); + beec.val[0] = ECOMMUNITY_ENCODE_OPAQUE; + beec.val[1] = ECOMMUNITY_OPAQUE_SUBTYPE_ENCAP; + beec.val[6] = ((TunnelType) >> 8) & 0xff; + beec.val[7] = (TunnelType) & 0xff; + ecommunity_add_val (attr.extra->ecommunity, &beec); + } + + /* + * Add extended community attributes to match rt export list + */ + if (rt_export_list) + { + attr.extra->ecommunity = + ecommunity_merge (attr.extra->ecommunity, rt_export_list); + } + + if (attr.extra->ecommunity->size) + { + attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_EXT_COMMUNITIES); + } + else + { + ecommunity_free (&attr.extra->ecommunity); + attr.extra->ecommunity = NULL; + } + zlog_debug ("%s: attr.extra->ecommunity=%p", __func__, + attr.extra->ecommunity); + + + /* + * At this point: + * attr: static + * extra: dynamically allocated, owned by attr + * vnc_subtlvs: dynamic chain, length 1 + * ecommunity: dynamic 2-part + * aspath: points to interned hash from aspath hash table + */ + + /* stuff nexthop in attr_extra; which field depends on IPv4 or IPv6 */ + switch (nexthop->addr_family) + { + case AF_INET: + /* + * set this field to prevent bgp_route.c code from setting + * mp_nexthop_global_in to self + */ + attr.nexthop.s_addr = nexthop->addr.v4.s_addr; + + attr.extra->mp_nexthop_global_in = nexthop->addr.v4; + attr.extra->mp_nexthop_len = 4; + break; + + case AF_INET6: + attr.extra->mp_nexthop_global = nexthop->addr.v6; + attr.extra->mp_nexthop_len = 16; + break; + + default: + assert (0); + } + + + prefix2str (p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + + /* + * At this point: + * + * attr: static + * extra: dynamically allocated, owned by attr + * vnc_subtlvs: dynamic chain, length 1 + * ecommunity: dynamic 2-part + * aspath: points to interned hash from aspath hash table + */ + + red = bgp_redist_lookup(bgp, 
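[Editor's note, not part of the patch] The opaque extended community built above follows RFC 5512: the opaque encoding and ENCAP subtype sit in octets 0-1 and the tunnel type is carried, high byte first, in octets 6-7. A small sketch of reading the tunnel type back out of such a value (helper name is illustrative):

static uint16_t
example_ecom_val_to_tunnel_type (const struct ecommunity_val *eval)
{
  /* octet 6 is the high byte, octet 7 the low byte, as written above */
  return (uint16_t) (((uint8_t) eval->val[6] << 8) | (uint8_t) eval->val[7]);
}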
afi, type, VRF_DEFAULT); + + if (red && red->redist_metric_flag) + { + attr.med = red->redist_metric; + attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC); + } + + bn = bgp_afi_node_get (bgp->rib[afi][safi], afi, safi, p, prd); + + /* + * bgp_attr_intern creates a new reference to a cached + * attribute, but leaves the following bits of trash: + * - old attr + * - old attr->extra (free via bgp_attr_extra_free(attr)) + * + * Note that it frees the original attr->extra->ecommunity + * but leaves the new attribute pointing to the ORIGINAL + * vnc options (which therefore we needn't free from the + * static attr) + */ + new_attr = bgp_attr_intern (&attr); + + aspath_unintern (&attr.aspath); /* Unintern original. */ + bgp_attr_extra_free (&attr); + + /* + * At this point: + * + * attr: static + * extra: dynamically allocated, owned by attr + * vnc_subtlvs: dynamic chain, length 1 + * ecommunity: POINTS TO INTERNED ecom, THIS REF NOT COUNTED + * + * new_attr: an attr that is part of the hash table, distinct + * from attr which is static. + * extra: dynamically allocated, owned by new_attr (in hash table) + * vnc_subtlvs: POINTS TO SAME dynamic chain AS attr + * ecommunity: POINTS TO interned/refcounted dynamic 2-part AS attr + * aspath: POINTS TO interned/refcounted hashed block + */ + for (bi = bn->info; bi; bi = bi->next) + { + /* probably only need to check bi->extra->vnc.export.rfapi_handle */ + if (bi->peer == rfd->peer && + bi->type == type && + bi->sub_type == sub_type && + bi->extra && bi->extra->vnc.export.rfapi_handle == (void *) rfd) + { + + break; + } + } + + if (bi) + { + + /* + * Adding new local_nexthop, which does not by itself change + * what is advertised via BGP + */ + if (lnh) + { + if (!bi->extra->vnc.export.local_nexthops) + { + /* TBD make arrangements to free when needed */ + bi->extra->vnc.export.local_nexthops = list_new (); + bi->extra->vnc.export.local_nexthops->del = rfapi_nexthop_free; + } + + /* + * already present? + */ + struct listnode *node; + struct rfapi_nexthop *pLnh = NULL; + + for (ALL_LIST_ELEMENTS_RO (bi->extra->vnc.export.local_nexthops, + node, pLnh)) + { + + if (prefix_same (&pLnh->addr, &lnh->addr)) + { + break; + } + } + + /* + * Not present, add new one + */ + if (!pLnh) + { + pLnh = rfapi_nexthop_new (lnh); + listnode_add (bi->extra->vnc.export.local_nexthops, pLnh); + } + } + + if (attrhash_cmp (bi->attr, new_attr) && + !CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + { + bgp_attr_unintern (&new_attr); + bgp_unlock_node (bn); + + zlog_info ("%s: Found route (safi=%d) at prefix %s, no change", + __func__, safi, buf); + + goto done; + } + else + { + /* The attribute is changed. */ + bgp_info_set_flag (bn, bi, BGP_INFO_ATTR_CHANGED); + + if (safi == SAFI_MPLS_VPN) + { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get (bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) + { + table = (struct bgp_table *) (prn->info); + + vnc_import_bgp_del_vnc_host_route_mode_resolve_nve ( + bgp, prd, table, p, bi); + } + bgp_unlock_node (prn); + } + + /* Rewrite BGP route information. 
*/ + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + bgp_info_restore (bn, bi); + else + bgp_aggregate_decrement (bgp, p, bi, afi, safi); + bgp_attr_unintern (&bi->attr); + bi->attr = new_attr; + bi->uptime = bgp_clock (); + + + if (safi == SAFI_MPLS_VPN) + { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get (bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) + { + table = (struct bgp_table *) (prn->info); + + vnc_import_bgp_add_vnc_host_route_mode_resolve_nve ( + bgp, prd, table, p, bi); + } + bgp_unlock_node (prn); + } + + /* Process change. */ + bgp_aggregate_increment (bgp, p, bi, afi, safi); + bgp_process (bgp, bn, afi, safi); + bgp_unlock_node (bn); + + zlog_info ("%s: Found route (safi=%d) at prefix %s, changed attr", + __func__, safi, buf); + + goto done; + } + } + + + new = bgp_info_new (); + new->type = type; + new->sub_type = sub_type; + new->peer = rfd->peer; + SET_FLAG (new->flags, BGP_INFO_VALID); + new->attr = new_attr; + new->uptime = bgp_clock (); + + /* save backref to rfapi handle */ + assert (bgp_info_extra_get (new)); + new->extra->vnc.export.rfapi_handle = (void *) rfd; + encode_label (label_val, new->extra->tag); + + /* debug */ + zlog_debug ("%s: printing BI", __func__); + rfapiPrintBi (NULL, new); + + bgp_aggregate_increment (bgp, p, new, afi, safi); + bgp_info_add (bn, new); + + if (safi == SAFI_MPLS_VPN) + { + struct bgp_node *prn = NULL; + struct bgp_table *table = NULL; + + prn = bgp_node_get (bgp->rib[afi][safi], (struct prefix *) prd); + if (prn->info) + { + table = (struct bgp_table *) (prn->info); + + vnc_import_bgp_add_vnc_host_route_mode_resolve_nve ( + bgp, prd, table, p, new); + } + bgp_unlock_node (prn); + } + + bgp_unlock_node (bn); + bgp_process (bgp, bn, afi, safi); + + zlog_info ("%s: Added route (safi=%s) at prefix %s (bn=%p, prd=%s)", + __func__, safi2str (safi), buf, bn, buf2); + +done: + /* Loop back to import tables */ + rfapiProcessUpdate (rfd->peer, + rfd, + p, prd, new_attr, afi, safi, type, sub_type, &label_val); + zlog_debug ("%s: looped back import route (safi=%d)", __func__, safi); +} + +uint32_t +rfp_cost_to_localpref (uint8_t cost) +{ + return 255 - cost; +} + +static void +rfapiTunnelRouteAnnounce ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + uint32_t *pLifetime) +{ + struct prefix_rd prd; + struct prefix pfx_vn; + int rc; + uint32_t local_pref = rfp_cost_to_localpref (0); + + rc = rfapiRaddr2Qprefix (&(rfd->vn_addr), &pfx_vn); + assert (!rc); + + /* + * Construct route distinguisher = 0 + */ + memset (&prd, 0, sizeof (prd)); + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + add_vnc_route (rfd, /* rfapi descr, for export list & backref */ + bgp, /* which bgp instance */ + SAFI_ENCAP, /* which SAFI */ + &pfx_vn, /* prefix to advertise */ + &prd, /* route distinguisher to use */ + &rfd->un_addr, /* nexthop */ + &local_pref, + pLifetime, /* max lifetime of child VPN routes */ + NULL, /* no rfp options for ENCAP safi */ + NULL, /* rfp un options */ + NULL, /* rfp vn options */ + rfd->rt_export_list, + NULL, /* med */ + NULL, /* label: default */ + ZEBRA_ROUTE_BGP, + BGP_ROUTE_RFP, + 0); +} + + +/*********************************************************************** + * RFP processing behavior configuration + ***********************************************************************/ + +/*------------------------------------------ + * rfapi_rfp_set_configuration + * + * This is used to change rfapi's processing behavior based on + * RFP requirements. 
+ * + * input: + * rfp_start_val value returned by rfp_start + * rfapi_rfp_cfg Pointer to configuration structure + * + * output: + * none + * + * return value: + * 0 Success + * ENXIO Unabled to locate configured BGP/VNC +--------------------------------------------*/ +int +rfapi_rfp_set_configuration (void *rfp_start_val, struct rfapi_rfp_cfg *new) +{ + struct rfapi_rfp_cfg *rcfg; + struct bgp *bgp; + + bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + + if (!new || !bgp || !bgp->rfapi_cfg) + return ENXIO; + + rcfg = &bgp->rfapi_cfg->rfp_cfg; + rcfg->download_type = new->download_type; + rcfg->ftd_advertisement_interval = new->ftd_advertisement_interval; + rcfg->holddown_factor = new->holddown_factor; + + if (rcfg->use_updated_response != new->use_updated_response) + { + rcfg->use_updated_response = new->use_updated_response; + if (rcfg->use_updated_response) + rfapiMonitorCallbacksOn (bgp); + else + rfapiMonitorCallbacksOff (bgp); + } + if (rcfg->use_removes != new->use_removes) + { + rcfg->use_removes = new->use_removes; + if (rcfg->use_removes) + rfapiMonitorResponseRemovalOn (bgp); + else + rfapiMonitorResponseRemovalOff (bgp); + } + return 0; +} + +/*------------------------------------------ + * rfapi_rfp_set_cb_methods + * + * Change registered callback functions for asynchronous notifications + * from RFAPI to the RFP client. + * + * input: + * rfp_start_val value returned by rfp_start + * methods Pointer to struct rfapi_rfp_cb_methods containing + * pointers to callback methods as described above + * + * return value: + * 0 Success + * ENXIO BGP or VNC not configured + *------------------------------------------*/ +int +rfapi_rfp_set_cb_methods (void *rfp_start_val, + struct rfapi_rfp_cb_methods *methods) +{ + struct rfapi *h; + struct bgp *bgp; + + bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + if (!bgp) + return ENXIO; + + h = bgp->rfapi; + if (!h) + return ENXIO; + + h->rfp_methods = *methods; + + return 0; +} + +/*********************************************************************** + * NVE Sessions + ***********************************************************************/ + +/* + * Caller must supply an already-allocated rfd with the "caller" + * fields already set (vn_addr, un_addr, callback, cookie) + * The advertised_prefixes[] array elements should be NULL to + * have this function set them to newly-allocated radix trees. + */ +static int +rfapi_open_inner ( + struct rfapi_descriptor *rfd, + struct bgp *bgp, + struct rfapi *h, + struct rfapi_nve_group_cfg *rfg) +{ + int ret; + + if (h->flags & RFAPI_INCALLBACK) + return EDEADLK; + + /* + * Fill in configured fields + */ + + /* + * If group's RD is specified as "auto", then fill in based + * on NVE's VN address + */ + rfd->rd = rfg->rd; + + if (rfd->rd.family == AF_UNIX) + { + ret = rfapi_set_autord_from_vn (&rfd->rd, &rfd->vn_addr); + if (ret != 0) + return ret; + } + rfd->rt_export_list = (rfg->rt_export_list) ? 
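[Editor's note, not part of the patch] A minimal sketch of an RFP client driving rfapi_rfp_set_configuration() above; the helper name and the chosen field values are illustrative, while the structure members and constants are the ones referenced elsewhere in this patch:

static int
example_configure_rfp (void *rfp_start_val)  /* value returned by rfp_start */
{
  struct rfapi_rfp_cfg cfg;

  memset (&cfg, 0, sizeof (cfg));
  cfg.download_type = RFAPI_RFP_DOWNLOAD_PARTIAL;   /* per-query responses */
  cfg.ftd_advertisement_interval =
    RFAPI_RFP_CFG_DEFAULT_FTD_ADVERTISEMENT_INTERVAL;
  cfg.holddown_factor = RFAPI_RFP_CFG_DEFAULT_HOLDDOWN_FACTOR;
  cfg.use_updated_response = 1;                     /* async updated responses */
  cfg.use_removes = 1;                              /* async removal responses */

  return rfapi_rfp_set_configuration (rfp_start_val, &cfg);
}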
+ ecommunity_dup (rfg->rt_export_list) : NULL; + rfd->response_lifetime = rfg->response_lifetime; + rfd->rfg = rfg; + + /* + * Fill in BGP peer structure + */ + rfd->peer = peer_new (bgp); + rfd->peer->status = Established; /* keep bgp core happy */ + bgp_sync_delete (rfd->peer); /* don't need these */ + if (rfd->peer->ibuf) + { + stream_free (rfd->peer->ibuf); /* don't need it */ + rfd->peer->ibuf = NULL; + } + if (rfd->peer->obuf) + { + stream_fifo_free (rfd->peer->obuf); /* don't need it */ + rfd->peer->obuf = NULL; + } + if (rfd->peer->work) + { + stream_free (rfd->peer->work); /* don't need it */ + rfd->peer->work = NULL; + } + { /* base code assumes have valid host pointer */ + char buf[BUFSIZ]; + buf[0] = 0; + + if (rfd->vn_addr.addr_family == AF_INET) + { + inet_ntop (AF_INET, &rfd->vn_addr.addr.v4, buf, BUFSIZ); + } + else if (rfd->vn_addr.addr_family == AF_INET6) + { + inet_ntop (AF_INET6, &rfd->vn_addr.addr.v6, buf, BUFSIZ); + } + rfd->peer->host = XSTRDUP (MTYPE_BGP_PEER_HOST, buf); + } + /* Mark peer as belonging to HD */ + SET_FLAG (rfd->peer->flags, PEER_FLAG_IS_RFAPI_HD); + + /* + * Set min prefix lifetime to max value so it will get set + * upon first rfapi_register() + */ + rfd->min_prefix_lifetime = UINT32_MAX; + + /* + * Allocate response tables if needed + */ +#define RFD_RTINIT_AFI(rh, ary, afi) do {\ + if (!ary[afi]) { \ + ary[afi] = route_table_init ();\ + ary[afi]->info = rh;\ + }\ +} while (0) + +#define RFD_RTINIT(rh, ary) do {\ + RFD_RTINIT_AFI(rh, ary, AFI_IP);\ + RFD_RTINIT_AFI(rh, ary, AFI_IP6);\ + RFD_RTINIT_AFI(rh, ary, AFI_ETHER);\ +} while(0) + + RFD_RTINIT(rfd, rfd->rib); + RFD_RTINIT(rfd, rfd->rib_pending); + RFD_RTINIT(rfd, rfd->rsp_times); + + /* + * Link to Import Table + */ + rfd->import_table = rfg->rfapi_import_table; + rfd->import_table->refcount += 1; + + rfapiApInit (&rfd->advertised); + + /* + * add this NVE descriptor to the list of NVEs in the NVE group + */ + if (!rfg->nves) + { + rfg->nves = list_new (); + } + listnode_add (rfg->nves, rfd); + + vnc_direct_bgp_add_nve (bgp, rfd); + vnc_zebra_add_nve (bgp, rfd); + + return 0; +} + +struct rfapi_vn_option * +rfapiVnOptionsDup (struct rfapi_vn_option *orig) +{ + struct rfapi_vn_option *head = NULL; + struct rfapi_vn_option *tail = NULL; + struct rfapi_vn_option *vo = NULL; + + for (vo = orig; vo; vo = vo->next) + { + struct rfapi_vn_option *new; + + new = XCALLOC (MTYPE_RFAPI_VN_OPTION, sizeof (struct rfapi_vn_option)); + memcpy (new, vo, sizeof (struct rfapi_vn_option)); + new->next = NULL; + + if (tail) + { + tail->next = new; + } + else + { + head = tail = new; + } + } + return head; +} + +struct rfapi_un_option * +rfapiUnOptionsDup (struct rfapi_un_option *orig) +{ + struct rfapi_un_option *head = NULL; + struct rfapi_un_option *tail = NULL; + struct rfapi_un_option *uo = NULL; + + for (uo = orig; uo; uo = uo->next) + { + struct rfapi_un_option *new; + + new = XCALLOC (MTYPE_RFAPI_UN_OPTION, sizeof (struct rfapi_un_option)); + memcpy (new, uo, sizeof (struct rfapi_un_option)); + new->next = NULL; + + if (tail) + { + tail->next = new; + } + else + { + head = tail = new; + } + } + return head; +} + +struct bgp_tea_options * +rfapiOptionsDup (struct bgp_tea_options *orig) +{ + struct bgp_tea_options *head = NULL; + struct bgp_tea_options *tail = NULL; + struct bgp_tea_options *hop = NULL; + + for (hop = orig; hop; hop = hop->next) + { + struct bgp_tea_options *new; + + new = XCALLOC (MTYPE_BGP_TEA_OPTIONS, sizeof (struct bgp_tea_options)); + memcpy (new, hop, sizeof (struct bgp_tea_options)); 
+ new->next = NULL; + if (hop->value) + { + new->value = XCALLOC (MTYPE_BGP_TEA_OPTIONS_VALUE, hop->length); + memcpy (new->value, hop->value, hop->length); + } + if (tail) + { + tail->next = new; + } + else + { + head = tail = new; + } + } + return head; +} + +void +rfapiFreeBgpTeaOptionChain (struct bgp_tea_options *p) +{ + struct bgp_tea_options *next; + + while (p) + { + next = p->next; + + if (p->value) + { + XFREE (MTYPE_BGP_TEA_OPTIONS_VALUE, p->value); + p->value = NULL; + } + XFREE (MTYPE_BGP_TEA_OPTIONS, p); + + p = next; + } +} + +void +rfapiAdbFree (struct rfapi_adb *adb) +{ + XFREE (MTYPE_RFAPI_ADB, adb); +} + +static int +rfapi_query_inner ( + void *handle, + struct rfapi_ip_addr *target, + struct rfapi_l2address_option *l2o, /* may be NULL */ + struct rfapi_next_hop_entry **ppNextHopEntry) +{ + afi_t afi; + struct prefix p; + struct prefix p_original; + struct route_node *rn; + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + struct bgp *bgp = rfd->bgp; + struct rfapi_next_hop_entry *pNHE = NULL; + struct rfapi_ip_addr *self_vn_addr = NULL; + int eth_is_0 = 0; + int use_eth_resolution = 0; + struct rfapi_next_hop_entry *i_nhe; + + /* preemptive */ + if (!bgp) + { + zlog_debug ("%s: No BGP instance, returning ENXIO", __func__); + return ENXIO; + } + if (!bgp->rfapi) + { + zlog_debug ("%s: No RFAPI instance, returning ENXIO", __func__); + return ENXIO; + } + if (bgp->rfapi->flags & RFAPI_INCALLBACK) + { + zlog_debug ("%s: Called during calback, returning EDEADLK", __func__); + return EDEADLK; + } + + if (!is_valid_rfd (rfd)) + { + zlog_debug ("%s: invalid handle, returning EBADF", __func__); + return EBADF; + } + + rfd->rsp_counter++; /* dedup: identify this generation */ + rfd->rsp_time = rfapi_time (NULL); /* response content dedup */ + rfd->ftd_last_allowed_time = + bgp_clock() - bgp->rfapi_cfg->rfp_cfg.ftd_advertisement_interval; + + if (l2o) + { + if (!memcmp (l2o->macaddr.octet, rfapi_ethaddr0.octet, ETHER_ADDR_LEN)) + { + eth_is_0 = 1; + } + /* per t/c Paul/Lou 151022 */ + if (!eth_is_0 || l2o->logical_net_id) + { + use_eth_resolution = 1; + } + } + + if (ppNextHopEntry) + *ppNextHopEntry = NULL; + + /* + * Save original target in prefix form. 
In case of L2-based queries, + * p_original will be modified to reflect the L2 target + */ + assert(!rfapiRaddr2Qprefix (target, &p_original)); + + if (bgp->rfapi_cfg->rfp_cfg.download_type == RFAPI_RFP_DOWNLOAD_FULL) + { + /* convert query to 0/0 when full-table download is enabled */ + memset ((char *) &p, 0, sizeof (p)); + p.family = target->addr_family; + } + else + { + p = p_original; + } + + { + char buf[BUFSIZ]; + + prefix2str (&p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + zlog_debug ("%s(rfd=%p, target=%s, ppNextHop=%p)", + __func__, rfd, buf, ppNextHopEntry); + } + + afi = family2afi (p.family); + assert (afi); + + if (CHECK_FLAG (bgp->rfapi_cfg->flags, BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP)) + { + self_vn_addr = &rfd->vn_addr; + } + + if (use_eth_resolution) + { + uint32_t logical_net_id = l2o->logical_net_id; + struct ecommunity *l2com; + + /* + * fix up p_original to contain L2 address + */ + rfapiL2o2Qprefix (l2o, &p_original); + + l2com = + bgp_rfapi_get_ecommunity_by_lni_label (bgp, 1, logical_net_id, + l2o->label); + if (l2com) + { + uint8_t *v = l2com->val; + logical_net_id = (v[5] << 16) + (v[6] << 8) + (v[7]); + } + /* + * Ethernet/L2-based lookup + * + * Always returns IT node corresponding to route + */ + + if (RFAPI_RFP_DOWNLOAD_FULL == bgp->rfapi_cfg->rfp_cfg.download_type) + { + eth_is_0 = 1; + } + + rn = rfapiMonitorEthAdd (bgp, + rfd, + (eth_is_0 ? &rfapi_ethaddr0 : &l2o->macaddr), + logical_net_id); + + if (eth_is_0) + { + struct rfapi_ip_prefix rprefix; + + memset (&rprefix, 0, sizeof (rprefix)); + rprefix.prefix.addr_family = target->addr_family; + if (target->addr_family == AF_INET) + { + rprefix.length = 32; + } + else + { + rprefix.length = 128; + } + + pNHE = rfapiEthRouteTable2NextHopList (logical_net_id, &rprefix, + rfd->response_lifetime, self_vn_addr, rfd->rib[afi], &p_original); + goto done; + } + + } + else + { + + /* + * IP-based lookup + */ + + rn = rfapiMonitorAdd (bgp, rfd, &p); + + /* + * If target address is 0, this request is special: means to + * return ALL routes in the table + * + * Monitors for All-Routes queries get put on a special list, + * not in the VPN tree + */ + if (RFAPI_0_PREFIX (&p)) + { + + zlog_debug ("%s: 0-prefix", __func__); + + /* + * Generate nexthop list for caller + */ + pNHE = rfapiRouteTable2NextHopList ( + rfd->import_table->imported_vpn[afi], rfd->response_lifetime, + self_vn_addr, rfd->rib[afi], &p_original); + goto done; + } + + if (rn) + { + route_lock_node (rn); /* so we can unlock below */ + } + else + { + /* + * returns locked node. Don't unlock yet because the unlock + * might free it before we're done with it. This situation + * could occur when rfapiMonitorGetAttachNode() returns a + * newly-created default node. 
+ */ + rn = rfapiMonitorGetAttachNode (rfd, &p); + } + } + + assert (rn); + if (!rn->info) + { + route_unlock_node (rn); + zlog_debug ("%s: VPN route not found, returning ENOENT", __func__); + return ENOENT; + } + + if (VNC_DEBUG(RFAPI_QUERY)) + { + rfapiShowImportTable (NULL, "query", rfd->import_table->imported_vpn[afi], + 1); + } + + if (use_eth_resolution) + { + + struct rfapi_ip_prefix rprefix; + + memset (&rprefix, 0, sizeof (rprefix)); + rprefix.prefix.addr_family = target->addr_family; + if (target->addr_family == AF_INET) + { + rprefix.length = 32; + } + else + { + rprefix.length = 128; + } + + pNHE = rfapiEthRouteNode2NextHopList (rn, &rprefix, + rfd->response_lifetime, self_vn_addr, rfd->rib[afi], &p_original); + + + } + else + { + /* + * Generate answer to query + */ + pNHE = rfapiRouteNode2NextHopList(rn, rfd->response_lifetime, + self_vn_addr, rfd->rib[afi], &p_original); + } + + route_unlock_node (rn); + +done: + if (ppNextHopEntry) + { + /* only count if caller gets it */ + ++bgp->rfapi->response_immediate_count; + } + + if (!pNHE) + { + zlog_debug ("%s: NO NHEs, returning ENOENT", __func__); + return ENOENT; + } + + /* + * count nexthops for statistics + */ + for (i_nhe = pNHE; i_nhe; i_nhe = i_nhe->next) + { + ++rfd->stat_count_nh_reachable; + } + + if (ppNextHopEntry) + { + *ppNextHopEntry = pNHE; + } + else + { + rfapi_free_next_hop_list (pNHE); + } + + zlog_debug ("%s: success", __func__); + return 0; +} + +/* + * support on-the-fly reassignment of an already-open nve to a new + * nve-group in the event that its original nve-group is + * administratively deleted. + */ +static int +rfapi_open_rfd (struct rfapi_descriptor *rfd, struct bgp *bgp) +{ + struct prefix pfx_vn; + struct prefix pfx_un; + struct rfapi_nve_group_cfg *rfg; + struct rfapi *h; + struct rfapi_cfg *hc; + int rc; + + h = bgp->rfapi; + if (!h) + return ENXIO; + + hc = bgp->rfapi_cfg; + if (!hc) + return ENXIO; + + rc = rfapiRaddr2Qprefix (&rfd->vn_addr, &pfx_vn); + assert (!rc); + + rc = rfapiRaddr2Qprefix (&rfd->un_addr, &pfx_un); + assert (!rc); + + /* + * Find the matching nve group config block + */ + rfg = bgp_rfapi_cfg_match_group (hc, &pfx_vn, &pfx_un); + if (!rfg) + { + return ENOENT; + } + + /* + * check nve group config block for required values + */ + if (!rfg->rt_export_list || !rfg->rfapi_import_table) + { + + return ENOMSG; + } + + rc = rfapi_open_inner (rfd, bgp, h, rfg); + if (rc) + { + return rc; + } + + /* + * re-advertise registered routes, this time as part of new NVE-group + */ + rfapiApReadvertiseAll (bgp, rfd); + + /* + * re-attach callbacks to import table + */ + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)) + { + rfapiMonitorAttachImportHd (rfd); + } + + return 0; +} + +/*------------------------------------------ + * rfapi_open + * + * This function initializes a NVE record and associates it with + * the specified VN and underlay network addresses + * + * input: + * rfp_start_val value returned by rfp_start + * vn NVE virtual network address + * + * un NVE underlay network address + * + * default_options Default options to use on registrations. + * For now only tunnel type is supported. + * May be overridden per-prefix in rfapi_register(). + * Caller owns (rfapi_open() does not free) + * + * response_cb Pointer to next hop list update callback function or + * NULL when no callbacks are desired. + * + * userdata Passed to subsequent response_cb invocations. + * + * output: + * response_lifetime The length of time that responses sent to this + * NVE are valid. 
+ * + * pHandle pointer to location to store rfapi handle. The + * handle must be passed on subsequent rfapi_ calls. + * + * + * return value: + * 0 Success + * EEXIST NVE with this {vn,un} already open + * ENOENT No matching nve group config + * ENOMSG Matched nve group config was incomplete + * ENXIO BGP or VNC not configured + * EAFNOSUPPORT Matched nve group specifies auto-assignment of RD, + * but underlay network address is not IPv4 + * EDEADLK Called from within a callback procedure + *------------------------------------------*/ +int +rfapi_open ( + void *rfp_start_val, + struct rfapi_ip_addr *vn, + struct rfapi_ip_addr *un, + struct rfapi_un_option *default_options, + uint32_t *response_lifetime, + void *userdata, /* callback cookie */ + rfapi_handle *pHandle) +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_descriptor *rfd; + struct rfapi_cfg *hc; + struct rfapi_nve_group_cfg *rfg; + + struct prefix pfx_vn; + struct prefix pfx_un; + + struct route_node *rn; + int rc; + rfapi_handle hh = NULL; + int reusing_provisional = 0; + + afi_t afi_vn; + afi_t afi_un; + + { + char buf[2][INET_ADDRSTRLEN]; + zlog_debug ("%s: VN=%s UN=%s", __func__, + rfapiRfapiIpAddr2Str (vn, buf[0], INET_ADDRSTRLEN), + rfapiRfapiIpAddr2Str (un, buf[1], INET_ADDRSTRLEN)); + } + + assert (pHandle); + *pHandle = NULL; + + bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + if (!bgp) + return ENXIO; + + h = bgp->rfapi; + if (!h) + return ENXIO; + + hc = bgp->rfapi_cfg; + if (!hc) + return ENXIO; + + if (h->flags & RFAPI_INCALLBACK) + return EDEADLK; + + rc = rfapiRaddr2Qprefix (vn, &pfx_vn); + assert (!rc); + + rc = rfapiRaddr2Qprefix (un, &pfx_un); + assert (!rc); + + /* + * already have a descriptor with VN and UN? + */ + if (!rfapi_find_handle (bgp, vn, un, &hh)) + { + /* + * we might have set up a handle for static routes before + * this NVE was opened. In that case, reuse the handle + */ + rfd = hh; + if (!CHECK_FLAG (rfd->flags, RFAPI_HD_FLAG_PROVISIONAL)) + { + return EEXIST; + } + + /* + * reuse provisional descriptor + * hh is not NULL + */ + reusing_provisional = 1; + } + + /* + * Find the matching nve group config block + */ + rfg = bgp_rfapi_cfg_match_group (hc, &pfx_vn, &pfx_un); + if (!rfg) + { + ++h->stat.count_unknown_nves; + { + char buf[2][INET_ADDRSTRLEN]; + zlog_notice ("%s: no matching group VN=%s UN=%s", __func__, + rfapiRfapiIpAddr2Str (vn, buf[0], INET_ADDRSTRLEN), + rfapiRfapiIpAddr2Str (un, buf[1], INET_ADDRSTRLEN)); + } + return ENOENT; + } + + /* + * check nve group config block for required values + */ + if (!rfg->rt_export_list || !rfg->rfapi_import_table) + { + + ++h->stat.count_unknown_nves; + return ENOMSG; + } + + /* + * If group config specifies auto-rd assignment, check that + * VN address is IPv4|v6 so we don't fail in rfapi_open_inner(). + * Check here so we don't need to unwind memory allocations, &c. 
+ */ + if ((rfg->rd.family == AF_UNIX) && (vn->addr_family != AF_INET) + && (vn->addr_family != AF_INET6)) + { + return EAFNOSUPPORT; + } + + if (hh) + { + /* + * reusing provisional rfd + */ + rfd = hh; + } + else + { + rfd = XCALLOC (MTYPE_RFAPI_DESC, sizeof (struct rfapi_descriptor)); + } + assert (rfd); + + rfd->bgp = bgp; + if (default_options) + { + struct rfapi_un_option *p; + + for (p = default_options; p; p = p->next) + { + if ((RFAPI_UN_OPTION_TYPE_PROVISIONAL == p->type)) + { + rfd->flags |= RFAPI_HD_FLAG_PROVISIONAL; + } + if ((RFAPI_UN_OPTION_TYPE_TUNNELTYPE == p->type)) + { + rfd->default_tunneltype_option = p->v.tunnel; + } + } + } + + /* + * Fill in caller fields + */ + rfd->vn_addr = *vn; + rfd->un_addr = *un; + rfd->cookie = userdata; + + if (!reusing_provisional) + { + rfapi_time (&rfd->open_time); + + { + char buf_vn[BUFSIZ]; + char buf_un[BUFSIZ]; + + rfapiRfapiIpAddr2Str (vn, buf_vn, BUFSIZ); + rfapiRfapiIpAddr2Str (un, buf_un, BUFSIZ); + + zlog_debug ("%s: new HD with VN=%s UN=%s cookie=%p", + __func__, buf_vn, buf_un, userdata); + } + + listnode_add (&h->descriptors, rfd); + if (h->descriptors.count > h->stat.max_descriptors) + { + h->stat.max_descriptors = h->descriptors.count; + } + + /* + * attach to UN radix tree + */ + afi_vn = family2afi (rfd->vn_addr.addr_family); + afi_un = family2afi (rfd->un_addr.addr_family); + assert (afi_vn && afi_un); + assert (!rfapiRaddr2Qprefix (&rfd->un_addr, &pfx_un)); + + rn = route_node_get (&(h->un[afi_un]), &pfx_un); + assert (rn); + rfd->next = rn->info; + rn->info = rfd; + rfd->un_node = rn; + + rc = rfapi_open_inner (rfd, bgp, h, rfg); + /* + * This can fail only if the VN address is IPv6 and the group + * specified auto-assignment of RDs, which only works for v4, + * and the check above should catch it. + * + * Another failure possibility is that we were called + * during an rfapi callback. Also checked above. + */ + assert (!rc); + } + + if (response_lifetime) + *response_lifetime = rfd->response_lifetime; + *pHandle = rfd; + return 0; +} + +/* + * For use with debug functions + */ +static int +rfapi_set_response_cb (struct rfapi_descriptor *rfd, + rfapi_response_cb_t * response_cb) +{ + if (!is_valid_rfd (rfd)) + return EBADF; + rfd->response_cb = response_cb; + return 0; +} + +/* + * rfapi_close_inner + * + * Does almost all the work of rfapi_close, except: + * 1. preserves the descriptor (doesn't free it) + * 2. preserves the prefix query list (i.e., rfd->mon list) + * 3. preserves the advertised prefix list (rfd->advertised) + * 4. preserves the rib and rib_pending tables + * + * The purpose of organizing it this way is to support on-the-fly + * reassignment of an already-open nve to a new nve-group in the + * event that its original nve-group is administratively deleted. 
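For orientation, here is a minimal sketch, not part of the patch itself, of the NVE lifecycle seen through the API introduced above: rfapi_open(), one rfapi_register(), then rfapi_close(). The addresses, the rfp_start_val argument, and the absence of options and callbacks are placeholder assumptions; error handling is reduced to an early return.

    static void
    example_nve_lifecycle (void *rfp_start_val)  /* value obtained from rfp_start() */
    {
      struct rfapi_ip_addr vn;
      struct rfapi_ip_addr un;
      struct rfapi_ip_prefix pfx;
      uint32_t lifetime;
      rfapi_handle h = NULL;

      memset (&vn, 0, sizeof (vn));
      vn.addr_family = AF_INET;
      inet_pton (AF_INET, "10.1.1.1", &vn.addr.v4);     /* placeholder VN address */

      memset (&un, 0, sizeof (un));
      un.addr_family = AF_INET;
      inet_pton (AF_INET, "192.0.2.1", &un.addr.v4);    /* placeholder UN address */

      /* no default options, no response callback cookie */
      if (rfapi_open (rfp_start_val, &vn, &un, NULL, &lifetime, NULL, &h))
        return;                  /* ENOENT/ENOMSG/ENXIO/... per the table above */

      memset (&pfx, 0, sizeof (pfx));
      pfx.length = 24;
      pfx.prefix.addr_family = AF_INET;
      inet_pton (AF_INET, "10.1.1.0", &pfx.prefix.addr.v4);

      /* advertise 10.1.1.0/24 as reachable via this NVE for 600 seconds */
      rfapi_register (h, &pfx, 600, NULL, NULL, RFAPI_REGISTER_ADD);

      /* withdraws this NVE's registrations and frees the handle */
      rfapi_close (h);
    }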
+ */ +static int +rfapi_close_inner (struct rfapi_descriptor *rfd, struct bgp *bgp) +{ + int rc; + struct prefix pfx_vn; + struct prefix_rd prd; /* currently always 0 for VN->UN */ + + if (!is_valid_rfd (rfd)) + return EBADF; + + rc = rfapiRaddr2Qprefix (&rfd->vn_addr, &pfx_vn); + assert (!rc); /* should never have bad AF in stored vn address */ + + /* + * update exported routes to reflect disappearance of this NVE as nexthop + */ + vnc_direct_bgp_del_nve (bgp, rfd); + vnc_zebra_del_nve (bgp, rfd); + + /* + * unlink this HD's monitors from import table + */ + rfapiMonitorDetachImportHd (rfd); + + /* + * Unlink from Import Table + * NB rfd->import_table will be NULL if we are closing a stale descriptor + */ + if (rfd->import_table) + rfapiImportTableRefDelByIt (bgp, rfd->import_table); + rfd->import_table = NULL; + + /* + * Construct route distinguisher + */ + memset (&prd, 0, sizeof (prd)); + prd = rfd->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + /* + * withdraw tunnel + */ + del_vnc_route ( + rfd, + rfd->peer, + bgp, + SAFI_ENCAP, + &pfx_vn, /* prefix being advertised */ + &prd, /* route distinguisher to use (0 for ENCAP) */ + ZEBRA_ROUTE_BGP, + BGP_ROUTE_RFP, + NULL, + 0); /* no kill */ + + /* + * Construct route distinguisher for VPN routes + */ + prd = rfd->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + /* + * find all VPN routes associated with this rfd and delete them, too + */ + rfapiApWithdrawAll (bgp, rfd); + + /* + * remove this nve descriptor from the list of nves + * associated with the nve group + */ + if (rfd->rfg) + { + listnode_delete (rfd->rfg->nves, rfd); + rfd->rfg = NULL; /* XXX mark as orphaned/stale */ + } + + if (rfd->rt_export_list) + ecommunity_free (&rfd->rt_export_list); + rfd->rt_export_list = NULL; + + /* + * free peer structure (possibly delayed until its + * refcount reaches zero) + */ + if (rfd->peer) + { + zlog_debug ("%s: calling peer_delete(%p), #%d", + __func__, rfd->peer, rfd->peer->lock); + peer_delete (rfd->peer); + } + rfd->peer = NULL; + + return 0; +} + +int +rfapi_close (void *handle) +{ + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + int rc; + struct route_node *node; + struct bgp *bgp; + struct rfapi *h; + + zlog_debug ("%s: rfd=%p", __func__, rfd); + +#if RFAPI_WHO_IS_CALLING_ME +#ifdef HAVE_GLIBC_BACKTRACE +#define RFAPI_DEBUG_BACKTRACE_NENTRIES 5 + { + void *buf[RFAPI_DEBUG_BACKTRACE_NENTRIES]; + char **syms; + int i; + size_t size; + + size = backtrace (buf, RFAPI_DEBUG_BACKTRACE_NENTRIES); + syms = backtrace_symbols (buf, size); + for (i = 0; i < size && i < RFAPI_DEBUG_BACKTRACE_NENTRIES; ++i) + { + zlog_debug ("backtrace[%2d]: %s", i, syms[i]); + } + free (syms); + } +#endif +#endif + + bgp = rfd->bgp; + if (!bgp) + return ENXIO; + + h = bgp->rfapi; + if (!h) + return ENXIO; + + if (!is_valid_rfd (rfd)) + return EBADF; + + if (h->flags & RFAPI_INCALLBACK) + { + /* + * Queue these close requests for processing after callback + * is finished + */ + if (!CHECK_FLAG (rfd->flags, RFAPI_HD_FLAG_CLOSING_ADMINISTRATIVELY)) + { + work_queue_add (h->deferred_close_q, handle); + zlog_debug ("%s: added handle %p to deferred close queue", + __func__, handle); + } + return 0; + } + + if (CHECK_FLAG (rfd->flags, RFAPI_HD_FLAG_CLOSING_ADMINISTRATIVELY)) + { + + zlog_debug ("%s administrative close rfd=%p", __func__, rfd); + + if (h && h->rfp_methods.close_cb) + { + zlog_debug ("%s calling close callback rfd=%p", __func__, rfd); + + /* + * call the callback fairly early so that it can still lookup un/vn + * from 
handle, etc. + * + * NB RFAPI_INCALLBACK is tested above, so if we reach this point + * we are not already in the context of a callback. + */ + h->flags |= RFAPI_INCALLBACK; + (*h->rfp_methods.close_cb) (handle, EIDRM); + h->flags &= ~RFAPI_INCALLBACK; + } + } + + if (rfd->rfg) + { + /* + * Orphaned descriptors have already done this part, so do + * only for non-orphaned descriptors. + */ + if ((rc = rfapi_close_inner (rfd, bgp))) + return rc; + } + + /* + * Remove descriptor from UN index + * (remove from chain at node) + */ + rc = rfapi_find_node (bgp, &rfd->vn_addr, &rfd->un_addr, &node); + if (!rc) + { + struct rfapi_descriptor *hh; + + if (node->info == rfd) + { + node->info = rfd->next; + } + else + { + + for (hh = node->info; hh; hh = hh->next) + { + if (hh->next == rfd) + { + hh->next = rfd->next; + break; + } + } + } + route_unlock_node (node); + } + + /* + * remove from descriptor list + */ + listnode_delete (&h->descriptors, rfd); + + /* + * Delete monitor list items and free monitor structures + */ + (void) rfapiMonitorDelHd (rfd); + + /* + * release advertised prefix data + */ + rfapiApRelease (&rfd->advertised); + + /* + * Release RFP callback RIB + */ + rfapiRibFree (rfd); + + /* + * free descriptor + */ + memset (rfd, 0, sizeof (struct rfapi_descriptor)); + XFREE (MTYPE_RFAPI_DESC, rfd); + + return 0; +} + +/* + * Reopen a nve descriptor. If the descriptor's NVE-group + * does not exist (e.g., if it has been administratively removed), + * reassignment to a new NVE-group is attempted. + * + * If NVE-group reassignment fails, the descriptor becomes "stale" + * (rfd->rfg == NULL implies "stale:). The only permissible API operation + * on a stale descriptor is rfapi_close(). Any other rfapi_* API operation + * on the descriptor will return ESTALE. + * + * Reopening a descriptor is a potentially expensive operation, because + * it involves withdrawing any routes advertised by the NVE, withdrawing + * the NVE's route queries, and then re-adding them all after a new + * NVE-group is assigned. There are also possible route-export affects + * caused by deleting and then adding the NVE: advertised prefixes + * and nexthop lists for exported routes can turn over. + */ +int +rfapi_reopen (struct rfapi_descriptor *rfd, struct bgp *bgp) +{ + struct rfapi *h; + int rc; + + if ((rc = rfapi_close_inner (rfd, bgp))) + { + return rc; + } + if ((rc = rfapi_open_rfd (rfd, bgp))) + { + + h = bgp->rfapi; + + assert (!CHECK_FLAG (h->flags, RFAPI_INCALLBACK)); + + if (CHECK_FLAG (rfd->flags, RFAPI_HD_FLAG_CLOSING_ADMINISTRATIVELY) && + h && h->rfp_methods.close_cb) + { + + /* + * NB RFAPI_INCALLBACK is tested above, so if we reach this point + * we are not already in the context of a callback. 
+ */ + h->flags |= RFAPI_INCALLBACK; + (*h->rfp_methods.close_cb) ((rfapi_handle) rfd, ESTALE); + h->flags &= ~RFAPI_INCALLBACK; + } + return rc; + } + return 0; +} + +/*********************************************************************** + * NVE Routes + ***********************************************************************/ +/* + * Announce reachability to this prefix via the NVE + */ +int +rfapi_register ( + void *handle, + struct rfapi_ip_prefix *prefix, + uint32_t lifetime, /* host byte order */ + struct rfapi_un_option *options_un, + struct rfapi_vn_option *options_vn, + rfapi_register_action action) +{ + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + struct bgp *bgp; + struct prefix p; + struct prefix *pfx_ip = NULL; + struct prefix_rd prd; + int afi; + struct prefix pfx_mac_buf; + struct prefix *pfx_mac = NULL; + struct prefix pfx_vn_buf; + const char *action_str = NULL; + uint32_t *label = NULL; + struct rfapi_vn_option *vo; + struct rfapi_l2address_option *l2o = NULL; + struct prefix_rd *prd_override = NULL; + + switch (action) + { + case RFAPI_REGISTER_ADD: + action_str = "add"; + break; + case RFAPI_REGISTER_WITHDRAW: + action_str = "withdraw"; + break; + case RFAPI_REGISTER_KILL: + action_str = "kill"; + break; + default: + assert (0); + break; + } + + /* + * Inspect VN options + */ + for (vo = options_vn; vo; vo = vo->next) + { + if (RFAPI_VN_OPTION_TYPE_L2ADDR == vo->type) + { + l2o = &vo->v.l2addr; + } + if (RFAPI_VN_OPTION_TYPE_INTERNAL_RD == vo->type) + { + prd_override = &vo->v.internal_rd; + } + } + + /********************************************************************* + * advertise prefix + *********************************************************************/ + + /* + * set

based on + */ + assert (!rfapiRprefix2Qprefix (prefix, &p)); + + afi = family2afi (prefix->prefix.addr_family); + assert (afi); + + + { + char buf[BUFSIZ]; + + prefix2str (&p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + zlog_debug + ("%s(rfd=%p, pfx=%s, lifetime=%d, opts_un=%p, opts_vn=%p, action=%s)", + __func__, rfd, buf, lifetime, options_un, options_vn, action_str); + } + + /* + * These tests come after the prefix conversion so that we can + * print the prefix in a debug message before failing + */ + + bgp = rfd->bgp; + if (!bgp) + { + zlog_debug ("%s: no BGP instance: returning ENXIO", __func__); + return ENXIO; + } + if (!bgp->rfapi) + { + zlog_debug ("%s: no RFAPI instance: returning ENXIO", __func__); + return ENXIO; + } + if (!rfd->rfg) + { + if (RFAPI_REGISTER_ADD == action) + { + ++bgp->rfapi->stat.count_registrations_failed; + } + zlog_debug ("%s: rfd=%p, no RF GRP instance: returning ESTALE", + __func__, rfd); + return ESTALE; + } + + if (bgp->rfapi->flags & RFAPI_INCALLBACK) + { + if (RFAPI_REGISTER_ADD == action) + { + ++bgp->rfapi->stat.count_registrations_failed; + } + zlog_debug ("%s: in callback: returning EDEADLK", __func__); + return EDEADLK; + } + + if (!is_valid_rfd (rfd)) + { + if (RFAPI_REGISTER_ADD == action) + { + ++bgp->rfapi->stat.count_registrations_failed; + } + zlog_debug ("%s: invalid handle: returning EBADF", __func__); + return EBADF; + } + + /* + * Is there a MAC address in this registration? + */ + if (l2o && !RFAPI_0_ETHERADDR (&l2o->macaddr)) + { + rfapiL2o2Qprefix (l2o, &pfx_mac_buf); + pfx_mac = &pfx_mac_buf; + } + + /* + * Is there an IP prefix in this registration? + */ + if (!(RFAPI_0_PREFIX (&p) && RFAPI_HOST_PREFIX (&p))) + { + pfx_ip = &p; + } + else + { + if (!pfx_mac) + { + zlog_debug ("%s: missing mac addr that is required for host 0 pfx", + __func__); + if (RFAPI_REGISTER_ADD == action) + { + ++bgp->rfapi->stat.count_registrations_failed; + } + return EINVAL; + } + if (rfapiRaddr2Qprefix (&rfd->vn_addr, &pfx_vn_buf)) + { + zlog_debug ("%s: handle has bad vn_addr: returning EBADF", + __func__); + if (RFAPI_REGISTER_ADD == action) + { + ++bgp->rfapi->stat.count_registrations_failed; + } + return EBADF; + } + } + + if (RFAPI_REGISTER_ADD == action) + { + ++bgp->rfapi->stat.count_registrations; + } + + /* + * Figure out if this registration is missing an IP address + * + * MAC-addr based: + * + * In RFAPI, we use prefixes in family AF_LINK to store + * the MAC addresses. These prefixes are used for the + * list of advertised prefixes and in the RFAPI import + * tables. + * + * In BGP proper, we use the prefix matching the NVE's + * VN address with a host prefix-length (i.e., 32 or 128). + * + */ + if (l2o && l2o->logical_net_id && RFAPI_0_PREFIX (&p) && + RFAPI_HOST_PREFIX (&p)) + { + + rfapiL2o2Qprefix (l2o, &pfx_mac_buf); + pfx_mac = &pfx_mac_buf; + } + + /* + * Construct route distinguisher + */ + if (prd_override) + { + prd = *prd_override; + } + else + { + memset (&prd, 0, sizeof (prd)); + if (pfx_mac) + { + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + encode_rd_type(RD_TYPE_VNC_ETH, prd.val); + if (l2o->local_nve_id || !(rfd->rfg->flags & RFAPI_RFG_L2RD)) + { + /* + * If Local NVE ID is specified in message, use it. 
+ * (if no local default configured, also use it even if 0) + */ + prd.val[1] = l2o->local_nve_id; + } + else + { + if (rfd->rfg->l2rd) + { + /* + * locally-configured literal value + */ + prd.val[1] = rfd->rfg->l2rd; + } + else + { + /* + * 0 means auto:vn, which means use LSB of VN addr + */ + if (rfd->vn_addr.addr_family == AF_INET) + { + prd.val[1] = + *(((char *) &rfd->vn_addr.addr.v4.s_addr) + 3); + } + else + { + prd.val[1] = + *(((char *) &rfd->vn_addr.addr.v6.s6_addr) + 15); + } + } + } + memcpy (prd.val + 2, pfx_mac->u.prefix_eth.octet, 6); + } + else + { + prd = rfd->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + } + } + + + if (action == RFAPI_REGISTER_WITHDRAW || action == RFAPI_REGISTER_KILL) + { + + int adv_tunnel = 0; + + /* + * withdraw previous advertisement + */ + del_vnc_route ( + rfd, + rfd->peer, + bgp, + SAFI_MPLS_VPN, + pfx_ip ? pfx_ip : &pfx_vn_buf, /* prefix being advertised */ + &prd, /* route distinguisher (0 for ENCAP) */ + ZEBRA_ROUTE_BGP, + BGP_ROUTE_RFP, + NULL, + action == RFAPI_REGISTER_KILL); + + if (0 == rfapiApDelete (bgp, rfd, &p, pfx_mac, &adv_tunnel)) + { + if (adv_tunnel) + rfapiTunnelRouteAnnounce (bgp, rfd, &rfd->max_prefix_lifetime); + } + + } + else + { + + int adv_tunnel = 0; + uint32_t local_pref; + struct ecommunity *rtlist = NULL; + struct ecommunity_val ecom_value; + + if (!rfapiApCount (rfd)) + { + /* + * make sure we advertise tunnel route upon adding the + * first VPN route + */ + adv_tunnel = 1; + } + + if (rfapiApAdd (bgp, rfd, &p, pfx_mac, &prd, lifetime, prefix->cost, + l2o)) + { + adv_tunnel = 1; + } + + zlog_debug ("%s: adv_tunnel = %d", __func__, adv_tunnel); + if (adv_tunnel) + { + zlog_debug ("%s: announcing tunnel route", __func__); + rfapiTunnelRouteAnnounce (bgp, rfd, &rfd->max_prefix_lifetime); + } + + zlog_debug ("%s: calling add_vnc_route", __func__); + + local_pref = rfp_cost_to_localpref (prefix->cost); + + if (l2o && l2o->label) + label = &l2o->label; + + if (pfx_mac) + { + struct ecommunity *l2com = NULL; + + if (label) + { + l2com = bgp_rfapi_get_ecommunity_by_lni_label (bgp, 1, + l2o->logical_net_id, + *label); + } + if (l2com) + { + rtlist = ecommunity_dup (l2com); + } + else + { + /* + * If mac address is set, add an RT based on the registered LNI + */ + memset ((char *) &ecom_value, 0, sizeof (ecom_value)); + ecom_value.val[1] = 0x02; + ecom_value.val[5] = (l2o->logical_net_id >> 16) & 0xff; + ecom_value.val[6] = (l2o->logical_net_id >> 8) & 0xff; + ecom_value.val[7] = (l2o->logical_net_id >> 0) & 0xff; + rtlist = ecommunity_new(); + ecommunity_add_val (rtlist, &ecom_value); + } + } + + /* + * advertise prefix via tunnel endpoint + */ + add_vnc_route ( + rfd, /* rfapi descr, for export list & backref */ + bgp, /* which bgp instance */ + SAFI_MPLS_VPN, /* which SAFI */ + (pfx_ip ? pfx_ip : &pfx_vn_buf), /* prefix being advertised */ + &prd, /* route distinguisher to use (0 for ENCAP) */ + &rfd->vn_addr, /* nexthop */ + &local_pref, + &lifetime, /* prefix lifetime -> Tunnel Encap attr */ + NULL, + options_un, /* rfapi un options */ + options_vn, /* rfapi vn options */ + (rtlist ? 
rtlist : rfd->rt_export_list), + NULL, /* med */ + label, /* label: default */ + ZEBRA_ROUTE_BGP, + BGP_ROUTE_RFP, + 0); + + if (rtlist) + ecommunity_free (&rtlist); /* sets rtlist = NULL */ + } + + zlog_debug ("%s: success", __func__); + return 0; +} + +int +rfapi_query ( + void *handle, + struct rfapi_ip_addr *target, + struct rfapi_l2address_option *l2o, /* may be NULL */ + struct rfapi_next_hop_entry **ppNextHopEntry) +{ + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + struct bgp *bgp = rfd->bgp; + int rc; + + assert (ppNextHopEntry); + *ppNextHopEntry = NULL; + + if (bgp && bgp->rfapi) + { + bgp->rfapi->stat.count_queries++; + } + + if (!rfd->rfg) + { + if (bgp && bgp->rfapi) + ++bgp->rfapi->stat.count_queries_failed; + return ESTALE; + } + + if ((rc = rfapi_query_inner (handle, target, l2o, ppNextHopEntry))) + { + if (bgp && bgp->rfapi) + ++bgp->rfapi->stat.count_queries_failed; + } + return rc; +} + +int +rfapi_query_done (rfapi_handle handle, struct rfapi_ip_addr *target) +{ + struct prefix p; + int rc; + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + struct bgp *bgp = rfd->bgp; + + if (!rfd->rfg) + return ESTALE; + + assert (target); + rc = rfapiRaddr2Qprefix (target, &p); + assert (!rc); + + if (!is_valid_rfd (rfd)) + return EBADF; + + /* preemptive */ + if (!bgp || !bgp->rfapi) + return ENXIO; + + if (bgp->rfapi->flags & RFAPI_INCALLBACK) + return EDEADLK; + + rfapiMonitorDel (bgp, rfd, &p); + + return 0; +} + +int +rfapi_query_done_all (rfapi_handle handle, int *count) +{ + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + struct bgp *bgp = rfd->bgp;; + int num; + + if (!rfd->rfg) + return ESTALE; + + if (!is_valid_rfd (rfd)) + return EBADF; + + /* preemptive */ + if (!bgp || !bgp->rfapi) + return ENXIO; + + if (bgp->rfapi->flags & RFAPI_INCALLBACK) + return EDEADLK; + + num = rfapiMonitorDelHd (rfd); + + if (count) + *count = num; + + return 0; +} + +void +rfapi_free_next_hop_list (struct rfapi_next_hop_entry *list) +{ + struct rfapi_next_hop_entry *nh; + struct rfapi_next_hop_entry *next; + + for (nh = list; nh; nh = next) + { + next = nh->next; + rfapi_un_options_free (nh->un_options); + nh->un_options = NULL; + rfapi_vn_options_free (nh->vn_options); + nh->vn_options = NULL; + XFREE (MTYPE_RFAPI_NEXTHOP, nh); + } +} + +/* + * NULL handle => return total count across all nves + */ +uint32_t +rfapi_monitor_count (void *handle) +{ + struct bgp *bgp = bgp_get_default (); + uint32_t count; + + if (handle) + { + struct rfapi_descriptor *rfd = (struct rfapi_descriptor *) handle; + count = rfd->monitor_count; + } + else + { + + if (!bgp || !bgp->rfapi) + return 0; + + count = bgp->rfapi->monitor_count; + } + + return count; +} + +/*********************************************************************** + * CLI/CONFIG + ***********************************************************************/ + +DEFUN (debug_rfapi_show_nves, + debug_rfapi_show_nves_cmd, + "debug rfapi-dev show nves", + DEBUG_STR + DEBUG_RFAPI_STR + SHOW_STR + "NVE Information\n") +{ + rfapiPrintMatchingDescriptors (vty, NULL, NULL); + return CMD_SUCCESS; +} + +DEFUN ( + debug_rfapi_show_nves_vn_un, + debug_rfapi_show_nves_vn_un_cmd, + "debug rfapi-dev show nves (vn|un) (A.B.C.D|X:X::X:X)", /* prefix also ok */ + DEBUG_STR + DEBUG_RFAPI_STR + SHOW_STR + "NVE Information\n" + "Specify virtual network or underlay network interface\n" + "IPv4 or IPv6 address\n") +{ + struct prefix pfx; + + if (!str2prefix (argv[1], &pfx)) + { + vty_out (vty, "Malformed address 
\"%s\"%s", argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + if (pfx.family != AF_INET && pfx.family != AF_INET6) + { + vty_out (vty, "Invalid address \"%s\"%s", argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + if (*(argv[0]) == 'c') + { + rfapiPrintMatchingDescriptors (vty, NULL, &pfx); + } + else + { + rfapiPrintMatchingDescriptors (vty, &pfx, NULL); + } + return CMD_SUCCESS; +} + +/* + * Note: this function does not flush vty output, so if it is called + * with a stream pointing to a vty, the user will have to type something + * before the callback output shows up + */ +static void +test_nexthops_callback ( +// struct rfapi_ip_addr *target, + struct rfapi_next_hop_entry *next_hops, + void *userdata) +{ + void *stream = userdata; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + fp (out, "Nexthops Callback, Target=("); + //rfapiPrintRfapiIpAddr(stream, target); + fp (out, ")%s", VTY_NEWLINE); + + rfapiPrintNhl (stream, next_hops); + + rfapi_free_next_hop_list (next_hops); +} + +DEFUN (debug_rfapi_open, + debug_rfapi_open_cmd, + "debug rfapi-dev open vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X)", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_open\n" + "indicate vn addr follows\n" + "virtual network interface address\n" + "indicate xt addr follows\n" "underlay network interface address\n") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + uint32_t lifetime; + int rc; + rfapi_handle handle; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + rc = rfapi_open (rfapi_get_rfp_start_val_by_bgp (bgp_get_default ()), + &vn, &un, /*&uo */ NULL, &lifetime, NULL, &handle); + + vty_out (vty, "rfapi_open: status %d, handle %p, lifetime %d%s", + rc, handle, lifetime, VTY_NEWLINE); + + rc = rfapi_set_response_cb (handle, test_nexthops_callback); + + vty_out (vty, "rfapi_set_response_cb: status %d%s", rc, VTY_NEWLINE); + + return CMD_SUCCESS; +} + + +DEFUN (debug_rfapi_close_vn_un, + debug_rfapi_close_vn_un_cmd, + "debug rfapi-dev close vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X)", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_close\n" + "indicate vn addr follows\n" + "virtual network interface address\n" + "indicate xt addr follows\n" "underlay network interface address\n") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + rfapi_handle handle; + int rc; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + rc = rfapi_close (handle); + + vty_out (vty, "rfapi_close(handle=%p): status %d%s", handle, rc, + VTY_NEWLINE); + + return CMD_SUCCESS; +} + +DEFUN (debug_rfapi_close_rfd, + debug_rfapi_close_rfd_cmd, + "debug rfapi-dev close rfd HANDLE", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_close\n" + "indicate handle follows\n" "rfapi handle in hexadecimal\n") +{ + rfapi_handle handle; + int rc; + char *endptr = NULL; + + handle = (rfapi_handle) (uintptr_t) (strtoull (argv[0], &endptr, 16)); + + if (*endptr != '\0' || (uintptr_t) handle == UINTPTR_MAX) + { + vty_out (vty, "Invalid value: %s%s", 
argv[0], VTY_NEWLINE); + return CMD_WARNING; + } + + rc = rfapi_close (handle); + + vty_out (vty, "rfapi_close(handle=%p): status %d%s", handle, rc, + VTY_NEWLINE); + + return CMD_SUCCESS; +} + +DEFUN (debug_rfapi_register_vn_un, + debug_rfapi_register_vn_un_cmd, + "debug rfapi-dev register vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X) prefix (A.B.C.D/M|X:X::X:X/M) lifetime SECONDS", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_register\n" + "indicate vn addr follows\n" + "virtual network IPv4 interface address\n" + "virtual network IPv6 interface address\n" + "indicate un addr follows\n" + "underlay network IPv4 interface address\n" + "underlay network IPv6 interface address\n" + "indicate prefix follows\n" + "IPv4 prefix\n" + "IPv6 prefix\n" "indicate lifetime follows\n" "lifetime\n") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + rfapi_handle handle; + struct prefix pfx; + uint32_t lifetime; + struct rfapi_ip_prefix hpfx; + int rc; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + /* + * Get prefix to advertise + */ + if (!str2prefix (argv[2], &pfx)) + { + vty_out (vty, "Malformed prefix \"%s\"%s", argv[2], VTY_NEWLINE); + return CMD_WARNING; + } + if (pfx.family != AF_INET && pfx.family != AF_INET6) + { + vty_out (vty, "Bad family for prefix \"%s\"%s", argv[2], VTY_NEWLINE); + return CMD_WARNING; + } + rfapiQprefix2Rprefix (&pfx, &hpfx); + + if (!strcmp (argv[3], "infinite")) + { + lifetime = RFAPI_INFINITE_LIFETIME; + } + else + { + VTY_GET_INTEGER ("Lifetime", lifetime, argv[3]); + } + + + rc = rfapi_register (handle, &hpfx, lifetime, NULL, NULL, 0); + if (rc) + { + vty_out (vty, "rfapi_register failed with rc=%d (%s)%s", rc, + strerror (rc), VTY_NEWLINE); + } + + return CMD_SUCCESS; +} + +DEFUN (debug_rfapi_register_vn_un_l2o, + debug_rfapi_register_vn_un_l2o_cmd, + "debug rfapi-dev register" + " vn (A.B.C.D|X:X::X:X)" + " un (A.B.C.D|X:X::X:X)" + " prefix (A.B.C.D/M|X:X::X:X/M)" + " lifetime SECONDS" + " macaddr YY:YY:YY:YY:YY:YY" + " lni <0-16777215>", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_register\n" + "indicate vn addr follows\n" + "virtual network IPv4 interface address\n" + "virtual network IPv6 interface address\n" + "indicate un addr follows\n" + "underlay network IPv4 interface address\n" + "underlay network IPv6 interface address\n" + "indicate prefix follows\n" + "IPv4 prefix\n" + "IPv6 prefix\n" "indicate lifetime follows\n" "lifetime\n") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + rfapi_handle handle; + struct prefix pfx; + uint32_t lifetime; + struct rfapi_ip_prefix hpfx; + int rc; + struct rfapi_vn_option optary[10]; /* XXX must be big enough */ + struct rfapi_vn_option *opt = NULL; + int opt_next = 0; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + /* + * Get prefix to advertise + */ + if (!str2prefix (argv[2], &pfx)) + { + vty_out (vty, "Malformed prefix \"%s\"%s", argv[2], 
VTY_NEWLINE); + return CMD_WARNING; + } + if (pfx.family != AF_INET && pfx.family != AF_INET6) + { + vty_out (vty, "Bad family for prefix \"%s\"%s", argv[2], VTY_NEWLINE); + return CMD_WARNING; + } + rfapiQprefix2Rprefix (&pfx, &hpfx); + + if (!strcmp (argv[3], "infinite")) + { + lifetime = RFAPI_INFINITE_LIFETIME; + } + else + { + VTY_GET_INTEGER ("Lifetime", lifetime, argv[3]); + } + + /* L2 option parsing START */ + memset (optary, 0, sizeof (optary)); + VTY_GET_INTEGER ("Logical Network ID", + optary[opt_next].v.l2addr.logical_net_id, argv[5]); + if ((rc = rfapiStr2EthAddr (argv[4], &optary[opt_next].v.l2addr.macaddr))) + { + vty_out (vty, "Bad mac address \"%s\"%s", argv[4], VTY_NEWLINE); + return CMD_WARNING; + } + optary[opt_next].type = RFAPI_VN_OPTION_TYPE_L2ADDR; + if (opt_next) + { + optary[opt_next - 1].next = optary + opt_next; + } + else + { + opt = optary; + } + ++opt_next; + /* L2 option parsing END */ + + /* TBD fixme */ + rc = rfapi_register (handle, &hpfx, lifetime, NULL /* &uo */ , opt, 0); + if (rc) + { + vty_out (vty, "rfapi_register failed with rc=%d (%s)%s", rc, + strerror (rc), VTY_NEWLINE); + } + + return CMD_SUCCESS; +} + + +DEFUN (debug_rfapi_unregister_vn_un, + debug_rfapi_unregister_vn_un_cmd, + "debug rfapi-dev unregister vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X) prefix (A.B.C.D/M|X:X::X:X/M)", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_register\n" + "indicate vn addr follows\n" + "virtual network interface address\n" + "indicate xt addr follows\n" + "underlay network interface address\n" + "indicate prefix follows\n" "prefix") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + rfapi_handle handle; + struct prefix pfx; + struct rfapi_ip_prefix hpfx; + int rc; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + /* + * Get prefix to advertise + */ + if (!str2prefix (argv[2], &pfx)) + { + vty_out (vty, "Malformed prefix \"%s\"%s", argv[2], VTY_NEWLINE); + return CMD_WARNING; + } + if (pfx.family != AF_INET && pfx.family != AF_INET6) + { + vty_out (vty, "Bad family for prefix \"%s\"%s", argv[2], VTY_NEWLINE); + return CMD_WARNING; + } + rfapiQprefix2Rprefix (&pfx, &hpfx); + + rfapi_register (handle, &hpfx, 0, NULL, NULL, 1); + + return CMD_SUCCESS; +} + +DEFUN (debug_rfapi_query_vn_un, + debug_rfapi_query_vn_un_cmd, + "debug rfapi-dev query vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X) target (A.B.C.D|X:X::X:X)", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_query\n" + "indicate vn addr follows\n" + "virtual network interface address\n" + "indicate xt addr follows\n" + "underlay network interface address\n" + "indicate target follows\n" "target\n") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + struct rfapi_ip_addr target; + rfapi_handle handle; + int rc; + struct rfapi_next_hop_entry *pNextHopEntry; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + /* + * Get target addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[2], &target))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + 
argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + /* + * options parameter not used? Set to NULL for now + */ + rc = rfapi_query (handle, &target, NULL, &pNextHopEntry); + + if (rc) + { + vty_out (vty, "rfapi_query failed with rc=%d (%s)%s", rc, + strerror (rc), VTY_NEWLINE); + } + else + { + /* + * print nexthop list + */ + test_nexthops_callback ( /*&target, */ pNextHopEntry, vty); /* frees nh list! */ + } + + return CMD_SUCCESS; +} + + +DEFUN (debug_rfapi_query_vn_un_l2o, + debug_rfapi_query_vn_un_l2o_cmd, + "debug rfapi-dev query vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X) lni LNI target YY:YY:YY:YY:YY:YY", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_query\n" + "indicate vn addr follows\n" + "virtual network interface address\n" + "indicate xt addr follows\n" + "underlay network interface address\n" + "logical network ID follows\n" + "logical network ID\n" + "indicate target MAC addr follows\n" "target MAC addr\n") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + struct rfapi_ip_addr target; + rfapi_handle handle; + int rc; + struct rfapi_next_hop_entry *pNextHopEntry; + struct rfapi_l2address_option l2o_buf; + struct bgp_tea_options hopt; + uint8_t valbuf[14]; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + /* + * Get target addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[2], &target))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + /* + * Set up L2 parameters + */ + memset (&l2o_buf, 0, sizeof (l2o_buf)); + if (rfapiStr2EthAddr (argv[3], &l2o_buf.macaddr)) + { + vty_out (vty, "Bad mac address \"%s\"%s", argv[3], VTY_NEWLINE); + return CMD_WARNING; + } + + VTY_GET_INTEGER ("Logical Network ID", l2o_buf.logical_net_id, argv[2]); + + /* construct option chain */ + + memset (valbuf, 0, sizeof (valbuf)); + memcpy (valbuf, &l2o_buf.macaddr.octet, ETHER_ADDR_LEN); + valbuf[11] = (l2o_buf.logical_net_id >> 16) & 0xff; + valbuf[12] = (l2o_buf.logical_net_id >> 8) & 0xff; + valbuf[13] = l2o_buf.logical_net_id & 0xff; + + memset (&hopt, 0, sizeof (hopt)); + hopt.options_count = 1; + hopt.options_length = sizeof (valbuf); /* is this right? */ + hopt.type = RFAPI_VN_OPTION_TYPE_L2ADDR; + hopt.length = sizeof (valbuf); + hopt.value = valbuf; + + + /* + * options parameter not used? Set to NULL for now + */ + rc = rfapi_query (handle, &target, &l2o_buf, &pNextHopEntry); + + if (rc) + { + vty_out (vty, "rfapi_query failed with rc=%d (%s)%s", rc, + strerror (rc), VTY_NEWLINE); + } + else + { + /* + * print nexthop list + */ + /* TBD enhance to print L2 information */ + test_nexthops_callback ( /*&target, */ pNextHopEntry, vty); /* frees nh list! 
*/ + } + + return CMD_SUCCESS; +} + + +DEFUN (debug_rfapi_query_done_vn_un, + debug_rfapi_query_vn_un_done_cmd, + "debug rfapi-dev query done vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X) target (A.B.C.D|X:X::X:X)", + DEBUG_STR + DEBUG_RFAPI_STR + "rfapi_query_done\n" + "indicate vn addr follows\n" + "virtual network interface address\n" + "indicate xt addr follows\n" + "underlay network interface address\n" + "indicate prefix follows\n" "prefix\n") +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + struct rfapi_ip_addr target; + rfapi_handle handle; + int rc; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + /* + * Get target addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[2], &target))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + /* + * options parameter not used? Set to NULL for now + */ + rc = rfapi_query_done (handle, &target); + + vty_out (vty, "rfapi_query_done returned %d%s", rc, VTY_NEWLINE); + + return CMD_SUCCESS; +} + +DEFUN (debug_rfapi_show_import, + debug_rfapi_show_import_cmd, + "debug rfapi-dev show import", + DEBUG_STR + DEBUG_RFAPI_STR + SHOW_STR + "import\n") +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_import_table *it; + char *s; + int first_l2 = 1; + + /* + * Show all import tables + */ + + bgp = bgp_get_default (); /* assume 1 instance for now */ + if (!bgp) + { + vty_out (vty, "No BGP instance%s", VTY_NEWLINE); + return CMD_WARNING; + } + + h = bgp->rfapi; + if (!h) + { + vty_out (vty, "No RFAPI instance%s", VTY_NEWLINE); + return CMD_WARNING; + } + + /* + * Iterate over all import tables; do a filtered import + * for the afi/safi combination + */ + + + for (it = h->imports; it; it = it->next) + { + s = ecommunity_ecom2str (it->rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, "Import Table %p, RTs: %s%s", it, s, VTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, s); + + rfapiShowImportTable (vty, "IP VPN", it->imported_vpn[AFI_IP], 1); + rfapiShowImportTable (vty, "IP ENCAP", it->imported_encap[AFI_IP], 0); + rfapiShowImportTable (vty, "IP6 VPN", it->imported_vpn[AFI_IP6], 1); + rfapiShowImportTable (vty, "IP6 ENCAP", it->imported_encap[AFI_IP6], 0); + } + + if (h->import_mac) + { + void *cursor = NULL; + uint32_t lni; + uintptr_t lni_as_ptr; + int rc; + char buf[BUFSIZ]; + + for (rc = + skiplist_next (h->import_mac, (void **) &lni_as_ptr, (void **) &it, + &cursor); !rc; + rc = + skiplist_next (h->import_mac, (void **) &lni_as_ptr, (void **) &it, + &cursor)) + { + + if (it->imported_vpn[AFI_ETHER]) + { + lni = lni_as_ptr; + if (first_l2) + { + vty_out (vty, "%sLNI-based Ethernet Tables:%s", + VTY_NEWLINE, VTY_NEWLINE); + first_l2 = 0; + } + snprintf (buf, BUFSIZ, "L2VPN LNI=%u", lni); + rfapiShowImportTable (vty, buf, it->imported_vpn[AFI_ETHER], 1); + } + } + } + + rfapiShowImportTable (vty, "CE IT - IP VPN", + h->it_ce->imported_vpn[AFI_IP], 1); + + return CMD_SUCCESS; +} + +DEFUN (debug_rfapi_show_import_vn_un, + debug_rfapi_show_import_vn_un_cmd, + "debug rfapi-dev show import vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X)", + DEBUG_STR + DEBUG_RFAPI_STR + SHOW_STR + "import\n" + "indicate vn addr follows\n" + "virtual network interface address\n" + "indicate xt addr follows\n" "underlay network interface address\n") +{ + 
struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + rfapi_handle handle; + int rc; + struct rfapi_descriptor *rfd; + + /* + * Get VN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[0], &vn))) + return rc; + + + /* + * Get UN addr + */ + if ((rc = rfapiCliGetRfapiIpAddr (vty, argv[1], &un))) + return rc; + + + if (rfapi_find_handle_vty (vty, &vn, &un, &handle)) + { + vty_out (vty, "can't locate handle matching vn=%s, un=%s%s", + argv[0], argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + rfd = (struct rfapi_descriptor *) handle; + + rfapiShowImportTable (vty, "IP VPN", + rfd->import_table->imported_vpn[AFI_IP], 1); + rfapiShowImportTable (vty, "IP ENCAP", + rfd->import_table->imported_encap[AFI_IP], 0); + rfapiShowImportTable (vty, "IP6 VPN", + rfd->import_table->imported_vpn[AFI_IP6], 1); + rfapiShowImportTable (vty, "IP6 ENCAP", + rfd->import_table->imported_encap[AFI_IP6], 0); + + return CMD_SUCCESS; +} + +DEFUN (debug_rfapi_response_omit_self, + debug_rfapi_response_omit_self_cmd, + "debug rfapi-dev response-omit-self (on|off)", + DEBUG_STR + DEBUG_RFAPI_STR + "Omit self in RFP responses\n" + "filter out self from responses\n" "leave self in responses\n") +{ + struct bgp *bgp = bgp_get_default (); + + if (!bgp) + { + vty_out (vty, "No BGP process is configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (!bgp->rfapi_cfg) + { + vty_out (vty, "VNC not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!strcmp (argv[0], "on")) + SET_FLAG (bgp->rfapi_cfg->flags, BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP); + else + UNSET_FLAG (bgp->rfapi_cfg->flags, BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP); + + return CMD_SUCCESS; +} + + +#ifdef RFAPI_DEBUG_SKIPLIST_CLI + +#include "lib/skiplist.h" +DEFUN (skiplist_test_cli, + skiplist_test_cli_cmd, + "skiplist test", + "skiplist command\n" + "test\n") +{ + skiplist_test (vty); + + return CMD_SUCCESS; +} + +DEFUN (skiplist_debug_cli, + skiplist_debug_cli_cmd, + "skiplist debug", + "skiplist command\n" + "debug\n") +{ + skiplist_debug (vty, NULL); + return CMD_SUCCESS; +} + +#endif /* RFAPI_DEBUG_SKIPLIST_CLI */ + +void +rfapi_init (void) +{ + bgp_rfapi_cfg_init (); + vnc_debug_init(); + + install_element (ENABLE_NODE, &debug_rfapi_show_import_cmd); + install_element (ENABLE_NODE, &debug_rfapi_show_import_vn_un_cmd); + + install_element (ENABLE_NODE, &debug_rfapi_open_cmd); + install_element (ENABLE_NODE, &debug_rfapi_close_vn_un_cmd); + install_element (ENABLE_NODE, &debug_rfapi_close_rfd_cmd); + install_element (ENABLE_NODE, &debug_rfapi_register_vn_un_cmd); + install_element (ENABLE_NODE, &debug_rfapi_unregister_vn_un_cmd); + install_element (ENABLE_NODE, &debug_rfapi_query_vn_un_cmd); + install_element (ENABLE_NODE, &debug_rfapi_query_vn_un_done_cmd); + install_element (ENABLE_NODE, &debug_rfapi_query_vn_un_l2o_cmd); + + install_element (ENABLE_NODE, &debug_rfapi_response_omit_self_cmd); + + /* Need the following show commands for gpz test scripts */ + install_element (ENABLE_NODE, &debug_rfapi_show_nves_cmd); + install_element (ENABLE_NODE, &debug_rfapi_show_nves_vn_un_cmd); + install_element (ENABLE_NODE, &debug_rfapi_register_vn_un_l2o_cmd); + +#ifdef RFAPI_DEBUG_SKIPLIST_CLI + install_element (ENABLE_NODE, &skiplist_test_cli_cmd); + install_element (ENABLE_NODE, &skiplist_debug_cli_cmd); +#endif + + rfapi_vty_init (); +} + +#ifdef DEBUG_RFAPI +static void +rfapi_print_exported (struct bgp *bgp) +{ + struct bgp_node *rdn; + struct bgp_node *rn; + struct bgp_info *bi; + + if (!bgp) + return; + + for (rdn = bgp_table_top 
(bgp->rib[AFI_IP][SAFI_MPLS_VPN]); rdn; + rdn = bgp_route_next (rdn)) + { + if (!rdn->info) + continue; + fprintf (stderr, "%s: vpn rdn=%p\n", __func__, rdn); + for (rn = bgp_table_top (rdn->info); rn; rn = bgp_route_next (rn)) + { + if (!rn->info) + continue; + fprintf (stderr, "%s: rn=%p\n", __func__, rn); + for (bi = rn->info; bi; bi = bi->next) + { + rfapiPrintBi ((void *) 2, bi); /* 2 => stderr */ + } + } + } + for (rdn = bgp_table_top (bgp->rib[AFI_IP][SAFI_ENCAP]); rdn; + rdn = bgp_route_next (rdn)) + { + if (!rdn->info) + continue; + fprintf (stderr, "%s: encap rdn=%p\n", __func__, rdn); + for (rn = bgp_table_top (rdn->info); rn; rn = bgp_route_next (rn)) + { + if (!rn->info) + continue; + fprintf (stderr, "%s: rn=%p\n", __func__, rn); + for (bi = rn->info; bi; bi = bi->next) + { + rfapiPrintBi ((void *) 2, bi); /* 2 => stderr */ + } + } + } + +} +#endif /* defined(DEBUG_RFAPI) */ + +/* + * Free all memory to prepare for clean exit as seen by valgrind memcheck + */ +void +rfapi_delete (struct bgp *bgp) +{ + extern void rfp_clear_vnc_nve_all (void); /* can't fix correctly yet */ + + /* + * This clears queries and registered routes, and closes nves + */ + if (bgp->rfapi) + rfp_clear_vnc_nve_all (); + bgp_rfapi_cfg_destroy (bgp, bgp->rfapi_cfg); + bgp->rfapi_cfg = NULL; + bgp_rfapi_destroy (bgp, bgp->rfapi); + bgp->rfapi = NULL; +#ifdef DEBUG_RFAPI + /* + * show what's left in the BGP MPLSVPN RIB + */ + rfapi_print_exported (bgp); +#endif + +} + +int +rfapi_set_autord_from_vn (struct prefix_rd *rd, struct rfapi_ip_addr *vn) +{ + zlog_debug ("%s: auto-assigning RD", __func__); + if (vn->addr_family != AF_INET + && vn->addr_family != AF_INET6) + { + zlog_debug ("%s: can't auto-assign RD, VN addr family is not IPv4" + "|v6" + , __func__); + return EAFNOSUPPORT; + } + rd->family = AF_UNSPEC; + rd->prefixlen = 64; + rd->val[1] = RD_TYPE_IP; + if (vn->addr_family == AF_INET) + { + memcpy (rd->val + 2, &vn->addr.v4.s_addr, 4); + } + else + { /* is v6 */ + memcpy (rd->val + 2, &vn->addr.v6.s6_addr32[3], 4);/* low order 4 bytes */ + } + { + char buf[BUFSIZ]; + buf[0] = 0; + prefix_rd2str (rd, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s: auto-RD is set to %s", __func__, buf); + } + return 0; +} + +/*------------------------------------------ + * rfapi_bgp_lookup_by_rfp + * + * Find bgp instance pointer based on value returned by rfp_start + * + * input: + * rfp_start_val value returned by rfp_startor + * NULL (=get default instance) + * + * output: + * none + * + * return value: + * bgp bgp instance pointer + * NULL = not found + * + --------------------------------------------*/ +struct bgp * +rfapi_bgp_lookup_by_rfp (void *rfp_start_val) +{ + struct bgp *bgp = NULL; + struct listnode *node, *nnode; + + if (rfp_start_val == NULL) + bgp = bgp_get_default (); + else + for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp)) + if (bgp->rfapi != NULL && bgp->rfapi->rfp == rfp_start_val) + return bgp; + return bgp; +} + +/*------------------------------------------ + * rfapi_get_rfp_start_val_by_bgp + * + * Find bgp instance pointer based on value returned by rfp_start + * + * input: + * bgp bgp instance pointer + * + * output: + * none + * + * return value: + * rfp_start_val + * NULL = not found + * + --------------------------------------------*/ +void * +rfapi_get_rfp_start_val_by_bgp (struct bgp *bgp) +{ + if (!bgp || !bgp->rfapi) + return NULL; + return bgp->rfapi->rfp; +} + +/*********************************************************************** + * RFP group specific configuration + 
***********************************************************************/ +static void * +rfapi_rfp_get_or_init_group_config_default ( + struct rfapi_cfg *rfc, + struct vty *vty, + uint32_t size) +{ + if (rfc->default_rfp_cfg == NULL && size > 0) + { + rfc->default_rfp_cfg = XCALLOC (MTYPE_RFAPI_RFP_GROUP_CFG, size); + zlog_debug ("%s: allocated, size=%d", __func__, size); + + } + return rfc->default_rfp_cfg; +} + +static void * +rfapi_rfp_get_or_init_group_config_nve ( + struct rfapi_cfg *rfc, + struct vty *vty, + uint32_t size) +{ + struct rfapi_nve_group_cfg *rfg = VTY_GET_CONTEXT_SUB(rfapi_nve_group_cfg); + + /* make sure group is still in list */ + if (!rfg || !listnode_lookup (rfc->nve_groups_sequential, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current NVE group no longer exists%s", VTY_NEWLINE); + return NULL; + } + + if (rfg->rfp_cfg == NULL && size > 0) + { + rfg->rfp_cfg = XCALLOC (MTYPE_RFAPI_RFP_GROUP_CFG, size); + zlog_debug ("%s: allocated, size=%d", __func__, size); + + } + return rfg->rfp_cfg; +} + +static void * +rfapi_rfp_get_or_init_group_config_l2 ( + struct rfapi_cfg *rfc, + struct vty *vty, + uint32_t size) +{ + struct rfapi_l2_group_cfg *rfg = VTY_GET_CONTEXT_SUB(rfapi_l2_group_cfg); + + /* make sure group is still in list */ + if (!rfg || !listnode_lookup (rfc->l2_groups, rfg)) + { + /* Not in list anymore */ + vty_out (vty, "Current L2 group no longer exists%s", VTY_NEWLINE); + return NULL; + } + if (rfg->rfp_cfg == NULL && size > 0) + { + rfg->rfp_cfg = XCALLOC (MTYPE_RFAPI_RFP_GROUP_CFG, size); + zlog_debug ("%s: allocated, size=%d", __func__, size); + + } + return rfg->rfp_cfg; +} + +/*------------------------------------------ + * rfapi_rfp_init_group_config_ptr_vty + * + * This is used to init or return a previously init'ed group specific + * configuration pointer. Group is identified by vty context. + * NOTE: size is ignored when a previously init'ed value is returned. + * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. + * + * input: + * rfp_start_val value returned by rfp_start + * type group type + * vty quagga vty context + * size number of bytes to allocation + * + * output: + * none + * + * return value: + * rfp_cfg_group NULL or Pointer to configuration structure +--------------------------------------------*/ +void * +rfapi_rfp_init_group_config_ptr_vty ( + void *rfp_start_val, + rfapi_rfp_cfg_group_type type, + struct vty *vty, + uint32_t size) +{ + struct bgp *bgp; + void *ret = NULL; + + if (rfp_start_val == NULL || vty == NULL) + return NULL; + + bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + if (!bgp || !bgp->rfapi_cfg) + return NULL; + + switch (type) + { + case RFAPI_RFP_CFG_GROUP_DEFAULT: + ret = rfapi_rfp_get_or_init_group_config_default (bgp->rfapi_cfg, + vty, size); + break; + case RFAPI_RFP_CFG_GROUP_NVE: + ret = rfapi_rfp_get_or_init_group_config_nve (bgp->rfapi_cfg, + vty, size); + break; + case RFAPI_RFP_CFG_GROUP_L2: + ret = rfapi_rfp_get_or_init_group_config_l2 (bgp->rfapi_cfg, vty, size); + break; + default: + zlog_err ("%s: Unknown group type=%d", __func__, type); + /* should never happen */ + assert ("Unknown type" == NULL); + break; + } + return ret; +} + +/*------------------------------------------ + * rfapi_rfp_get_group_config_ptr_vty + * + * This is used to get group specific configuration pointer. + * Group is identified by type and vty context. + * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. 
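As a usage note, not part of the patch, the following sketch shows how an RFP vty handler might use rfapi_rfp_init_group_config_ptr_vty() (defined above) to lazily attach its own per-NVE-group state. The struct my_rfp_group_cfg type, the handler name, and my_rfp_start_val are hypothetical, and the sketch assumes it runs while the vty is inside a vnc nve-group configuration node, since the NVE variant resolves the group from the vty context.

    struct my_rfp_group_cfg         /* hypothetical RFP-private per-group state */
    {
      uint32_t hello_interval;
    };

    static int
    my_rfp_cfg_hello (struct vty *vty, void *my_rfp_start_val, uint32_t seconds)
    {
      struct my_rfp_group_cfg *cfg;

      /* the first call for a group allocates sizeof(*cfg) zeroed bytes;
       * later calls return the same pointer and ignore the size argument */
      cfg = rfapi_rfp_init_group_config_ptr_vty (my_rfp_start_val,
                                                 RFAPI_RFP_CFG_GROUP_NVE,
                                                 vty,
                                                 sizeof (struct my_rfp_group_cfg));
      if (!cfg)
        return CMD_WARNING;         /* group deleted, or VNC not configured */

      cfg->hello_interval = seconds;
      return CMD_SUCCESS;
    }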
+ * + * input: + * rfp_start_val value returned by rfp_start + * type group type + * vty quagga vty context + * + * output: + * none + * + * return value: + * rfp_cfg_group Pointer to configuration structure +--------------------------------------------*/ +void * +rfapi_rfp_get_group_config_ptr_vty ( + void *rfp_start_val, + rfapi_rfp_cfg_group_type type, + struct vty *vty) +{ + return rfapi_rfp_init_group_config_ptr_vty (rfp_start_val, type, vty, 0); +} + +static void * +rfapi_rfp_get_group_config_name_nve ( + struct rfapi_cfg *rfc, + const char *name, + void *criteria, + rfp_group_config_search_cb_t *search_cb) +{ + struct rfapi_nve_group_cfg *rfg; + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO (rfc->nve_groups_sequential, node, rfg)) + { + if (!strcmp (rfg->name, name) && /* name match */ + (search_cb == NULL || !search_cb (criteria, rfg->rfp_cfg))) + return rfg->rfp_cfg; + } + return NULL; +} + +static void * +rfapi_rfp_get_group_config_name_l2 ( + struct rfapi_cfg *rfc, + const char *name, + void *criteria, + rfp_group_config_search_cb_t *search_cb) +{ + struct rfapi_l2_group_cfg *rfg; + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO (rfc->l2_groups, node, rfg)) + { + if (!strcmp (rfg->name, name) && /* name match */ + (search_cb == NULL || !search_cb (criteria, rfg->rfp_cfg))) + return rfg->rfp_cfg; + } + return NULL; +} + +/*------------------------------------------ + * rfapi_rfp_get_group_config_ptr_name + * + * This is used to get group specific configuration pointer. + * Group is identified by type and name context. + * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. + * + * input: + * rfp_start_val value returned by rfp_start + * type group type + * name group name + * criteria RFAPI caller provided serach criteria + * search_cb optional rfp_group_config_search_cb_t + * + * output: + * none + * + * return value: + * rfp_cfg_group Pointer to configuration structure +--------------------------------------------*/ +void * +rfapi_rfp_get_group_config_ptr_name ( + void *rfp_start_val, + rfapi_rfp_cfg_group_type type, + const char *name, + void *criteria, + rfp_group_config_search_cb_t *search_cb) +{ + struct bgp *bgp; + void *ret = NULL; + + if (rfp_start_val == NULL || name == NULL) + return NULL; + + bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + if (!bgp || !bgp->rfapi_cfg) + return NULL; + + switch (type) + { + case RFAPI_RFP_CFG_GROUP_DEFAULT: + ret = bgp->rfapi_cfg->default_rfp_cfg; + break; + case RFAPI_RFP_CFG_GROUP_NVE: + ret = rfapi_rfp_get_group_config_name_nve (bgp->rfapi_cfg, + name, criteria, search_cb); + break; + case RFAPI_RFP_CFG_GROUP_L2: + ret = rfapi_rfp_get_group_config_name_l2 (bgp->rfapi_cfg, + name, criteria, search_cb); + break; + default: + zlog_err ("%s: Unknown group type=%d", __func__, type); + /* should never happen */ + assert ("Unknown type" == NULL); + break; + } + return ret; +} + +/*------------------------------------------ + * rfapi_rfp_get_l2_group_config_ptr_lni + * + * This is used to get group specific configuration pointer. + * Group is identified by type and logical network identifier. + * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. 
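Similarly, a hedged sketch, not part of the patch, of the name-based lookup with a search callback. The callback shape is inferred from the helper functions above (it is invoked as search_cb(criteria, rfp_cfg) and a return of 0 accepts the candidate); struct my_group_state, my_vni_match(), and my_rfp_start_val are illustrative names only.

    struct my_group_state           /* hypothetical per-group RFP state */
    {
      uint32_t vni;
    };

    static int
    my_vni_match (void *criteria, void *rfp_cfg_group)   /* 0 = accept candidate */
    {
      uint32_t want = *(uint32_t *) criteria;
      struct my_group_state *st = rfp_cfg_group;

      return (st && st->vni == want) ? 0 : 1;
    }

    static struct my_group_state *
    my_lookup_group (void *my_rfp_start_val, const char *group_name, uint32_t vni)
    {
      return rfapi_rfp_get_group_config_ptr_name (my_rfp_start_val,
                                                  RFAPI_RFP_CFG_GROUP_NVE,
                                                  group_name, &vni, my_vni_match);
    }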
+ * + * input: + * rfp_start_val value returned by rfp_start + * type group type + * logical_net_id group logical network identifier + * criteria RFAPI caller provided serach criteria + * search_cb optional rfp_group_config_search_cb_t + * + * output: + * none + * + * return value: + * rfp_cfg_group Pointer to configuration structure +--------------------------------------------*/ +void * +rfapi_rfp_get_l2_group_config_ptr_lni ( + void *rfp_start_val, + uint32_t logical_net_id, + void *criteria, + rfp_group_config_search_cb_t *search_cb) +{ + struct bgp *bgp; + struct rfapi_l2_group_cfg *rfg; + struct listnode *node; + + if (rfp_start_val == NULL) + return NULL; + + bgp = rfapi_bgp_lookup_by_rfp (rfp_start_val); + if (!bgp || !bgp->rfapi_cfg) + return NULL; + + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->l2_groups, node, rfg)) + { + if (rfg->logical_net_id == logical_net_id && + (search_cb == NULL || !search_cb (criteria, rfg->rfp_cfg))) + { + if (rfg->rfp_cfg == NULL) + zlog_debug ("%s: returning rfp group config for lni=0", __func__); + return rfg->rfp_cfg; + } + } + return NULL; +} diff --git a/bgpd/rfapi/rfapi.h b/bgpd/rfapi/rfapi.h new file mode 100644 index 0000000000..7d108432ae --- /dev/null +++ b/bgpd/rfapi/rfapi.h @@ -0,0 +1,980 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#ifndef _QUAGGA_BGP_RFAPI_H +#define _QUAGGA_BGP_RFAPI_H + +#if ENABLE_BGP_VNC + +#include +#include +#include "lib/zebra.h" +#include "lib/vty.h" +#include "lib/prefix.h" +#include "bgpd/bgpd.h" +#include "bgpd/bgp_encap_types.h" + +/* probably ought to have a field-specific define in config.h */ +# ifndef s6_addr32 /* for solaris/bsd */ +# ifdef SOLARIS_IPV6 +# define s6_addr32 _S6_un._S6_u32 +# else +# define s6_addr32 __u6_addr.__u6_addr32 +# endif +# endif + +#define RFAPI_V4_ADDR 0x04 +#define RFAPI_V6_ADDR 0x06 +#define RFAPI_SHOW_STR "VNC information\n" + +struct rfapi_ip_addr +{ + uint8_t addr_family; /* AF_INET | AF_INET6 */ + union + { + struct in_addr v4; /* in network order */ + struct in6_addr v6; /* in network order */ + } addr; +}; + +struct rfapi_ip_prefix +{ + uint8_t length; + uint8_t cost; /* bgp local pref = 255 - cost */ + struct rfapi_ip_addr prefix; +}; + +struct rfapi_nexthop +{ + struct prefix addr; + uint8_t cost; +}; + +struct rfapi_next_hop_entry +{ + struct rfapi_next_hop_entry *next; + struct rfapi_ip_prefix prefix; + uint32_t lifetime; + struct rfapi_ip_addr un_address; + struct rfapi_ip_addr vn_address; + struct rfapi_vn_option *vn_options; + struct rfapi_un_option *un_options; +}; + +#define RFAPI_REMOVE_RESPONSE_LIFETIME 0 +#define RFAPI_INFINITE_LIFETIME 0xFFFFFFFF + +struct rfapi_l2address_option +{ + struct ethaddr macaddr; /* use 0 to assign label to IP prefix */ + uint32_t label; /* 20bit label in low bits, no TC, S, or TTL */ + uint32_t logical_net_id; /* ~= EVPN Ethernet Segment Id, + must not be zero for mac regis. */ + uint8_t local_nve_id; +}; + +typedef enum +{ + RFAPI_UN_OPTION_TYPE_PROVISIONAL, /* internal use only */ + RFAPI_UN_OPTION_TYPE_TUNNELTYPE, +} rfapi_un_option_type; + +struct rfapi_tunneltype_option +{ + bgp_encap_types type; + union + { + struct bgp_encap_type_reserved reserved; + struct bgp_encap_type_l2tpv3_over_ip l2tpv3_ip; + struct bgp_encap_type_gre gre; + struct bgp_encap_type_transmit_tunnel_endpoint transmit_tunnel_endpoint; + struct bgp_encap_type_ipsec_in_tunnel_mode ipsec_tunnel; + struct bgp_encap_type_ip_in_ip_tunnel_with_ipsec_transport_mode ip_ipsec; + struct bgp_encap_type_mpls_in_ip_tunnel_with_ipsec_transport_mode mpls_ipsec; + struct bgp_encap_type_ip_in_ip ip_ip; + struct bgp_encap_type_vxlan vxlan; + struct bgp_encap_type_nvgre nvgre; + struct bgp_encap_type_mpls mpls; + struct bgp_encap_type_mpls_in_gre mpls_gre; + struct bgp_encap_type_vxlan_gpe vxlan_gpe; + struct bgp_encap_type_mpls_in_udp mpls_udp; + struct bgp_encap_type_pbb pbb; + } bgpinfo; +}; + +struct rfapi_un_option +{ + struct rfapi_un_option *next; + rfapi_un_option_type type; + union + { + struct rfapi_tunneltype_option tunnel; + } v; +}; + +typedef enum +{ + RFAPI_VN_OPTION_TYPE_L2ADDR = 3, /* Layer 2 address, 3 for legacy compatibility */ + RFAPI_VN_OPTION_TYPE_LOCAL_NEXTHOP, /* for static routes */ + RFAPI_VN_OPTION_TYPE_INTERNAL_RD, /* internal use only */ +} rfapi_vn_option_type; + +struct rfapi_vn_option +{ + struct rfapi_vn_option *next; + + rfapi_vn_option_type type; + + union + { + struct rfapi_l2address_option l2addr; + + /* + * If this option is present, the next hop is local to the + * client NVE (i.e., not via a tunnel). 
+ */ + struct rfapi_nexthop local_nexthop; + + /* + * For rfapi internal use only + */ + struct prefix_rd internal_rd; + } v; +}; + +struct rfapi_l2address_option_match +{ + struct rfapi_l2address_option o; + uint32_t flags; + +#define RFAPI_L2O_MACADDR 0x00000001 +#define RFAPI_L2O_LABEL 0x00000002 +#define RFAPI_L2O_LNI 0x00000004 +#define RFAPI_L2O_LHI 0x00000008 +}; + +#define VNC_CONFIG_STR "VNC/RFP related configuration\n" + +typedef void *rfapi_handle; + +/*********************************************************************** + * RFP Callbacks + ***********************************************************************/ +/*------------------------------------------ + * rfapi_response_cb_t (callback typedef) + * + * Callbacks of this type are used to provide asynchronous + * route updates from RFAPI to the RFP client. + * + * response_cb + * called to notify the rfp client that a next hop list + * that has previously been provided in response to an + * rfapi_query call has been updated. Deleted routes are indicated + * with lifetime==RFAPI_REMOVE_RESPONSE_LIFETIME. + * + * By default, the routes an NVE receives via this callback include + * its own routes (that it has registered). However, these may be + * filtered out if the global BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP + * flag is set. + * + * local_cb + * called to notify the rfp client that a local route + * has been added or deleted. Deleted routes are indicated + * with lifetime==RFAPI_REMOVE_RESPONSE_LIFETIME. + * + * input: + * next_hops a list of possible next hops. + * This is a linked list allocated within the + * rfapi. The response_cb callback function is responsible + * for freeing this memory via rfapi_free_next_hop_list() + * in order to avoid memory leaks. + * + * userdata value (cookie) originally specified in call to + * rfapi_open() + * + *------------------------------------------*/ +typedef void (rfapi_response_cb_t) (struct rfapi_next_hop_entry * next_hops, + void *userdata); + +/*------------------------------------------ + * rfapi_nve_close_cb_t (callback typedef) + * + * Callbacks of this type are used to provide asynchronous + * notification that an rfapi_handle was invalidated + * + * input: + * pHandle Formerly valid rfapi_handle returned to + * client via rfapi_open(). + * + * reason EIDRM handle administratively closed (clear nve ...) + * ESTALE handle invalidated by configuration change + * + *------------------------------------------*/ +typedef void (rfapi_nve_close_cb_t) (rfapi_handle pHandle, int reason); + +/*------------------------------------------ + * rfp_cfg_write_cb_t (callback typedef) + * + * This callback is used to generate output for any config parameters + * that may be supported by RFP via RFP defined vty commands at the bgp + * level. See loglevel as an example. + * + * input: + * vty -- quagga vty context + * rfp_start_val -- value returned by rfp_start + * + * output: + * to vty, rfp related configuration + * + * return value: + * lines written +--------------------------------------------*/ +typedef int (rfp_cfg_write_cb_t) (struct vty * vty, void *rfp_start_val); + +/*------------------------------------------ + * rfp_cfg_group_write_cb_t (callback typedef) + * + * This callback is used to generate output for any config parameters + * that may be supported by RFP via RFP defined vty commands at the + * L2 or NVE level. See loglevel as an example.
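+ *
+ * A minimal sketch of such a callback (illustrative only; the
+ * my_group_cfg type and its loglevel field are hypothetical and not
+ * part of this patch):
+ *
+ *   static int my_rfp_cfg_group_write (struct vty *vty,
+ *                                      void *rfp_start_val,
+ *                                      rfapi_rfp_cfg_group_type type,
+ *                                      const char *name,
+ *                                      void *rfp_cfg_group)
+ *   {
+ *     struct my_group_cfg *gc = rfp_cfg_group;
+ *     if (gc == NULL)
+ *       return 0;
+ *     vty_out (vty, " rfp loglevel %d%s", gc->loglevel, VTY_NEWLINE);
+ *     return 1;                        (number of lines written)
+ *   }
+ *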
+ * + * input: + * vty quagga vty context + * rfp_start_val value returned by rfp_start + * type group type + * name group name + * rfp_cfg_group Pointer to configuration structure + * + * output: + * to vty, rfp related configuration + * + * return value: + * lines written +--------------------------------------------*/ +typedef enum +{ + RFAPI_RFP_CFG_GROUP_DEFAULT, + RFAPI_RFP_CFG_GROUP_NVE, + RFAPI_RFP_CFG_GROUP_L2 +} rfapi_rfp_cfg_group_type; + +typedef int (rfp_cfg_group_write_cb_t) (struct vty * vty, + void *rfp_start_val, + rfapi_rfp_cfg_group_type type, + const char *name, + void *rfp_cfg_group); + +/*********************************************************************** + * Configuration related defines and structures + ***********************************************************************/ + +struct rfapi_rfp_cb_methods +{ + rfp_cfg_write_cb_t *cfg_cb; /* show top level config */ + rfp_cfg_group_write_cb_t *cfg_group_cb; /* show group level config */ + rfapi_response_cb_t *response_cb; /* unsolicited responses */ + rfapi_response_cb_t *local_cb; /* local route add/delete */ + rfapi_nve_close_cb_t *close_cb; /* handle closed */ + +}; + +/* + * If a route with infinite lifetime is withdrawn, this is + * how long (in seconds) to wait before expiring it (because + * RFAPI_LIFETIME_MULTIPLIER_PCT * infinity is too long to wait) + */ +#define RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY (60*120) + +/* + * the factor that should be applied to a prefix's value + * before using it to expire a withdrawn prefix, expressed as a percent. + * Thus, a value of 100 means to use the exact value of , + * a value of 200 means to use twice the value of , etc. + */ +#define RFAPI_RFP_CFG_DEFAULT_HOLDDOWN_FACTOR 150 + +/* + * This is used by rfapi to determine if RFP is using/supports + * a partial (i.e., cache) or full table download approach for + * mapping information. When full table download approach is + * used all information is passed to RFP after an initial + * rfapi_query. When partial table download is used, only + * information matching a query is passed. + */ +typedef enum +{ + RFAPI_RFP_DOWNLOAD_PARTIAL = 0, + RFAPI_RFP_DOWNLOAD_FULL +} rfapi_rfp_download_type; + +#define RFAPI_RFP_CFG_DEFAULT_FTD_ADVERTISEMENT_INTERVAL 1 + +struct rfapi_rfp_cfg +{ + /* partial or full table download */ + rfapi_rfp_download_type download_type; /* default=partial */ + /* + * When full-table-download is enabled, this is the minimum + * number of seconds between times a non-queried prefix will + * be updated to a particular NVE. 
+ * default: RFAPI_RFP_CFG_DEFAULT_FTD_ADVERTISEMENT_INTERVAL + */ + uint32_t ftd_advertisement_interval; + /* + * percentage of registration lifetime to continue to use information + * post soft-state refresh timeout + default: RFAPI_RFP_CFG_DEFAULT_HOLDDOWN_FACTOR + */ + uint32_t holddown_factor; + /* Control generation of updated RFP responses */ + uint8_t use_updated_response; /* default=0/no */ + /* when use_updated_response, also generate remove responses */ + uint8_t use_removes; /* default=0/no */ +}; + +/*********************************************************************** + * Process related functions -- MUST be provided by the RFAPI user <<=== + ***********************************************************************/ + +/*------------------------------------------ + * rfp_start + * + * This function will start the RFP code + * + * input: + * master quagga thread_master to tie into bgpd threads + * + * output: + * cfgp Pointer to rfapi_rfp_cfg (null = use defaults), + * copied by caller, updated via rfp_set_configuration + * cbmp Pointer to rfapi_rfp_cb_methods, may be null + * copied by caller, updated via rfapi_rfp_set_cb_methods + * return value: + * rfp_start_val rfp returned value passed on rfp_stop and other rfapi calls +--------------------------------------------*/ +extern void * +rfp_start ( + struct thread_master *master, + struct rfapi_rfp_cfg **cfgp, + struct rfapi_rfp_cb_methods **cbmp); + +/*------------------------------------------ + * rfp_stop + * + * This function is called on shutdown to trigger RFP cleanup + * + * input: + * rfp_start_val + * + * output: + * none + * + * return value: +--------------------------------------------*/ +extern void +rfp_stop (void *rfp_start_val); + +/*********************************************************************** + * RFP processing behavior configuration + ***********************************************************************/ + +/*------------------------------------------ + * rfapi_rfp_set_configuration + * + * This is used to change rfapi's processing behavior based on + * RFP requirements. + * + * input: + * rfp_start_val value returned by rfp_start + * rfapi_rfp_cfg Pointer to configuration structure + * + * output: + * none + * + * return value: + * 0 Success + * ENXIO Unabled to locate configured BGP/VNC +--------------------------------------------*/ +extern int +rfapi_rfp_set_configuration ( + void *rfp_start_val, + struct rfapi_rfp_cfg *rfp_cfg); + +/*------------------------------------------ + * rfapi_rfp_set_cb_methods + * + * Change registered callback functions for asynchronous notifications + * from RFAPI to the RFP client. + * + * input: + * rfp_start_val value by rfp_start + * methods Pointer to struct rfapi_rfp_cb_methods containing + * pointers to callback methods as described above + * + * return value: + * 0 Success + * ENXIO BGP or VNC not configured + *------------------------------------------*/ +extern int +rfapi_rfp_set_cb_methods ( + void *rfp_start_val, + struct rfapi_rfp_cb_methods *methods); + +/*********************************************************************** + * RFP group specific configuration + ***********************************************************************/ + +/*------------------------------------------ + * rfapi_rfp_init_group_config_ptr_vty + * + * This is used to init or return a previously init'ed group specific + * configuration pointer. Group is identified by vty context. + * NOTE: size is ignored when a previously init'ed value is returned. 
+ * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. + * + * input: + * rfp_start_val value returned by rfp_start + * type group type + * vty quagga vty context + * size number of bytes to allocation + * + * output: + * none + * + * return value: + * rfp_cfg_group NULL or Pointer to configuration structure +--------------------------------------------*/ +extern void * +rfapi_rfp_init_group_config_ptr_vty ( + void *rfp_start_val, + rfapi_rfp_cfg_group_type type, + struct vty *vty, + uint32_t size); + +/*------------------------------------------ + * rfapi_rfp_get_group_config_ptr_vty + * + * This is used to get group specific configuration pointer. + * Group is identified by type and vty context. + * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. + * + * input: + * rfp_start_val value returned by rfp_start + * type group type + * vty quagga vty context + * + * output: + * none + * + * return value: + * rfp_cfg_group Pointer to configuration structure +--------------------------------------------*/ +extern void * +rfapi_rfp_get_group_config_ptr_vty ( + void *rfp_start_val, + rfapi_rfp_cfg_group_type type, + struct vty *vty); + +/*------------------------------------------ + * rfp_group_config_search_cb_t (callback typedef) + * + * This callback is used to called from within a + * rfapi_rfp_get_group_config_ptr to check if the rfp_cfg_group + * matches the search criteria + * + * input: + * criteria RFAPI caller provided serach criteria + * rfp_cfg_group Pointer to configuration structure | NULL + * + * output: + * + * return value: + * 0 Match/Success + * ENOENT No matching +--------------------------------------------*/ +typedef int (rfp_group_config_search_cb_t) (void *criteria, + void *rfp_cfg_group); + +/*------------------------------------------ + * rfapi_rfp_get_group_config_ptr_name + * + * This is used to get group specific configuration pointer. + * Group is identified by type and name context. + * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. + * + * input: + * rfp_start_val value returned by rfp_start + * type group type + * name group name + * criteria RFAPI caller provided serach criteria + * search_cb optional rfp_group_config_search_cb_t + * + * output: + * none + * + * return value: + * rfp_cfg_group Pointer to configuration structure +--------------------------------------------*/ +extern void * +rfapi_rfp_get_group_config_ptr_name ( + void *rfp_start_val, + rfapi_rfp_cfg_group_type type, + const char *name, + void *criteria, + rfp_group_config_search_cb_t *search_cb); + +/*------------------------------------------ + * rfapi_rfp_get_l2_group_config_ptr_lni + * + * This is used to get group specific configuration pointer. + * Group is identified by type and logical network identifier. + * RFAPI frees rfp_cfg_group when group is deleted during reconfig, + * bgp restart or shutdown. 
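+ *
+ * Illustrative lookup using the optional search callback (sketch only;
+ * my_l2_cfg and its color field are hypothetical and not part of this
+ * patch):
+ *
+ *   static int my_search_cb (void *criteria, void *rfp_cfg_group)
+ *   {
+ *     struct my_l2_cfg *c = rfp_cfg_group;
+ *     if (c != NULL && c->color == *(uint32_t *) criteria)
+ *       return 0;               (match)
+ *     return ENOENT;            (no match, keep searching)
+ *   }
+ *
+ *   uint32_t wanted = 7;
+ *   struct my_l2_cfg *cfg =
+ *     rfapi_rfp_get_l2_group_config_ptr_lni (rfp_start_val, lni,
+ *                                            &wanted, my_search_cb);
+ *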
+ * + * input: + * rfp_start_val value returned by rfp_start + * logical_net_id group logical network identifier + * criteria RFAPI caller provided serach criteria + * search_cb optional rfp_group_config_search_cb_t + * + * output: + * none + * + * return value: + * rfp_cfg_group Pointer to configuration structure +--------------------------------------------*/ +extern void * +rfapi_rfp_get_l2_group_config_ptr_lni ( + void *rfp_start_val, + uint32_t logical_net_id, + void *criteria, + rfp_group_config_search_cb_t *search_cb); + +/*********************************************************************** + * NVE Sessions + ***********************************************************************/ + +/*------------------------------------------ + * rfapi_open + * + * This function initializes a NVE record and associates it with + * the specified VN and underlay network addresses + * + * input: + * rfp_start_val value returned by rfp_start + * vn NVE virtual network address + * + * un NVE underlay network address + * + * default_options Default options to use on registrations. + * For now only tunnel type is supported. + * May be overridden per-prefix in rfapi_register(). + * Caller owns (rfapi_open() does not free) + * + * response_cb Pointer to next hop list update callback function or + * NULL when no callbacks are desired. + * + * userdata Passed to subsequent response_cb invocations. + * + * output: + * response_lifetime The length of time that responses sent to this + * NVE are valid. + * + * pHandle pointer to location to store rfapi handle. The + * handle must be passed on subsequent rfapi_ calls. + * + * + * return value: + * 0 Success + * EEXIST NVE with this {vn,un} already open + * ENOENT No matching nve group config + * ENOMSG Matched nve group config was incomplete + * ENXIO BGP or VNC not configured + * EAFNOSUPPORT Matched nve group specifies auto-assignment of RD, + * but underlay network address is not IPv4 + * EDEADLK Called from within a callback procedure + *------------------------------------------*/ +extern int +rfapi_open ( + void *rfp_start_val, + struct rfapi_ip_addr *vn, + struct rfapi_ip_addr *un, + struct rfapi_un_option *default_options, + uint32_t *response_lifetime, + void *userdata, + rfapi_handle *pHandle); + + +/*------------------------------------------ + * rfapi_close + * + * Shut down NVE session and release associated data. Calling + * from within a rfapi callback procedure is permitted (the close + * will be completed asynchronously after the callback finishes). 
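+ *
+ * Typical open/use/close sequence (sketch only; address setup, the
+ * userdata cookie, and error handling are omitted or hypothetical):
+ *
+ *   struct rfapi_ip_addr vn, un;       (filled in by the RFP client)
+ *   uint32_t lifetime;
+ *   rfapi_handle h;
+ *
+ *   if (rfapi_open (rfp_start_val, &vn, &un, NULL,
+ *                   &lifetime, my_userdata, &h) == 0)
+ *     {
+ *       ... register and query via h ...
+ *       rfapi_close (h);
+ *     }
+ *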
+ * + * input: + * rfd: rfapi descriptor returned by rfapi_open + * + * output: + * + * return value: + * 0 Success + * EBADF invalid handle + * ENXIO BGP or VNC not configured + *------------------------------------------*/ +extern int +rfapi_close (rfapi_handle rfd); + +/*------------------------------------------ + * rfapi_check + * + * Test rfapi descriptor + * + * input: + * rfd: rfapi descriptor returned by rfapi_open + * + * output: + * + * return value: + * 0 Success: handle is valid and usable + * EINVAL null argument + * ESTALE formerly valid handle invalidated by config, needs close + * EBADF invalid handle + * ENXIO BGP or VNC not configured + * EAFNOSUPPORT Internal addressing error + *------------------------------------------*/ +extern int +rfapi_check (rfapi_handle rfd); + +/*********************************************************************** + * NVE Routes + ***********************************************************************/ + +/*------------------------------------------ + * rfapi_query + * + * This function queries the RIB for a + * particular route. Note that this call may result in subsequent + * callbacks to response_cb. Response callbacks can be cancelled + * by calling rfapi_query_done. A duplicate query using the same target + * will result in only one callback per change in next_hops. (i.e., + * cancel/replace the prior query results.) + * + * input: + * rfd: rfapi descriptor returned by rfapi_open + * target: the destination address + * l2o ptr to L2 Options struct, NULL if not present in query + * + * output: + * ppNextHopEntry pointer to a location to store a pointer + * to the returned list of nexthops. It is the + * caller's responsibility to free this list + * via rfapi_free_next_hop_list(). + * + * + * return value: + * 0 Success + * EBADF invalid handle + * ENOENT no valid route + * ENXIO BGP or VNC not configured + * ESTALE descriptor is no longer usable; should be closed + * EDEADLK Called from within a callback procedure +--------------------------------------------*/ +extern int +rfapi_query ( + rfapi_handle rfd, + struct rfapi_ip_addr *target, + struct rfapi_l2address_option *l2o, + struct rfapi_next_hop_entry **ppNextHopEntry); + +/*------------------------------------------ + * rfapi_query_done + * + * Notifies the rfapi that the user is no longer interested + * in the specified target. + * + * input: + * rfd: rfapi descriptor returned by rfapi_open + * target: the destination address + * + * output: + * + * return value: + * 0 Success + * EBADF invalid handle + * ENOENT no match found for target + * ENXIO BGP or VNC not configured + * ESTALE descriptor is no longer usable; should be closed + * EDEADLK Called from within a callback procedure +--------------------------------------------*/ +extern int +rfapi_query_done (rfapi_handle rfd, struct rfapi_ip_addr *target); + +/*------------------------------------------ + * rfapi_query_done_all + * + * Notifies the rfapi that the user is no longer interested + * in any target. 
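+ *
+ * Illustrative query lifecycle (sketch only; target initialization is
+ * omitted and rfd is assumed to come from a successful rfapi_open):
+ *
+ *   struct rfapi_ip_addr target;
+ *   struct rfapi_next_hop_entry *nhe = NULL;
+ *   int cleared;
+ *
+ *   if (rfapi_query (rfd, &target, NULL, &nhe) == 0)
+ *     {
+ *       ... walk the nhe->next chain ...
+ *       rfapi_free_next_hop_list (nhe);
+ *     }
+ *   rfapi_query_done_all (rfd, &cleared);   (cancel all outstanding queries)
+ *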
+ * + * input: + * rfd: rfapi descriptor returned by rfapi_open + * + * output: + * count: number of queries cleared + * + * return value: + * 0 Success + * EBADF invalid handle + * ENXIO BGP or VNC not configured + * ESTALE descriptor is no longer usable; should be closed + * EDEADLK Called from within a callback procedure +--------------------------------------------*/ +extern int +rfapi_query_done_all (rfapi_handle rfd, int *count); + +/*------------------------------------------ + * rfapi_register + * + * Requests that reachability to the indicated prefix via this NVE + * be advertised by BGP. Withdraw and kill actions remove a previously- + * advertised prefix. + * + * (This function should NOT be called if the rfapi_open() function + * returns NULL) + * + * input: + * rfd: rfapi descriptor returned by rfapi_open + * prefix: A prefix to be registered or deregistered + * lifetime Prefix lifetime in seconds, host byte order + * options_un underlay network options, may include tunnel-type + * Caller owns (rfapi_register() does not free). + * options_vn virtual network options, may include layer 2 address + * option and local-nexthop option + * Caller owns (rfapi_register() does not free). + * + * action: RFAPI_REGISTER_ADD add the route + * RFAPI_REGISTER_WITHDRAW withdraw route + * RFAPI_REGISTER_KILL withdraw without holddown + * + * return value: + * 0 Success + * EBADF invalid handle + * ENXIO BGP or VNC not configured + * ESTALE descriptor is no longer usable; should be closed + * EDEADLK Called from within a callback procedure + --------------------------------------------*/ + +typedef enum +{ + RFAPI_REGISTER_ADD, + RFAPI_REGISTER_WITHDRAW, + RFAPI_REGISTER_KILL +} rfapi_register_action; + +extern int +rfapi_register ( + rfapi_handle rfd, + struct rfapi_ip_prefix *prefix, + uint32_t lifetime, + struct rfapi_un_option *options_un, + struct rfapi_vn_option *options_vn, + rfapi_register_action action); + +/*********************************************************************** + * Helper / Utility functions + ***********************************************************************/ + +/*------------------------------------------ + * rfapi_get_vn_addr + * + * Get the virtual network address used by an NVE based on its RFD + * + * input: + * rfd: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * vn NVE virtual network address + *------------------------------------------*/ +extern struct rfapi_ip_addr * +rfapi_get_vn_addr (void *); + +/*------------------------------------------ + * rfapi_get_un_addr + * + * Get the underlay network address used by an NVE based on its RFD + * + * input: + * rfd: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * un NVE underlay network address + *------------------------------------------*/ +extern struct rfapi_ip_addr * +rfapi_get_un_addr (void *); + +/*------------------------------------------ + * rfapi_error_str + * + * Returns a string describing the rfapi error code.
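+ *
+ * Example of reporting a registration failure with this helper (sketch
+ * only; the prefix contents and lifetime value are illustrative):
+ *
+ *   struct rfapi_ip_prefix pfx;        (filled in by the RFP client)
+ *   int rc = rfapi_register (rfd, &pfx, 600, NULL, NULL,
+ *                            RFAPI_REGISTER_ADD);
+ *   if (rc != 0)
+ *     zlog_debug ("rfapi_register failed: %s", rfapi_error_str (rc));
+ *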
+ * + * input: + * + * code Error code returned by rfapi function + * + * returns: + * + * const char * String + *------------------------------------------*/ +extern const char * +rfapi_error_str (int code); + +/*------------------------------------------ + * rfapi_get_rfp_start_val + * + * Returns value passed to rfapi on rfp_start + * + * input: + * void * bgp structure + * + * returns: + * void * + *------------------------------------------*/ +extern void * +rfapi_get_rfp_start_val (void *bgpv); + +/*------------------------------------------ + * rfapi_compare_rfds + * + * Compare two generic rfapi descriptors. + * + * input: + * rfd1: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * rfd2: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * 0 Mismatch + * 1 Match + *------------------------------------------*/ +extern int +rfapi_compare_rfds (void *rfd1, void *rfd2); + +/*------------------------------------------ + * rfapi_free_next_hop_list + * + * Frees a next_hop_list returned by a rfapi_query invocation + * + * input: + * list: a pointer to a response list (as a + * struct rfapi_next_hop_entry) to free. + * + * output: + * + * return value: None + --------------------------------------------*/ +extern void +rfapi_free_next_hop_list (struct rfapi_next_hop_entry *list); + +/*------------------------------------------ + * rfapi_get_response_lifetime_default + * + * Returns the default lifetime for a response. + * rfp_start_val value returned by rfp_start or + * NULL (=use default instance) + * + * input: + * None + * + * output: + * + * return value: The bgp instance default lifetime for a response. + --------------------------------------------*/ +extern int +rfapi_get_response_lifetime_default (void *rfp_start_val); + +/*------------------------------------------ + * rfapi_is_vnc_configured + * + * Returns if VNC (BGP VPN messaging /VPN & encap SAFIs) are configured + * + * input: + * rfp_start_val value returned by rfp_start or + * NULL (=use default instance) + * + * output: + * + * return value: If VNC is configured for the bgpd instance + * 0 Success + * ENXIO VNC not configured + --------------------------------------------*/ +extern int +rfapi_is_vnc_configured (void *rfp_start_val); + +/*------------------------------------------ + * rfapi_bgp_lookup_by_rfp + * + * Find bgp instance pointer based on value returned by rfp_start + * + * input: + * rfp_start_val value returned by rfp_startor + * NULL (=get default instance) + * + * output: + * none + * + * return value: + * bgp bgp instance pointer + * NULL = not found + * + --------------------------------------------*/ +extern struct bgp * +rfapi_bgp_lookup_by_rfp (void *rfp_start_val); + +/*------------------------------------------ + * rfapi_get_rfp_start_val_by_bgp + * + * Find bgp instance pointer based on value returned by rfp_start + * + * input: + * bgp bgp instance pointer + * + * output: + * none + * + * return value: + * rfp_start_val + * NULL = not found + * + --------------------------------------------*/ +extern void * +rfapi_get_rfp_start_val_by_bgp (struct bgp *bgp); + +#endif /* ENABLE_BGP_VNC */ + +#endif /* _QUAGGA_BGP_RFAPI_H */ diff --git a/bgpd/rfapi/rfapi_ap.c b/bgpd/rfapi/rfapi_ap.c new file mode 100644 index 0000000000..b0d5ab35c9 --- /dev/null +++ b/bgpd/rfapi/rfapi_ap.c @@ -0,0 +1,629 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. 
+ * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/routemap.h" +#include "lib/log.h" +#include "lib/linklist.h" +#include "lib/command.h" +#include "lib/stream.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_attr.h" +#include "bgpd/bgp_mplsvpn.h" + +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_backend.h" + +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_aspath.h" +#include "bgpd/bgp_advertise.h" + +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/vnc_export_bgp.h" +#include "bgpd/rfapi/vnc_export_bgp_p.h" +#include "bgpd/rfapi/vnc_zebra.h" +#include "bgpd/rfapi/vnc_import_bgp.h" +#include "bgpd/rfapi/rfapi_rib.h" + +#include "bgpd/rfapi/rfapi_ap.h" + +/* + * Per-NVE Advertised prefixes + * + * We maintain a list of prefixes advertised by each NVE. + * There are two indices: by prefix and by lifetime. + * + * BY-PREFIX skiplist + * + * key: ptr to struct prefix (when storing, point to prefix that + * is part of rfapi_adb). + * + * value: ptr to struct rfapi_adb + * + * BY-LIFETIME skiplist + * + * key: ptr to struct rfapi_adb + * value: ptr to struct rfapi_adb + * + */ + +/* + * Skiplist sort function that sorts first according to lifetime + * and then according to adb pointer value. The adb pointer + * is used to spread out the sort for adbs with the same lifetime + * and thereby make the skip list operations more efficient. 
+ */ +static int +sl_adb_lifetime_cmp (void *adb1, void *adb2) +{ + struct rfapi_adb *a1 = adb1; + struct rfapi_adb *a2 = adb2; + + if (a1->lifetime < a2->lifetime) + return -1; + if (a1->lifetime > a2->lifetime) + return 1; + + if (a1 < a2) + return -1; + if (a1 > a2) + return 1; + + return 0; +} + + +void +rfapiApInit (struct rfapi_advertised_prefixes *ap) +{ + ap->ipN_by_prefix = skiplist_new (0, vnc_prefix_cmp, NULL); + ap->ip0_by_ether = skiplist_new (0, vnc_prefix_cmp, NULL); + ap->by_lifetime = skiplist_new (0, sl_adb_lifetime_cmp, NULL); +} + +void +rfapiApRelease (struct rfapi_advertised_prefixes *ap) +{ + struct rfapi_adb *adb; + + /* Free ADBs and lifetime items */ + while (0 == skiplist_first (ap->by_lifetime, NULL, (void **) &adb)) + { + rfapiAdbFree (adb); + skiplist_delete_first (ap->by_lifetime); + } + + while (0 == skiplist_delete_first (ap->ipN_by_prefix)); + while (0 == skiplist_delete_first (ap->ip0_by_ether)); + + /* Free lists */ + skiplist_free (ap->ipN_by_prefix); + skiplist_free (ap->ip0_by_ether); + skiplist_free (ap->by_lifetime); + + ap->ipN_by_prefix = NULL; + ap->ip0_by_ether = NULL; + ap->by_lifetime = NULL; +} + +int +rfapiApCount (struct rfapi_descriptor *rfd) +{ + if (!rfd->advertised.by_lifetime) + return 0; + + return skiplist_count (rfd->advertised.by_lifetime); +} + +int +rfapiApCountAll (struct bgp *bgp) +{ + struct rfapi *h; + struct listnode *node; + struct rfapi_descriptor *rfd; + int total = 0; + + h = bgp->rfapi; + if (h) + { + for (ALL_LIST_ELEMENTS_RO (&h->descriptors, node, rfd)) + { + total += rfapiApCount (rfd); + } + } + return total; +} + + +void +rfapiApReadvertiseAll (struct bgp *bgp, struct rfapi_descriptor *rfd) +{ + struct rfapi_adb *adb; + void *cursor; + int rc; + + for (rc = + skiplist_next (rfd->advertised.by_lifetime, NULL, (void **) &adb, + &cursor); rc == 0; + rc = + skiplist_next (rfd->advertised.by_lifetime, NULL, (void **) &adb, + &cursor)) + { + + struct prefix_rd prd; + uint32_t local_pref = rfp_cost_to_localpref (adb->cost); + + prd = rfd->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + /* + * TBD this is not quite right. When pfx_ip is 0/32 or 0/128, + * we need to substitute the VN address as the prefix + */ + add_vnc_route (rfd, bgp, SAFI_MPLS_VPN, &adb->prefix_ip, &prd, /* RD to use (0 for ENCAP) */ + &rfd->vn_addr, /* nexthop */ + &local_pref, &adb->lifetime, NULL, NULL, /* struct rfapi_un_option */ + NULL, /* struct rfapi_vn_option */ + rfd->rt_export_list, NULL, /* med */ + NULL, ZEBRA_ROUTE_BGP, BGP_ROUTE_RFP, 0); + } +} + +void +rfapiApWithdrawAll (struct bgp *bgp, struct rfapi_descriptor *rfd) +{ + struct rfapi_adb *adb; + void *cursor; + int rc; + + + cursor = NULL; + for (rc = + skiplist_next (rfd->advertised.by_lifetime, NULL, (void **) &adb, + &cursor); rc == 0; + rc = + skiplist_next (rfd->advertised.by_lifetime, NULL, (void **) &adb, + &cursor)) + { + + struct prefix pfx_vn_buf; + struct prefix *pfx_ip; + + if (!(RFAPI_0_PREFIX (&adb->prefix_ip) && + RFAPI_HOST_PREFIX (&adb->prefix_ip))) + { + + pfx_ip = &adb->prefix_ip; + + } + else + { + + pfx_ip = NULL; + + /* + * 0/32 or 0/128 => mac advertisement + */ + if (rfapiRaddr2Qprefix (&rfd->vn_addr, &pfx_vn_buf)) + { + /* + * Bad: it means we can't delete the route + */ + zlog_debug ("%s: BAD: handle has bad vn_addr: skipping", + __func__); + continue; + } + } + + del_vnc_route (rfd, rfd->peer, bgp, SAFI_MPLS_VPN, pfx_ip ? 
pfx_ip : &pfx_vn_buf, &adb->prd, /* RD to use (0 for ENCAP) */ + ZEBRA_ROUTE_BGP, BGP_ROUTE_RFP, NULL, 0); + } +} + +/* + * returns nonzero if tunnel readvertisement is needed, 0 otherwise + */ +static int +rfapiApAdjustLifetimeStats ( + struct rfapi_descriptor *rfd, + uint32_t *old_lifetime, /* set if removing/replacing */ + uint32_t *new_lifetime) /* set if replacing/adding */ +{ + int advertise = 0; + int find_max = 0; + int find_min = 0; + + zlog_debug ("%s: rfd=%p, pOldLife=%p, pNewLife=%p", + __func__, rfd, old_lifetime, new_lifetime); + if (old_lifetime) + zlog_debug ("%s: OldLife=%d", __func__, *old_lifetime); + if (new_lifetime) + zlog_debug ("%s: NewLife=%d", __func__, *new_lifetime); + + if (new_lifetime) + { + /* + * Adding new lifetime + */ + if (old_lifetime) + { + /* + * replacing existing lifetime + */ + + + /* old and new are same */ + if (*old_lifetime == *new_lifetime) + return 0; + + if (*old_lifetime == rfd->min_prefix_lifetime) + { + find_min = 1; + } + if (*old_lifetime == rfd->max_prefix_lifetime) + { + find_max = 1; + } + + /* no need to search if new value is at or equals min|max */ + if (*new_lifetime <= rfd->min_prefix_lifetime) + { + rfd->min_prefix_lifetime = *new_lifetime; + find_min = 0; + } + if (*new_lifetime >= rfd->max_prefix_lifetime) + { + rfd->max_prefix_lifetime = *new_lifetime; + advertise = 1; + find_max = 0; + } + + } + else + { + /* + * Just adding new lifetime + */ + if (*new_lifetime < rfd->min_prefix_lifetime) + { + rfd->min_prefix_lifetime = *new_lifetime; + } + if (*new_lifetime > rfd->max_prefix_lifetime) + { + advertise = 1; + rfd->max_prefix_lifetime = *new_lifetime; + } + + } + } + else + { + /* + * Deleting + */ + + /* + * See if the max prefix lifetime for this NVE has decreased. + * The easy optimization: track min & max; walk the table only + * if they are different. + * The general optimization: index the advertised_prefixes + * table by lifetime. + * + * Note: for a given nve_descriptor, only one of the + * advertised_prefixes[] tables will be used: viz., the + * address family that matches the VN address. + * + */ + if (rfd->max_prefix_lifetime == rfd->min_prefix_lifetime) + { + + /* + * Common case: all lifetimes are the same. Only + * thing we need to do here is check if there are + * no exported routes left. In that case, reinitialize + * the max and min values. 
+ */ + if (!rfapiApCount (rfd)) + { + rfd->max_prefix_lifetime = 0; + rfd->min_prefix_lifetime = UINT32_MAX; + } + + + } + else + { + if (old_lifetime) + { + if (*old_lifetime == rfd->min_prefix_lifetime) + { + find_min = 1; + } + if (*old_lifetime == rfd->max_prefix_lifetime) + { + find_max = 1; + } + } + } + } + + if (find_min || find_max) + { + uint32_t min = UINT32_MAX; + uint32_t max = 0; + + struct rfapi_adb *adb_min; + struct rfapi_adb *adb_max; + + if (!skiplist_first + (rfd->advertised.by_lifetime, (void **) &adb_min, NULL) + && !skiplist_last (rfd->advertised.by_lifetime, (void **) &adb_max, + NULL)) + { + + /* + * This should always work + */ + min = adb_min->lifetime; + max = adb_max->lifetime; + + } + else + { + + void *cursor; + struct prefix *prefix; + struct rfapi_adb *adb; + int rc; + + zlog_debug ("%s: walking to find new min/max", __func__); + + cursor = NULL; + for (rc = skiplist_next (rfd->advertised.ipN_by_prefix, + (void **) &prefix, (void **) &adb, + &cursor); !rc; + rc = + skiplist_next (rfd->advertised.ipN_by_prefix, + (void **) &prefix, (void **) &adb, &cursor)) + { + + uint32_t lt = adb->lifetime; + + if (lt > max) + max = lt; + if (lt < min) + min = lt; + } + cursor = NULL; + for (rc = skiplist_next (rfd->advertised.ip0_by_ether, + (void **) &prefix, (void **) &adb, + &cursor); !rc; + rc = + skiplist_next (rfd->advertised.ip0_by_ether, (void **) &prefix, + (void **) &adb, &cursor)) + { + + uint32_t lt = adb->lifetime; + + if (lt > max) + max = lt; + if (lt < min) + min = lt; + } + } + + /* + * trigger tunnel route update + * but only if we found a VPN route and it had + * a lifetime greater than 0 + */ + if (max && rfd->max_prefix_lifetime != max) + advertise = 1; + rfd->max_prefix_lifetime = max; + rfd->min_prefix_lifetime = min; + } + + zlog_debug ("%s: returning advertise=%d, min=%d, max=%d", + __func__, advertise, rfd->min_prefix_lifetime, + rfd->max_prefix_lifetime); + + return (advertise != 0); +} + +/* + * Return Value + * + * 0 No need to advertise tunnel route + * non-0 advertise tunnel route + */ +int +rfapiApAdd ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *pfx_ip, + struct prefix *pfx_eth, + struct prefix_rd *prd, + uint32_t lifetime, + uint8_t cost, + struct rfapi_l2address_option *l2o) /* other options TBD */ +{ + int rc; + struct rfapi_adb *adb; + uint32_t old_lifetime = 0; + int use_ip0 = 0; + + if (RFAPI_0_PREFIX (pfx_ip) && RFAPI_HOST_PREFIX (pfx_ip)) + { + use_ip0 = 1; + assert (pfx_eth); + + rc = + skiplist_search (rfd->advertised.ip0_by_ether, pfx_eth, + (void **) &adb); + + } + else + { + + /* find prefix in advertised prefixes list */ + rc = + skiplist_search (rfd->advertised.ipN_by_prefix, pfx_ip, + (void **) &adb); + } + + + if (rc) + { + /* Not found */ + adb = XCALLOC (MTYPE_RFAPI_ADB, sizeof (struct rfapi_adb)); + assert (adb); + adb->lifetime = lifetime; + adb->prefix_ip = *pfx_ip; + if (pfx_eth) + adb->prefix_eth = *pfx_eth; + + if (use_ip0) + { + assert (pfx_eth); + skiplist_insert (rfd->advertised.ip0_by_ether, &adb->prefix_eth, + adb); + } + else + { + skiplist_insert (rfd->advertised.ipN_by_prefix, &adb->prefix_ip, + adb); + } + + skiplist_insert (rfd->advertised.by_lifetime, adb, adb); + } + else + { + old_lifetime = adb->lifetime; + if (old_lifetime != lifetime) + { + assert (!skiplist_delete (rfd->advertised.by_lifetime, adb, NULL)); + adb->lifetime = lifetime; + assert (!skiplist_insert (rfd->advertised.by_lifetime, adb, adb)); + } + + if (!use_ip0 && pfx_eth && prefix_cmp (&adb->prefix_eth, 
pfx_eth)) + { + /* mac address changed */ + adb->prefix_eth = *pfx_eth; + } + } + adb->cost = cost; + if (l2o) + adb->l2o = *l2o; + else + memset (&adb->l2o, 0, sizeof (struct rfapi_l2address_option)); + adb->prd = *prd; + + if (rfapiApAdjustLifetimeStats + (rfd, (rc ? NULL : &old_lifetime), &lifetime)) + return 1; + + return 0; +} + +/* + * After this function returns successfully, caller should call + * rfapiAdjustLifetimeStats() and possibly rfapiTunnelRouteAnnounce() + */ +int +rfapiApDelete ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *pfx_ip, + struct prefix *pfx_eth, + int *advertise_tunnel) /* out */ +{ + int rc; + struct rfapi_adb *adb; + uint32_t old_lifetime; + int use_ip0 = 0; + + if (advertise_tunnel) + *advertise_tunnel = 0; + + /* find prefix in advertised prefixes list */ + if (RFAPI_0_PREFIX (pfx_ip) && RFAPI_HOST_PREFIX (pfx_ip)) + { + use_ip0 = 1; + assert (pfx_eth); + + rc = + skiplist_search (rfd->advertised.ip0_by_ether, pfx_eth, + (void **) &adb); + + } + else + { + + /* find prefix in advertised prefixes list */ + rc = + skiplist_search (rfd->advertised.ipN_by_prefix, pfx_ip, + (void **) &adb); + } + + if (rc) + { + return ENOENT; + } + + old_lifetime = adb->lifetime; + + if (use_ip0) + { + rc = skiplist_delete (rfd->advertised.ip0_by_ether, pfx_eth, NULL); + } + else + { + rc = skiplist_delete (rfd->advertised.ipN_by_prefix, pfx_ip, NULL); + } + assert (!rc); + + rc = skiplist_delete (rfd->advertised.by_lifetime, adb, NULL); + assert (!rc); + + rfapiAdbFree (adb); + + if (rfapiApAdjustLifetimeStats (rfd, &old_lifetime, NULL)) + { + if (advertise_tunnel) + *advertise_tunnel = 1; + } + + return 0; +} diff --git a/bgpd/rfapi/rfapi_ap.h b/bgpd/rfapi/rfapi_ap.h new file mode 100644 index 0000000000..f2805f49cb --- /dev/null +++ b/bgpd/rfapi/rfapi_ap.h @@ -0,0 +1,99 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ +#ifndef _QUAGGA_BGP_RFAPI_AP_H +#define _QUAGGA_BGP_RFAPI_AP_H + +/* TBD delete some of these #includes */ + +#include + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/routemap.h" +#include "lib/log.h" +#include "lib/linklist.h" +#include "lib/command.h" +#include "lib/stream.h" + +#include "bgpd/bgpd.h" + +#include "bgp_rfapi_cfg.h" +#include "rfapi.h" +#include "rfapi_backend.h" + +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_aspath.h" +#include "bgpd/bgp_advertise.h" + +#include "rfapi_import.h" +#include "rfapi_private.h" +#include "rfapi_monitor.h" +#include "rfapi_vty.h" +#include "vnc_export_bgp.h" +#include "vnc_export_bgp_p.h" +#include "vnc_zebra.h" +#include "vnc_import_bgp.h" +#include "rfapi_rib.h" + + +extern void +rfapiApInit (struct rfapi_advertised_prefixes *ap); + +extern void +rfapiApRelease (struct rfapi_advertised_prefixes *ap); + +extern int +rfapiApCount (struct rfapi_descriptor *rfd); + + +extern int +rfapiApCountAll (struct bgp *bgp); + +extern void +rfapiApReadvertiseAll (struct bgp *bgp, struct rfapi_descriptor *rfd); + +extern void +rfapiApWithdrawAll (struct bgp *bgp, struct rfapi_descriptor *rfd); + +extern int +rfapiApAdd ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *pfx_ip, + struct prefix *pfx_eth, + struct prefix_rd *prd, + uint32_t lifetime, + uint8_t cost, + struct rfapi_l2address_option *l2o); /* other options TBD */ + +extern int +rfapiApDelete ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *pfx_ip, + struct prefix *pfx_eth, + int *advertise_tunnel); /* out */ + + +#endif /* _QUAGGA_BGP_RFAPI_AP_H */ diff --git a/bgpd/rfapi/rfapi_backend.h b/bgpd/rfapi/rfapi_backend.h new file mode 100644 index 0000000000..788ec73751 --- /dev/null +++ b/bgpd/rfapi/rfapi_backend.h @@ -0,0 +1,92 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#ifndef _QUAGGA_BGP_RFAPI_BACKEND_H +#define _QUAGGA_BGP_RFAPI_BACKEND_H + +#if ENABLE_BGP_VNC + +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_nexthop.h" + +extern void rfapi_init (void); +extern void vnc_zebra_init (struct thread_master *master); +extern void vnc_zebra_destroy (void); + +extern void rfapi_delete (struct bgp *); + +struct rfapi *bgp_rfapi_new (struct bgp *bgp); +void bgp_rfapi_destroy (struct bgp *bgp, struct rfapi *h); + +struct rfapi_import_table *rfapiImportTableRefAdd (struct bgp *bgp, + struct ecommunity + *rt_import_list); + +void +rfapiImportTableRefDelByIt (struct bgp *bgp, + struct rfapi_import_table *it_target); + + +extern void +rfapiProcessUpdate (struct peer *peer, + void *rfd, + struct prefix *p, + struct prefix_rd *prd, + struct attr *attr, + afi_t afi, + safi_t safi, + u_char type, u_char sub_type, uint32_t * label); + + +extern void +rfapiProcessWithdraw (struct peer *peer, + void *rfd, + struct prefix *p, + struct prefix_rd *prd, + struct attr *attr, + afi_t afi, safi_t safi, u_char type, int kill); + +extern void rfapiProcessPeerDown (struct peer *peer); + +extern void +vnc_zebra_announce (struct prefix *p, + struct bgp_info *new_select, struct bgp *bgp); + +extern void +vnc_zebra_withdraw (struct prefix *p, struct bgp_info *old_select); + + +extern void +rfapi_vty_out_vncinfo (struct vty *vty, + struct prefix *p, struct bgp_info *bi, safi_t safi); + + +extern void vnc_direct_bgp_vpn_enable (struct bgp *bgp, afi_t afi); + +extern void vnc_direct_bgp_vpn_disable (struct bgp *bgp, afi_t afi); + +extern void vnc_direct_bgp_rh_vpn_enable (struct bgp *bgp, afi_t afi); + +extern void vnc_direct_bgp_rh_vpn_disable (struct bgp *bgp, afi_t afi); + +#endif /* ENABLE_BGP_VNC */ + +#endif /* _QUAGGA_BGP_RFAPI_BACKEND_H */ diff --git a/bgpd/rfapi/rfapi_descriptor_rfp_utils.c b/bgpd/rfapi/rfapi_descriptor_rfp_utils.c new file mode 100644 index 0000000000..b2a8689881 --- /dev/null +++ b/bgpd/rfapi/rfapi_descriptor_rfp_utils.c @@ -0,0 +1,131 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + + +#include + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/log.h" + +#include "bgpd/bgpd.h" + +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_descriptor_rfp_utils.h" + + +void * +rfapi_create_generic (struct rfapi_ip_addr *vn, struct rfapi_ip_addr *un) +{ + struct rfapi_descriptor *rfd; + rfd = XCALLOC (MTYPE_RFAPI_DESC, sizeof (struct rfapi_descriptor)); + zlog_debug ("%s: rfd=%p", __func__, rfd); + rfd->vn_addr = *vn; + rfd->un_addr = *un; + return (void *) rfd; +} + +/*------------------------------------------ + * rfapi_free_generic + * + * Compare two generic rfapi descriptors. 
+ * + * input: + * grfd: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * + *------------------------------------------*/ +void +rfapi_free_generic (void *grfd) +{ + struct rfapi_descriptor *rfd; + rfd = (struct rfapi_descriptor *) grfd; + XFREE (MTYPE_RFAPI_DESC, rfd); +} + + +/*------------------------------------------ + * rfapi_compare_rfds + * + * Compare two generic rfapi descriptors. + * + * input: + * rfd1: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * rfd2: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * 0 Mismatch + * 1 Match + *------------------------------------------*/ +int +rfapi_compare_rfds (void *rfd1, void *rfd2) +{ + struct rfapi_descriptor *rrfd1, *rrfd2; + int match = 0; + + rrfd1 = (struct rfapi_descriptor *) rfd1; + rrfd2 = (struct rfapi_descriptor *) rfd2; + + if (rrfd1->vn_addr.addr_family == rrfd2->vn_addr.addr_family) + { + if (rrfd1->vn_addr.addr_family == AF_INET) + match = IPV4_ADDR_SAME (&(rrfd1->vn_addr.addr.v4), + &(rrfd2->vn_addr.addr.v4)); + else + match = IPV6_ADDR_SAME (&(rrfd1->vn_addr.addr.v6), + &(rrfd2->vn_addr.addr.v6)); + } + + /* + * If the VN addresses don't match in all forms, + * give up. + */ + if (!match) + return 0; + + /* + * do the process again for the UN addresses. + */ + match = 0; + if (rrfd1->un_addr.addr_family == rrfd2->un_addr.addr_family) + { + /* VN addresses match + * UN address families match + * now check the actual UN addresses + */ + if (rrfd1->un_addr.addr_family == AF_INET) + match = IPV4_ADDR_SAME (&(rrfd1->un_addr.addr.v4), + &(rrfd2->un_addr.addr.v4)); + else + match = IPV6_ADDR_SAME (&(rrfd1->un_addr.addr.v6), + &(rrfd2->un_addr.addr.v6)); + } + return match; +} diff --git a/bgpd/rfapi/rfapi_descriptor_rfp_utils.h b/bgpd/rfapi/rfapi_descriptor_rfp_utils.h new file mode 100644 index 0000000000..9067cdf54b --- /dev/null +++ b/bgpd/rfapi/rfapi_descriptor_rfp_utils.h @@ -0,0 +1,39 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + + +extern void *rfapi_create_generic (struct rfapi_ip_addr *vn, + struct rfapi_ip_addr *un); + +/*------------------------------------------ + * rfapi_free_generic + * + * Compare two generic rfapi descriptors. + * + * input: + * grfd: rfapi descriptor returned by rfapi_open or rfapi_create_generic + * + * output: + * + * return value: + * + *------------------------------------------*/ +extern void rfapi_free_generic (void *grfd); diff --git a/bgpd/rfapi/rfapi_encap_tlv.c b/bgpd/rfapi/rfapi_encap_tlv.c new file mode 100644 index 0000000000..0a5962ccb6 --- /dev/null +++ b/bgpd/rfapi/rfapi_encap_tlv.c @@ -0,0 +1,812 @@ +/* + * Copyright 2015-2016, LabN Consulting, L.L.C. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include "lib/zebra.h" + +#include "lib/memory.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_attr.h" + +#include "bgpd/bgp_encap_types.h" +#include "bgpd/bgp_encap_tlv.h" + +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_encap_tlv.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/bgp_rfapi_cfg.h" + +static void +rfapi_add_endpoint_address_to_subtlv ( + struct bgp *bgp, + struct rfapi_ip_addr *ea, + struct bgp_tea_subtlv_remote_endpoint *subtlv) +{ + subtlv->family = ea->addr_family; + if (subtlv->family == AF_INET) + subtlv->ip_address.v4 = ea->addr.v4; + else + subtlv->ip_address.v6 = ea->addr.v6; + subtlv->as4 = htonl (bgp->as); +} + +bgp_encap_types +rfapi_tunneltype_option_to_tlv ( + struct bgp *bgp, + struct rfapi_ip_addr *ea, + struct rfapi_tunneltype_option *tto, + struct attr *attr, + int always_add) +{ + +#define _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS(ttype) \ + if ((always_add || (bgp->rfapi_cfg && \ + !CHECK_FLAG(bgp->rfapi_cfg->flags, \ + BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP))) && \ + ea && !CHECK_SUBTLV_FLAG(&tto->bgpinfo.ttype, \ + BGP_TEA_SUBTLV_REMOTE_ENDPOINT)) { \ + rfapi_add_endpoint_address_to_subtlv(bgp, ea, \ + &tto->bgpinfo.ttype.st_endpoint); \ + SET_SUBTLV_FLAG(&tto->bgpinfo.ttype, BGP_TEA_SUBTLV_REMOTE_ENDPOINT); \ + } + + struct rfapi_tunneltype_option dto; + if (tto == NULL) + { /* create default type */ + tto = &dto; + memset (tto, 0, sizeof (dto)); + tto->type = RFAPI_BGP_ENCAP_TYPE_DEFAULT; + } + switch (tto->type) + { + case BGP_ENCAP_TYPE_L2TPV3_OVER_IP: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (l2tpv3_ip); + bgp_encap_type_l2tpv3overip_to_tlv (&tto->bgpinfo.l2tpv3_ip, attr); + break; + + case BGP_ENCAP_TYPE_GRE: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (gre); + bgp_encap_type_gre_to_tlv (&tto->bgpinfo.gre, attr); + break; + + case BGP_ENCAP_TYPE_TRANSMIT_TUNNEL_ENDPOINT: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (transmit_tunnel_endpoint); + bgp_encap_type_transmit_tunnel_endpoint (&tto->bgpinfo.transmit_tunnel_endpoint, + attr); + break; + + case BGP_ENCAP_TYPE_IPSEC_IN_TUNNEL_MODE: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (ipsec_tunnel); + bgp_encap_type_ipsec_in_tunnel_mode_to_tlv (&tto->bgpinfo.ipsec_tunnel, + attr); + break; + + case BGP_ENCAP_TYPE_IP_IN_IP_TUNNEL_WITH_IPSEC_TRANSPORT_MODE: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (ip_ipsec); + bgp_encap_type_ip_in_ip_tunnel_with_ipsec_transport_mode_to_tlv + (&tto->bgpinfo.ip_ipsec, attr); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_IP_TUNNEL_WITH_IPSEC_TRANSPORT_MODE: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (mpls_ipsec); + bgp_encap_type_mpls_in_ip_tunnel_with_ipsec_transport_mode_to_tlv + (&tto->bgpinfo.mpls_ipsec, attr); + break; + + case 
BGP_ENCAP_TYPE_IP_IN_IP: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (ip_ip); + bgp_encap_type_ip_in_ip_to_tlv (&tto->bgpinfo.ip_ip, attr); + break; + + case BGP_ENCAP_TYPE_VXLAN: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (vxlan); + bgp_encap_type_vxlan_to_tlv (&tto->bgpinfo.vxlan, attr); + break; + + case BGP_ENCAP_TYPE_NVGRE: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (nvgre); + bgp_encap_type_nvgre_to_tlv (&tto->bgpinfo.nvgre, attr); + break; + + case BGP_ENCAP_TYPE_MPLS: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (mpls); + bgp_encap_type_mpls_to_tlv (&tto->bgpinfo.mpls, attr); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_GRE: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (mpls_gre); + bgp_encap_type_mpls_in_gre_to_tlv (&tto->bgpinfo.mpls_gre, attr); + break; + + case BGP_ENCAP_TYPE_VXLAN_GPE: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (vxlan_gpe); + bgp_encap_type_vxlan_gpe_to_tlv (&tto->bgpinfo.vxlan_gpe, attr); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_UDP: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (mpls_udp); + bgp_encap_type_mpls_in_udp_to_tlv (&tto->bgpinfo.mpls_udp, attr); + break; + + case BGP_ENCAP_TYPE_PBB: + _RTTO_MAYBE_ADD_ENDPOINT_ADDRESS (pbb); + bgp_encap_type_pbb_to_tlv (&tto->bgpinfo.pbb, attr); + break; + + default: + assert (0); + } + return tto->type; +} + +struct rfapi_un_option * +rfapi_encap_tlv_to_un_option (struct attr *attr) +{ + struct attr_extra *attre = attr->extra; + struct rfapi_un_option *uo = NULL; + struct rfapi_tunneltype_option *tto; + int rc; + struct bgp_attr_encap_subtlv *stlv; + + if (!attre) + return NULL; + + /* no tunnel encap attr stored */ + if (!attre->encap_tunneltype) + return NULL; + + stlv = attre->encap_subtlvs; + + uo = XCALLOC (MTYPE_RFAPI_UN_OPTION, sizeof (struct rfapi_un_option)); + assert (uo); + uo->type = RFAPI_UN_OPTION_TYPE_TUNNELTYPE; + uo->v.tunnel.type = attre->encap_tunneltype; + tto = &uo->v.tunnel; + + switch (attre->encap_tunneltype) + { + case BGP_ENCAP_TYPE_L2TPV3_OVER_IP: + rc = tlv_to_bgp_encap_type_l2tpv3overip (stlv, &tto->bgpinfo.l2tpv3_ip); + break; + + case BGP_ENCAP_TYPE_GRE: + rc = tlv_to_bgp_encap_type_gre (stlv, &tto->bgpinfo.gre); + break; + + case BGP_ENCAP_TYPE_TRANSMIT_TUNNEL_ENDPOINT: + rc = tlv_to_bgp_encap_type_transmit_tunnel_endpoint (stlv, + &tto->bgpinfo.transmit_tunnel_endpoint); + break; + + case BGP_ENCAP_TYPE_IPSEC_IN_TUNNEL_MODE: + rc = tlv_to_bgp_encap_type_ipsec_in_tunnel_mode (stlv, + &tto->bgpinfo.ipsec_tunnel); + break; + + case BGP_ENCAP_TYPE_IP_IN_IP_TUNNEL_WITH_IPSEC_TRANSPORT_MODE: + rc = + tlv_to_bgp_encap_type_ip_in_ip_tunnel_with_ipsec_transport_mode (stlv, + &tto->bgpinfo.ip_ipsec); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_IP_TUNNEL_WITH_IPSEC_TRANSPORT_MODE: + rc = + tlv_to_bgp_encap_type_mpls_in_ip_tunnel_with_ipsec_transport_mode + (stlv, &tto->bgpinfo.mpls_ipsec); + break; + + case BGP_ENCAP_TYPE_IP_IN_IP: + rc = tlv_to_bgp_encap_type_ip_in_ip (stlv, &tto->bgpinfo.ip_ip); + break; + + case BGP_ENCAP_TYPE_VXLAN: + rc = tlv_to_bgp_encap_type_vxlan (stlv, &tto->bgpinfo.vxlan); + break; + + case BGP_ENCAP_TYPE_NVGRE: + rc = tlv_to_bgp_encap_type_nvgre (stlv, &tto->bgpinfo.nvgre); + break; + + case BGP_ENCAP_TYPE_MPLS: + rc = tlv_to_bgp_encap_type_mpls (stlv, &tto->bgpinfo.mpls); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_GRE: + rc = tlv_to_bgp_encap_type_mpls_in_gre (stlv, &tto->bgpinfo.mpls_gre); + break; + + case BGP_ENCAP_TYPE_VXLAN_GPE: + rc = tlv_to_bgp_encap_type_vxlan_gpe (stlv, &tto->bgpinfo.vxlan_gpe); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_UDP: + rc = tlv_to_bgp_encap_type_mpls_in_udp (stlv, &tto->bgpinfo.mpls_udp); + break; + + 
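+      /*
+       * Each tlv_to_bgp_encap_type_* decoder in this switch parses the
+       * attribute's sub-TLV chain (stlv) into the matching bgpinfo union
+       * member; a nonzero return value makes the rc check after the
+       * switch discard the half-built option and return NULL.
+       */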
case BGP_ENCAP_TYPE_PBB: + rc = tlv_to_bgp_encap_type_pbb (stlv, &tto->bgpinfo.pbb); + break; + + default: + zlog_debug ("%s: unknown tunnel type %d", + __func__, attre->encap_tunneltype); + rc = -1; + break; + } + if (rc) + { + XFREE (MTYPE_RFAPI_UN_OPTION, uo); + uo = NULL; + } + return uo; +} + +/*********************************************************************** + * SUBTLV PRINT + ***********************************************************************/ + +static void +subtlv_print_encap_l2tpv3_over_ip ( + void *stream, + int column_offset, + struct bgp_tea_subtlv_encap_l2tpv3_over_ip *st) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!st) + return; + + fp (out, "%*s%s%s", column_offset, "", "SubTLV: Encap(L2TPv3 over IP)", + vty_newline); + fp (out, "%*s SessionID: %d%s", column_offset, "", st->sessionid, + vty_newline); + fp (out, "%*s Cookie: (length %d)%s", column_offset, "", st->cookie_length, + vty_newline); +} + +static void +subtlv_print_encap_gre ( + void *stream, + int column_offset, + struct bgp_tea_subtlv_encap_gre_key *st) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!st) + return; + + fp (out, "%*s%s%s", column_offset, "", "SubTLV: Encap(GRE)", vty_newline); + fp (out, "%*s GRE key: %d (0x%x)%s", column_offset, "", st->gre_key, + st->gre_key, vty_newline); +} + +static void +subtlv_print_encap_pbb ( + void *stream, + int column_offset, + struct bgp_tea_subtlv_encap_pbb *st) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!st) + return; + + fp (out, "%*s%s%s", column_offset, "", "SubTLV: Encap(PBB)", vty_newline); + if (st->flag_isid) + { + fp (out, "%*s ISID: %d (0x%x)%s", column_offset, "", st->isid, + st->isid, vty_newline); + } + if (st->flag_vid) + { + fp (out, "%*s VID: %d (0x%x)%s", column_offset, "", st->vid, st->vid, + vty_newline); + } + fp (out, "%*s MACADDR %02x:%02x:%02x:%02x:%02x:%02x%s", + column_offset, "", + st->macaddr[0], + st->macaddr[1], + st->macaddr[2], + st->macaddr[3], st->macaddr[4], st->macaddr[5], vty_newline); +} + +static void +subtlv_print_proto_type ( + void *stream, + int column_offset, + struct bgp_tea_subtlv_proto_type *st) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!st) + return; + + fp (out, "%*s%s%s", column_offset, "", "SubTLV: Encap(Proto Type)", + vty_newline); + fp (out, "%*s Proto %d (0x%x)%s", column_offset, "", st->proto, st->proto, + vty_newline); +} + +static void +subtlv_print_color ( + void *stream, + int column_offset, + struct bgp_tea_subtlv_color *st) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!st) + return; + + fp (out, "%*s%s%s", column_offset, "", "SubTLV: Color", vty_newline); + fp (out, "%*s Color: %d (0x%x)", column_offset, "", st->color, st->color, + vty_newline); +} + +static void +subtlv_print_ipsec_ta ( + void *stream, + int column_offset, + struct bgp_tea_subtlv_ipsec_ta *st) +{ + int (*fp) (void *, 
const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!st) + return; + + fp (out, "%*s%s%s", column_offset, "", "SubTLV: IPSEC TA", vty_newline); + fp (out, "%*s Authenticator Type: %d (0x%x)", column_offset, "", + st->authenticator_type, st->authenticator_type, vty_newline); + fp (out, "%*s Authenticator: (length %d)", column_offset, "", + st->authenticator_length, vty_newline); +} + +/*********************************************************************** + * TLV PRINT + ***********************************************************************/ + +static void +print_encap_type_l2tpv3overip ( + void *stream, + int column_offset, + struct bgp_encap_type_l2tpv3_over_ip *bet) +{ + const char *type = "L2TPv3 over IP"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + subtlv_print_encap_l2tpv3_over_ip (stream, column_offset + 2, + &bet->st_encap); + subtlv_print_proto_type (stream, column_offset + 2, &bet->st_proto); + subtlv_print_color (stream, column_offset + 2, &bet->st_color); +} + +static void +print_encap_type_gre ( + void *stream, + int column_offset, + struct bgp_encap_type_gre *bet) +{ + const char *type = "GRE"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + subtlv_print_encap_gre (stream, column_offset + 2, &bet->st_encap); + subtlv_print_proto_type (stream, column_offset + 2, &bet->st_proto); + subtlv_print_color (stream, column_offset + 2, &bet->st_color); +} + +static void +print_encap_type_ip_in_ip ( + void *stream, + int column_offset, + struct bgp_encap_type_ip_in_ip *bet) +{ + const char *type = "IP in IP"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + subtlv_print_proto_type (stream, column_offset + 2, &bet->st_proto); + subtlv_print_color (stream, column_offset + 2, &bet->st_color); +} + +static void +print_encap_type_transmit_tunnel_endpoint ( + void *stream, + int column_offset, + struct bgp_encap_type_transmit_tunnel_endpoint *bet) +{ + const char *type = "Transmit Tunnel Endpoint"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + /* no subtlvs for this type */ +} + +static void +print_encap_type_ipsec_in_tunnel_mode ( + void *stream, + int column_offset, + struct bgp_encap_type_ipsec_in_tunnel_mode *bet) +{ + const char *type = "IPSEC in Tunnel mode"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + subtlv_print_ipsec_ta (stream, 
column_offset + 2, &bet->st_ipsec_ta); +} + +static void +print_encap_type_ip_in_ip_tunnel_with_ipsec_transport_mode ( + void *stream, + int column_offset, + struct bgp_encap_type_ip_in_ip_tunnel_with_ipsec_transport_mode *bet) +{ + const char *type = "IP in IP Tunnel with IPSEC transport mode"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + subtlv_print_ipsec_ta (stream, column_offset + 2, &bet->st_ipsec_ta); +} + +static void +print_encap_type_mpls_in_ip_tunnel_with_ipsec_transport_mode ( + void *stream, + int column_offset, + struct bgp_encap_type_mpls_in_ip_tunnel_with_ipsec_transport_mode *bet) +{ + const char *type = "MPLS in IP Tunnel with IPSEC transport mode"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + subtlv_print_ipsec_ta (stream, column_offset + 2, &bet->st_ipsec_ta); +} + + +static void +print_encap_type_pbb ( + void *stream, + int column_offset, + struct bgp_encap_type_pbb *bet) +{ + const char *type = "PBB"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + subtlv_print_encap_pbb (stream, column_offset + 2, &bet->st_encap); +} + + +static void +print_encap_type_vxlan ( + void *stream, + int column_offset, + struct bgp_encap_type_vxlan *bet) +{ + const char *type = "VXLAN"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + /* no subtlvs for this type */ +} + + +static void +print_encap_type_nvgre ( + void *stream, + int column_offset, + struct bgp_encap_type_nvgre *bet) +{ + const char *type = "NVGRE"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + /* no subtlvs for this type */ +} + +static void +print_encap_type_mpls ( + void *stream, + int column_offset, + struct bgp_encap_type_mpls *bet) +{ + const char *type = "MPLS"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + /* no subtlvs for this type */ +} + +static void +print_encap_type_mpls_in_gre ( + void *stream, + int column_offset, + struct bgp_encap_type_mpls_in_gre *bet) +{ + const char *type = "MPLS in GRE"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, 
vty_newline); + + /* no subtlvs for this type */ +} + +static void +print_encap_type_vxlan_gpe ( + void *stream, + int column_offset, + struct bgp_encap_type_vxlan_gpe *bet) +{ + const char *type = "VXLAN GPE"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + /* no subtlvs for this type */ +} + +static void +print_encap_type_mpls_in_udp ( + void *stream, + int column_offset, + struct bgp_encap_type_mpls_in_udp *bet) +{ + const char *type = "MPLS in UDP"; + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + if (!bet) + return; + + fp (out, "%*sTEA type %s%s", column_offset, "", type, vty_newline); + + /* no subtlvs for this type */ +} + +void +rfapi_print_tunneltype_option ( + void *stream, + int column_offset, + struct rfapi_tunneltype_option *tto) +{ + switch (tto->type) + { + case BGP_ENCAP_TYPE_L2TPV3_OVER_IP: + print_encap_type_l2tpv3overip (stream, column_offset, + &tto->bgpinfo.l2tpv3_ip); + break; + + case BGP_ENCAP_TYPE_GRE: + print_encap_type_gre (stream, column_offset, &tto->bgpinfo.gre); + break; + + case BGP_ENCAP_TYPE_TRANSMIT_TUNNEL_ENDPOINT: + print_encap_type_transmit_tunnel_endpoint (stream, column_offset, + &tto->bgpinfo.transmit_tunnel_endpoint); + break; + + case BGP_ENCAP_TYPE_IPSEC_IN_TUNNEL_MODE: + print_encap_type_ipsec_in_tunnel_mode (stream, column_offset, + &tto->bgpinfo.ipsec_tunnel); + break; + + case BGP_ENCAP_TYPE_IP_IN_IP_TUNNEL_WITH_IPSEC_TRANSPORT_MODE: + print_encap_type_ip_in_ip_tunnel_with_ipsec_transport_mode (stream, + column_offset, + &tto->bgpinfo.ip_ipsec); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_IP_TUNNEL_WITH_IPSEC_TRANSPORT_MODE: + print_encap_type_mpls_in_ip_tunnel_with_ipsec_transport_mode (stream, + column_offset, + &tto->bgpinfo.mpls_ipsec); + break; + + case BGP_ENCAP_TYPE_IP_IN_IP: + print_encap_type_ip_in_ip (stream, column_offset, &tto->bgpinfo.ip_ip); + break; + + case BGP_ENCAP_TYPE_VXLAN: + print_encap_type_vxlan (stream, column_offset, &tto->bgpinfo.vxlan); + break; + + case BGP_ENCAP_TYPE_NVGRE: + print_encap_type_nvgre (stream, column_offset, &tto->bgpinfo.nvgre); + break; + + case BGP_ENCAP_TYPE_MPLS: + print_encap_type_mpls (stream, column_offset, &tto->bgpinfo.mpls); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_GRE: + print_encap_type_mpls_in_gre (stream, column_offset, + &tto->bgpinfo.mpls_gre); + break; + + case BGP_ENCAP_TYPE_VXLAN_GPE: + print_encap_type_vxlan_gpe (stream, column_offset, + &tto->bgpinfo.vxlan_gpe); + break; + + case BGP_ENCAP_TYPE_MPLS_IN_UDP: + print_encap_type_mpls_in_udp (stream, column_offset, + &tto->bgpinfo.mpls_udp); + break; + + case BGP_ENCAP_TYPE_PBB: + print_encap_type_pbb (stream, column_offset, &tto->bgpinfo.pbb); + break; + + default: + assert (0); + } +} diff --git a/bgpd/rfapi/rfapi_encap_tlv.h b/bgpd/rfapi/rfapi_encap_tlv.h new file mode 100644 index 0000000000..9678655a69 --- /dev/null +++ b/bgpd/rfapi/rfapi_encap_tlv.h @@ -0,0 +1,43 @@ +/* + * Copyright 2015-2016, LabN Consulting, L.L.C. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _QUAGGA_BGP_RFAPI_ENCAP_TLV_H +#define _QUAGGA_BGP_RFAPI_ENCAP_TLV_H + +#define RFAPI_BGP_ENCAP_TYPE_DEFAULT BGP_ENCAP_TYPE_IP_IN_IP + +extern bgp_encap_types +rfapi_tunneltype_option_to_tlv ( + struct bgp *bgp, + struct rfapi_ip_addr *ea, + struct rfapi_tunneltype_option *tto, + struct attr *attr, + int always_add); + +extern struct rfapi_un_option * +rfapi_encap_tlv_to_un_option (struct attr *attr); + +extern void +rfapi_print_tunneltype_option ( + void *stream, + int column_offset, + struct rfapi_tunneltype_option *tto); + + +#endif /* _QUAGGA_BGP_RFAPI_ENCAP_TLV_H */ diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c new file mode 100644 index 0000000000..8783024f16 --- /dev/null +++ b/bgpd/rfapi/rfapi_import.c @@ -0,0 +1,5150 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +/* + * File: rfapi_import.c + * Purpose: Handle import of routes from BGP to RFAPI + */ + +#include + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/log.h" +#include "lib/skiplist.h" +#include "lib/thread.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_attr.h" +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_mplsvpn.h" /* prefix_rd2str() */ +#include "bgpd/bgp_vnc_types.h" + +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi_backend.h" +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_nve_addr.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/vnc_export_bgp.h" +#include "bgpd/rfapi/vnc_export_bgp_p.h" +#include "bgpd/rfapi/vnc_zebra.h" +#include "bgpd/rfapi/vnc_import_bgp.h" +#include "bgpd/rfapi/vnc_import_bgp_p.h" +#include "bgpd/rfapi/rfapi_rib.h" +#include "bgpd/rfapi/rfapi_encap_tlv.h" +#include "bgpd/rfapi/vnc_debug.h" + +#ifdef HAVE_GLIBC_BACKTRACE +/* for backtrace and friends */ +#include +#endif /* HAVE_GLIBC_BACKTRACE */ + +#undef DEBUG_MONITOR_MOVE_SHORTER +#undef DEBUG_RETURNED_NHL +#undef DEBUG_ROUTE_COUNTERS +#undef DEBUG_ENCAP_MONITOR +#undef DEBUG_L2_EXTRA +#undef DEBUG_IT_NODES +#undef DEBUG_BI_SEARCH + +/* + * Allocated for each withdraw timer instance; freed when the timer + * expires or is canceled + */ +struct rfapi_withdraw +{ + struct rfapi_import_table *import_table; + struct route_node *node; + struct bgp_info *info; + safi_t safi; /* used only for bulk operations */ + /* + * For import table node reference count checking (i.e., debugging). + * Normally when a timer expires, lockoffset should be 0. However, if + * the timer expiration function is called directly (e.g., + * rfapiExpireVpnNow), the node could be locked by a preceding + * route_top() or route_next() in a loop, so we need to pass this + * value in. + */ + int lockoffset; +}; + +/* + * DEBUG FUNCTION + * It's evil and fiendish. It's compiler-dependent. + * ? Might need LDFLAGS -rdynamic to produce all function names + */ +void +rfapiDebugBacktrace (void) +{ +#ifdef HAVE_GLIBC_BACKTRACE +#define RFAPI_DEBUG_BACKTRACE_NENTRIES 200 + void *buf[RFAPI_DEBUG_BACKTRACE_NENTRIES]; + char **syms; + size_t i; + size_t size; + + size = backtrace (buf, RFAPI_DEBUG_BACKTRACE_NENTRIES); + syms = backtrace_symbols (buf, size); + + for (i = 0; i < size && i < RFAPI_DEBUG_BACKTRACE_NENTRIES; ++i) + { + zlog_debug ("backtrace[%2lu]: %s", i, syms[i]); + } + + free (syms); +#else +#endif +} + +/* + * DEBUG FUNCTION + * Count remote routes and compare with actively-maintained values. + * Abort if they disagree. 
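+ * Walks each import table's imported_vpn trees and recounts holddown,
+ * local, imported, and remote routes per AFI, then asserts that the
+ * holddown, remote, and imported counts match the counters cached in
+ * the import table. Intended to be invoked via the VNC_ITRCCK macro,
+ * which expands to a call only when DEBUG_ROUTE_COUNTERS is set (see
+ * below).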
+ */ +void +rfapiCheckRouteCount () +{ + struct bgp *bgp = bgp_get_default (); + struct rfapi *h; + struct rfapi_import_table *it; + afi_t afi; + + assert (bgp); + + h = bgp->rfapi; + assert (h); + + for (it = h->imports; it; it = it->next) + { + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + struct route_table *rt; + struct route_node *rn; + + int holddown_count = 0; + int local_count = 0; + int imported_count = 0; + int remote_count = 0; + + rt = it->imported_vpn[afi]; + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + struct bgp_info *bi; + struct bgp_info *next; + + for (bi = rn->info; bi; bi = next) + { + next = bi->next; + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + { + ++holddown_count; + + } + else + { + if (RFAPI_LOCAL_BI (bi)) + { + ++local_count; + } + else + { + if (RFAPI_DIRECT_IMPORT_BI (bi)) + { + ++imported_count; + } + else + { + ++remote_count; + } + } + } + } + } + + if (it->holddown_count[afi] != holddown_count) + { + zlog_debug ("%s: it->holddown_count %d != holddown_count %d", + __func__, it->holddown_count[afi], holddown_count); + assert (0); + } + if (it->remote_count[afi] != remote_count) + { + zlog_debug ("%s: it->remote_count %d != remote_count %d", + __func__, it->remote_count[afi], remote_count); + assert (0); + } + if (it->imported_count[afi] != imported_count) + { + zlog_debug ("%s: it->imported_count %d != imported_count %d", + __func__, it->imported_count[afi], imported_count); + assert (0); + } + } + } +} + +#if DEBUG_ROUTE_COUNTERS +#define VNC_ITRCCK do {rfapiCheckRouteCount();} while (0) +#else +#define VNC_ITRCCK +#endif + +/* + * Validate reference count for a node in an import table + * + * Normally lockoffset is 0 for nodes in quiescent state. However, + * route_unlock_node will delete the node if it is called when + * node->lock == 1, and we have to validate the refcount before + * the node is deleted. In this case, we specify lockoffset 1. + */ +void +rfapiCheckRefcount (struct route_node *rn, safi_t safi, int lockoffset) +{ + unsigned int count_bi = 0; + unsigned int count_monitor = 0; + struct bgp_info *bi; + struct rfapi_monitor_encap *hme; + struct rfapi_monitor_vpn *hmv; + + for (bi = rn->info; bi; bi = bi->next) + ++count_bi; + + + if (rn->aggregate) + { + ++count_monitor; /* rfapi_it_extra */ + + switch (safi) + { + void *cursor; + int rc; + + case SAFI_ENCAP: + for (hme = RFAPI_MONITOR_ENCAP (rn); hme; hme = hme->next) + ++count_monitor; + break; + + case SAFI_MPLS_VPN: + + for (hmv = RFAPI_MONITOR_VPN (rn); hmv; hmv = hmv->next) + ++count_monitor; + + if (RFAPI_MONITOR_EXTERIOR (rn)->source) + { + ++count_monitor; /* sl */ + cursor = NULL; + for (rc = skiplist_next (RFAPI_MONITOR_EXTERIOR (rn)->source, + NULL, NULL, &cursor); + !rc; + rc = skiplist_next (RFAPI_MONITOR_EXTERIOR (rn)->source, + NULL, NULL, &cursor)) + { + + ++count_monitor; /* sl entry */ + } + } + break; + + default: + assert (0); + } + } + + if (count_bi + count_monitor + lockoffset != rn->lock) + { + zlog_debug + ("%s: count_bi=%d, count_monitor=%d, lockoffset=%d, rn->lock=%d", + __func__, count_bi, count_monitor, lockoffset, rn->lock); + assert (0); + } +} + +/* + * Perform deferred rfapi_close operations that were queued + * during callbacks. 
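+ * (Closes requested from within an rfapi callback are queued and run
+ * here from the work queue once the callback has returned; the assert
+ * below checks that RFAPI_INCALLBACK is no longer set.)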
+ */ +static wq_item_status +rfapi_deferred_close_workfunc (struct work_queue *q, void *data) +{ + struct rfapi_descriptor *rfd = data; + struct rfapi *h = q->spec.data; + + assert (!(h->flags & RFAPI_INCALLBACK)); + rfapi_close (rfd); + zlog_debug ("%s: completed deferred close on handle %p", __func__, rfd); + return WQ_SUCCESS; +} + +/* + * Extract layer 2 option from Encap TLVS in BGP attrs + */ +int +rfapiGetL2o (struct attr *attr, struct rfapi_l2address_option *l2o) +{ + if (attr && attr->extra) + { + + struct bgp_attr_encap_subtlv *pEncap; + + for (pEncap = attr->extra->vnc_subtlvs; pEncap; pEncap = pEncap->next) + { + + if (pEncap->type == BGP_VNC_SUBTLV_TYPE_RFPOPTION) + { + if (pEncap->value[0] == RFAPI_VN_OPTION_TYPE_L2ADDR) + { + + if (pEncap->value[1] == 14) + { + memcpy (l2o->macaddr.octet, pEncap->value + 2, + ETHER_ADDR_LEN); + l2o->label = + ((pEncap->value[10] >> 4) & 0x0f) + + ((pEncap->value[9] << 4) & 0xff0) + + ((pEncap->value[8] << 12) & 0xff000); + + l2o->local_nve_id = pEncap->value[12]; + + l2o->logical_net_id = + (pEncap->value[15] & 0xff) + + ((pEncap->value[14] << 8) & 0xff00) + + ((pEncap->value[13] << 16) & 0xff0000); + } + + return 0; + } + } + } + } + + return ENOENT; +} + +/* + * Extract the lifetime from the Tunnel Encap attribute of a route in + * an import table + */ +int +rfapiGetVncLifetime (struct attr *attr, uint32_t * lifetime) +{ + struct bgp_attr_encap_subtlv *pEncap; + + *lifetime = RFAPI_INFINITE_LIFETIME; /* default to infinite */ + + if (attr && attr->extra) + { + + for (pEncap = attr->extra->vnc_subtlvs; pEncap; pEncap = pEncap->next) + { + + if (pEncap->type == BGP_VNC_SUBTLV_TYPE_LIFETIME) + { /* lifetime */ + if (pEncap->length == 4) + { + memcpy (lifetime, pEncap->value, 4); + *lifetime = ntohl (*lifetime); + return 0; + } + } + } + } + + return ENOENT; +} + +/* + * Extract the tunnel type from the extended community + */ +int +rfapiGetTunnelType (struct attr *attr, + bgp_encap_types *type) +{ + *type = BGP_ENCAP_TYPE_MPLS; /* default to MPLS */ + if (attr && attr->extra && attr->extra->ecommunity) + { + struct ecommunity *ecom = attr->extra->ecommunity; + int i; + + for (i = 0; i < (ecom->size * ECOMMUNITY_SIZE); i += ECOMMUNITY_SIZE) + { + uint8_t *ep; + + ep = ecom->val + i; + if (ep[0] == ECOMMUNITY_ENCODE_OPAQUE && + ep[1] == ECOMMUNITY_OPAQUE_SUBTYPE_ENCAP) + { + *type = (ep[6]<<8) + ep[7]; + return 0; + } + } + } + + return ENOENT; +} + + +/* + * Look for UN address in Encap attribute + */ +int +rfapiGetVncTunnelUnAddr (struct attr *attr, struct prefix *p) +{ + struct bgp_attr_encap_subtlv *pEncap; + bgp_encap_types tun_type; + + rfapiGetTunnelType (attr, &tun_type); + if (p && tun_type == BGP_ENCAP_TYPE_MPLS) + { + /* MPLS carries UN address in next hop */ + rfapiNexthop2Prefix (attr, p); + if (p->family != 0) + return 0; + } + if (attr && attr->extra) + { + for (pEncap = attr->extra->encap_subtlvs; pEncap; pEncap = pEncap->next) + { + + if (pEncap->type == BGP_ENCAP_SUBTLV_TYPE_REMOTE_ENDPOINT) + { /* un addr */ + switch (pEncap->length) + { + case 8: + if (p) + { + p->family = AF_INET; + p->prefixlen = 32; + memcpy (p->u.val, pEncap->value, 4); + } + return 0; + + case 20: + if (p) + { + p->family = AF_INET6; + p->prefixlen = 128; + memcpy (p->u.val, pEncap->value, 16); + } + return 0; + } + } + } + } + + return ENOENT; +} + +/* + * Get UN address wherever it might be + */ +int +rfapiGetUnAddrOfVpnBi (struct bgp_info *bi, struct prefix *p) +{ + /* If it's in this route's VNC attribute, we're done */ + if 
(!rfapiGetVncTunnelUnAddr (bi->attr, p)) + return 0; + /* + * Otherwise, see if it's cached from a corresponding ENCAP SAFI + * advertisement + */ + if (bi->extra) + { + switch (bi->extra->vnc.import.un_family) + { + case AF_INET: + if (p) + { + p->family = bi->extra->vnc.import.un_family; + p->u.prefix4 = bi->extra->vnc.import.un.addr4; + p->prefixlen = 32; + } + return 0; + case AF_INET6: + if (p) + { + p->family = bi->extra->vnc.import.un_family; + p->u.prefix6 = bi->extra->vnc.import.un.addr6; + p->prefixlen = 128; + } + return 0; + default: + if (p) + p->family = 0; +#if DEBUG_ENCAP_MONITOR + zlog_debug ("%s: bi->extra->vnc.import.un_family is 0, no UN addr", + __func__); +#endif + break; + } + } + + return ENOENT; +} + + +/* + * Make a new bgp_info from gathered parameters + */ +static struct bgp_info * +rfapiBgpInfoCreate ( + struct attr *attr, + struct peer *peer, + void *rfd, + struct prefix_rd *prd, + u_char type, + u_char sub_type, + uint32_t *label) +{ + struct bgp_info *new; + + new = bgp_info_new (); + assert (new); + + if (attr) + { + if (!new->attr) + new->attr = bgp_attr_intern (attr); + } + bgp_info_extra_get (new); + if (prd) + { + new->extra->vnc.import.rd = *prd; + rfapi_time (&new->extra->vnc.import.create_time); + } + if (label) + encode_label (*label, new->extra->tag); + new->type = type; + new->sub_type = sub_type; + new->peer = peer; + peer_lock (peer); + + return new; +} + +/* + * Frees bgp_info as used in import tables (parts are not + * allocated exactly the way they are in the main RIBs) + */ +static void +rfapiBgpInfoFree (struct bgp_info *goner) +{ + if (!goner) + return; + + if (goner->peer) + { + zlog_debug ("%s: calling peer_unlock(%p), #%d", + __func__, goner->peer, goner->peer->lock); + peer_unlock (goner->peer); + } + + if (goner->attr) + { + bgp_attr_unintern (&goner->attr); + } + if (goner->extra) + { + assert (!goner->extra->damp_info); /* Not used in import tbls */ + XFREE (MTYPE_BGP_ROUTE_EXTRA, goner->extra); + goner->extra = NULL; + } + XFREE (MTYPE_BGP_ROUTE, goner); +} + +struct rfapi_import_table * +rfapiMacImportTableGetNoAlloc (struct bgp *bgp, uint32_t lni) +{ + struct rfapi *h; + struct rfapi_import_table *it = NULL; + uintptr_t lni_as_ptr = lni; + + h = bgp->rfapi; + if (!h) + return NULL; + + if (!h->import_mac) + return NULL; + + if (skiplist_search (h->import_mac, (void *) lni_as_ptr, (void **) &it)) + return NULL; + + return it; +} + +struct rfapi_import_table * +rfapiMacImportTableGet (struct bgp *bgp, uint32_t lni) +{ + struct rfapi *h; + struct rfapi_import_table *it = NULL; + uintptr_t lni_as_ptr = lni; + + h = bgp->rfapi; + assert (h); + + if (!h->import_mac) + { + /* default cmp is good enough for LNI */ + h->import_mac = skiplist_new (0, NULL, NULL); + } + + if (skiplist_search (h->import_mac, (void *) lni_as_ptr, (void **) &it)) + { + + struct ecommunity *enew; + struct ecommunity_val eval; + afi_t afi; + + it = + XCALLOC (MTYPE_RFAPI_IMPORTTABLE, sizeof (struct rfapi_import_table)); + /* set RT list of new import table based on LNI */ + memset ((char *) &eval, 0, sizeof (eval)); + eval.val[0] = 0; /* VNC L2VPN */ + eval.val[1] = 2; /* VNC L2VPN */ + eval.val[5] = (lni >> 16) & 0xff; + eval.val[6] = (lni >> 8) & 0xff; + eval.val[7] = (lni >> 0) & 0xff; + + enew = ecommunity_new (); + ecommunity_add_val (enew, &eval); + it->rt_import_list = enew; + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + it->imported_vpn[afi] = route_table_init (); + it->imported_encap[afi] = route_table_init (); + } + + it->l2_logical_net_id = lni; + 
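+      /*
+       * Index the new import table by LNI. Its import route-target list,
+       * built above, carries the LNI in the low three octets of a VNC
+       * L2VPN extended community: e.g. LNI 0x0a0b0c produces the 8-octet
+       * value 00:02:00:00:00:0a:0b:0c.
+       */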
+ skiplist_insert (h->import_mac, (void *) lni_as_ptr, it); + } + + assert (it); + return it; +} + +/* + * Implement MONITOR_MOVE_SHORTER(original_node) from + * RFAPI-Import-Event-Handling.txt + * + * Returns pointer to the list of moved monitors + */ +static struct rfapi_monitor_vpn * +rfapiMonitorMoveShorter (struct route_node *original_vpn_node, int lockoffset) +{ + struct bgp_info *bi; + struct route_node *par; + struct rfapi_monitor_vpn *m; + struct rfapi_monitor_vpn *mlast; + struct rfapi_monitor_vpn *moved; + int movecount = 0; + int parent_already_refcounted = 0; + + RFAPI_CHECK_REFCOUNT (original_vpn_node, SAFI_MPLS_VPN, lockoffset); + +#if DEBUG_MONITOR_MOVE_SHORTER + { + char buf[BUFSIZ]; + + prefix2str (&original_vpn_node->p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s: called with node pfx=%s", __func__, buf); + } +#endif + + /* + * 1. If there is at least one bi (either regular route or + * route marked as withdrawn, with a pending timer) at + * original_node with a valid UN address, we're done. Return. + */ + for (bi = original_vpn_node->info; bi; bi = bi->next) + { + struct prefix pfx; + + if (!rfapiGetUnAddrOfVpnBi (bi, &pfx)) + { +#if DEBUG_MONITOR_MOVE_SHORTER + zlog_debug ("%s: have valid UN at original node, no change", + __func__); +#endif + return NULL; + } + } + + /* + * 2. Travel up the tree (toward less-specific prefixes) from + * original_node to find the first node that has at least + * one route (even if it is only a withdrawn route) with a + * valid UN address. Call this node "Node P." + */ + for (par = original_vpn_node->parent; par; par = par->parent) + { + for (bi = par->info; bi; bi = bi->next) + { + struct prefix pfx; + if (!rfapiGetUnAddrOfVpnBi (bi, &pfx)) + { + break; + } + } + if (bi) + break; + } + + if (par) + { + RFAPI_CHECK_REFCOUNT (par, SAFI_MPLS_VPN, 0); + } + + /* + * If no less-specific routes, try to use the 0/0 node + */ + if (!par) + { + /* this isn't necessarily 0/0 */ + par = route_top (original_vpn_node->table); + + /* + * If we got the top node but it wasn't 0/0, + * ignore it + */ + if (par && par->p.prefixlen) + { + route_unlock_node (par); /* maybe free */ + par = NULL; + } + + if (par) + { + ++parent_already_refcounted; + } + } + + /* + * Create 0/0 node if it isn't there + */ + if (!par) + { + struct prefix pfx_default; + + memset (&pfx_default, 0, sizeof (pfx_default)); + pfx_default.family = original_vpn_node->p.family; + + /* creates default node if none exists */ + par = route_node_get (original_vpn_node->table, &pfx_default); + ++parent_already_refcounted; + } + + /* + * 3. Move each of the monitors found at original_node to Node P. + * These are "Moved Monitors." 
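+ * The monitors are appended to Node P's chain and their node pointers
+ * updated; the route_node reference counts are then rebalanced so that
+ * Node P holds one lock per moved monitor and original_node releases
+ * one per moved monitor.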
+ * + */ + + /* + * Attach at end so that the list pointer we return points + * only to the moved routes + */ + for (m = RFAPI_MONITOR_VPN (par), mlast = NULL; m; mlast = m, m = m->next); + + if (mlast) + { + moved = mlast->next = RFAPI_MONITOR_VPN (original_vpn_node); + } + else + { + moved = RFAPI_MONITOR_VPN_W_ALLOC (par) = + RFAPI_MONITOR_VPN (original_vpn_node); + } + if (RFAPI_MONITOR_VPN (original_vpn_node)) /* check agg, so not allocated */ + RFAPI_MONITOR_VPN_W_ALLOC (original_vpn_node) = NULL; + + /* + * update the node pointers on the monitors + */ + for (m = moved; m; m = m->next) + { + ++movecount; + m->node = par; + } + + RFAPI_CHECK_REFCOUNT (par, SAFI_MPLS_VPN, + parent_already_refcounted - movecount); + while (movecount > parent_already_refcounted) + { + route_lock_node (par); + ++parent_already_refcounted; + } + while (movecount < parent_already_refcounted) + { + /* unlikely, but code defensively */ + route_unlock_node (par); + --parent_already_refcounted; + } + RFAPI_CHECK_REFCOUNT (original_vpn_node, SAFI_MPLS_VPN, + movecount + lockoffset); + while (movecount--) + { + route_unlock_node (original_vpn_node); + } + +#if DEBUG_MONITOR_MOVE_SHORTER + { + char buf[BUFSIZ]; + + prefix2str (&par->p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s: moved to node pfx=%s", __func__, buf); + } +#endif + + + return moved; +} + +/* + * Implement MONITOR_MOVE_LONGER(new_node) from + * RFAPI-Import-Event-Handling.txt + */ +static void +rfapiMonitorMoveLonger (struct route_node *new_vpn_node) +{ + struct rfapi_monitor_vpn *monitor; + struct rfapi_monitor_vpn *mlast; + struct bgp_info *bi; + struct route_node *par; + + RFAPI_CHECK_REFCOUNT (new_vpn_node, SAFI_MPLS_VPN, 0); + + /* + * Make sure we have at least one valid route at the new node + */ + for (bi = new_vpn_node->info; bi; bi = bi->next) + { + struct prefix pfx; + if (!rfapiGetUnAddrOfVpnBi (bi, &pfx)) + break; + } + + if (!bi) + { + zlog_debug ("%s: no valid routes at node %p, so not attempting moves", + __func__, new_vpn_node); + return; + } + + /* + * Find first parent node that has monitors + */ + for (par = new_vpn_node->parent; par; par = par->parent) + { + if (RFAPI_MONITOR_VPN (par)) + break; + } + + if (!par) + { + zlog_debug ("%s: no parent nodes with monitors, done", __func__); + return; + } + + /* + * Check each of these monitors to see of their longest-match + * is now the updated node. Move any such monitors to the more- + * specific updated node + */ + for (mlast = NULL, monitor = RFAPI_MONITOR_VPN (par); monitor;) + { + + /* + * If new longest match for monitor prefix is the new + * route's prefix, move monitor to new route's prefix + */ + if (prefix_match (&new_vpn_node->p, &monitor->p)) + { + /* detach */ + if (mlast) + { + mlast->next = monitor->next; + } + else + { + RFAPI_MONITOR_VPN_W_ALLOC (par) = monitor->next; + } + + + /* attach */ + monitor->next = RFAPI_MONITOR_VPN (new_vpn_node); + RFAPI_MONITOR_VPN_W_ALLOC (new_vpn_node) = monitor; + monitor->node = new_vpn_node; + + route_lock_node (new_vpn_node); /* incr refcount */ + + monitor = mlast ? 
mlast->next : RFAPI_MONITOR_VPN (par); + + RFAPI_CHECK_REFCOUNT (par, SAFI_MPLS_VPN, 1); + /* decr refcount after we're done with par as this might free it */ + route_unlock_node (par); + + continue; + } + mlast = monitor; + monitor = monitor->next; + } + + RFAPI_CHECK_REFCOUNT (new_vpn_node, SAFI_MPLS_VPN, 0); +} + + +static void +rfapiBgpInfoChainFree (struct bgp_info *bi) +{ + struct bgp_info *next; + + while (bi) + { + + /* + * If there is a timer waiting to delete this bi, cancel + * the timer and delete immediately + */ + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED) && + bi->extra->vnc.import.timer) + { + + struct thread *t = (struct thread *) bi->extra->vnc.import.timer; + struct rfapi_withdraw *wcb = t->arg; + + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + thread_cancel (t); + } + + next = bi->next; + bi->next = NULL; + rfapiBgpInfoFree (bi); + bi = next; + } +} + +static void +rfapiImportTableFlush (struct rfapi_import_table *it) +{ + afi_t afi; + + /* + * Free ecommunity + */ + ecommunity_free (&it->rt_import_list); + it->rt_import_list = NULL; + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + struct route_node *rn; + + for (rn = route_top (it->imported_vpn[afi]); rn; rn = route_next (rn)) + { + /* + * Each route_node has: + * aggregate: points to rfapi_it_extra with monitor chain(s) + * info: points to chain of bgp_info + */ + /* free bgp_info and its children */ + rfapiBgpInfoChainFree (rn->info); + rn->info = NULL; + + rfapiMonitorExtraFlush (SAFI_MPLS_VPN, rn); + } + + for (rn = route_top (it->imported_encap[afi]); rn; rn = route_next (rn)) + { + /* free bgp_info and its children */ + rfapiBgpInfoChainFree (rn->info); + rn->info = NULL; + + rfapiMonitorExtraFlush (SAFI_ENCAP, rn); + } + + route_table_finish (it->imported_vpn[afi]); + route_table_finish (it->imported_encap[afi]); + } + if (it->monitor_exterior_orphans) + { + skiplist_free (it->monitor_exterior_orphans); + } +} + +void +rfapiImportTableRefDelByIt ( + struct bgp *bgp, + struct rfapi_import_table *it_target) +{ + struct rfapi *h; + struct rfapi_import_table *it; + struct rfapi_import_table *prev = NULL; + + assert (it_target); + + h = bgp->rfapi; + assert (h); + + for (it = h->imports; it; prev = it, it = it->next) + { + if (it == it_target) + break; + } + + assert (it); + assert (it->refcount); + + it->refcount -= 1; + + if (!it->refcount) + { + if (prev) + { + prev->next = it->next; + } + else + { + h->imports = it->next; + } + rfapiImportTableFlush (it); + XFREE (MTYPE_RFAPI_IMPORTTABLE, it); + } +} + +#if RFAPI_REQUIRE_ENCAP_BEEC +/* + * Look for magic BGP Encapsulation Extended Community value + * Format in RFC 5512 Sect. 
4.5 + */ +static int +rfapiEcommunitiesMatchBeec (struct ecommunity *ecom, + bgp_encap_types type) +{ + int i; + + if (!ecom) + return 0; + + for (i = 0; i < (ecom->size * ECOMMUNITY_SIZE); i += ECOMMUNITY_SIZE) + { + + uint8_t *ep; + + ep = ecom->val + i; + + if (ep[0] == ECOMMUNITY_ENCODE_OPAQUE && + ep[1] == ECOMMUNITY_OPAQUE_SUBTYPE_ENCAP && + ep[6] == ((type && 0xff00)>>8) && + ep[7] == (type&0xff)) + { + + return 1; + } + } + return 0; + +} +#endif + +int +rfapiEcommunitiesIntersect (struct ecommunity *e1, struct ecommunity *e2) +{ + int i, j; + + if (!e1 || !e2) + return 0; + + { + char *s1, *s2; + s1 = ecommunity_ecom2str (e1, ECOMMUNITY_FORMAT_DISPLAY); + s2 = ecommunity_ecom2str (e2, ECOMMUNITY_FORMAT_DISPLAY); + zlog_debug ("%s: e1[%s], e2[%s]", __func__, s1, s2); + XFREE (MTYPE_ECOMMUNITY_STR, s1); + XFREE (MTYPE_ECOMMUNITY_STR, s2); + } + + for (i = 0; i < e1->size; ++i) + { + for (j = 0; j < e2->size; ++j) + { + if (!memcmp (e1->val + (i * ECOMMUNITY_SIZE), + e2->val + (j * ECOMMUNITY_SIZE), ECOMMUNITY_SIZE)) + { + + return 1; + } + } + } + return 0; +} + +int +rfapiEcommunityGetLNI (struct ecommunity *ecom, uint32_t * lni) +{ + if (ecom) + { + int i; + for (i = 0; i < ecom->size; ++i) + { + uint8_t *p = ecom->val + (i * ECOMMUNITY_SIZE); + + if ((*(p + 0) == 0x00) && (*(p + 1) == 0x02)) + { + + *lni = (*(p + 5) << 16) | (*(p + 6) << 8) | (*(p + 7)); + return 0; + } + } + } + return ENOENT; +} + +static int +rfapiVpnBiNhEqualsPt (struct bgp_info *bi, struct rfapi_ip_addr *hpt) +{ + uint8_t family; + + if (!hpt || !bi) + return 0; + + family = BGP_MP_NEXTHOP_FAMILY (bi->attr->extra->mp_nexthop_len); + + if (hpt->addr_family != family) + return 0; + + switch (family) + { + case AF_INET: + if (bi->attr->extra->mp_nexthop_global_in.s_addr != hpt->addr.v4.s_addr) + return 0; + break; + + case AF_INET6: + if (IPV6_ADDR_CMP (&bi->attr->extra->mp_nexthop_global, &hpt->addr.v6)) + return 0; + break; + + default: + return 0; + break; + } + + return 1; +} + + +/* + * Compare 2 VPN BIs. 
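+ * The VN address is the MP_REACH nexthop; the UN address is the tunnel
+ * endpoint taken from the Encap TLV or, failing that, cached from the
+ * corresponding ENCAP SAFI route.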
Return true if they have the same VN and UN addresses + */ +static int +rfapiVpnBiSamePtUn (struct bgp_info *bi1, struct bgp_info *bi2) +{ + struct prefix pfx_un1; + struct prefix pfx_un2; + + if (!bi1 || !bi2) + return 0; + + if (!bi1->attr || !bi2->attr) + return 0; + + if (!bi1->attr->extra || !bi2->attr->extra) + return 0; + + /* + * VN address comparisons + */ + + if (BGP_MP_NEXTHOP_FAMILY (bi1->attr->extra->mp_nexthop_len) != + BGP_MP_NEXTHOP_FAMILY (bi2->attr->extra->mp_nexthop_len)) + { + return 0; + } + + switch (BGP_MP_NEXTHOP_FAMILY (bi1->attr->extra->mp_nexthop_len)) + { + + case AF_INET: + if (bi1->attr->extra->mp_nexthop_global_in.s_addr != + bi2->attr->extra->mp_nexthop_global_in.s_addr) + return 0; + break; + + case AF_INET6: + if (IPV6_ADDR_CMP (&bi1->attr->extra->mp_nexthop_global, + &bi2->attr->extra->mp_nexthop_global)) + return 0; + break; + + default: + return 0; + break; + } + + /* + * UN address comparisons + */ + if (rfapiGetVncTunnelUnAddr (bi1->attr, &pfx_un1)) + { + if (bi1->extra) + { + pfx_un1.family = bi1->extra->vnc.import.un_family; + switch (bi1->extra->vnc.import.un_family) + { + case AF_INET: + pfx_un1.u.prefix4 = bi1->extra->vnc.import.un.addr4; + break; + case AF_INET6: + pfx_un1.u.prefix6 = bi1->extra->vnc.import.un.addr6; + break; + default: + pfx_un1.family = 0; + break; + } + } + } + + if (rfapiGetVncTunnelUnAddr (bi2->attr, &pfx_un2)) + { + if (bi2->extra) + { + pfx_un2.family = bi2->extra->vnc.import.un_family; + switch (bi2->extra->vnc.import.un_family) + { + case AF_INET: + pfx_un2.u.prefix4 = bi2->extra->vnc.import.un.addr4; + break; + case AF_INET6: + pfx_un2.u.prefix6 = bi2->extra->vnc.import.un.addr6; + break; + default: + pfx_un2.family = 0; + break; + } + } + } + + if (!pfx_un1.family || !pfx_un2.family) + return 0; + + if (pfx_un1.family != pfx_un2.family) + return 0; + + switch (pfx_un1.family) + { + case AF_INET: + if (!IPV4_ADDR_SAME + (&pfx_un1.u.prefix4.s_addr, &pfx_un2.u.prefix4.s_addr)) + return 0; + break; + case AF_INET6: + if (!IPV6_ADDR_SAME (&pfx_un1.u.prefix6, &pfx_un2.u.prefix6)) + return 0; + break; + } + + + + return 1; +} + +uint8_t +rfapiRfpCost (struct attr * attr) +{ + if (attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)) + { + if (attr->local_pref > 255) + { + return 0; + } + return 255 - attr->local_pref; + } + + return 255; +} + +/*------------------------------------------ + * rfapi_extract_l2o + * + * Find Layer 2 options in an option chain + * + * input: + * pHop option chain + * + * output: + * l2o layer 2 options extracted + * + * return value: + * 0 OK + * 1 no options found + * + --------------------------------------------*/ +int +rfapi_extract_l2o (struct bgp_tea_options *pHop, /* chain of options */ + struct rfapi_l2address_option *l2o) /* return extracted value */ +{ + struct bgp_tea_options *p; + + for (p = pHop; p; p = p->next) + { + if ((p->type == RFAPI_VN_OPTION_TYPE_L2ADDR) && (p->length >= 8)) + { + + char *v = p->value; + + memcpy (&l2o->macaddr, v, 6); + + l2o->label = + ((v[6] << 12) & 0xff000) + + ((v[7] << 4) & 0xff0) + ((v[8] >> 4) & 0xf); + + l2o->local_nve_id = (uint8_t) v[10]; + + l2o->logical_net_id = (v[11] << 16) + (v[12] << 8) + (v[13] << 0); + + return 0; + } + } + return 1; +} + +static struct rfapi_next_hop_entry * +rfapiRouteInfo2NextHopEntry ( + struct rfapi_ip_prefix *rprefix, + struct bgp_info *bi, /* route to encode */ + uint32_t lifetime, /* use this in nhe */ + struct route_node *rn) /* req for L2 eth addr */ +{ + struct rfapi_next_hop_entry *new; + int have_vnc_tunnel_un = 
0; + +#if DEBUG_ENCAP_MONITOR + zlog_debug ("%s: entry, bi %p, rn %p", __func__, bi, rn); +#endif + + new = XCALLOC (MTYPE_RFAPI_NEXTHOP, sizeof (struct rfapi_next_hop_entry)); + assert (new); + + new->prefix = *rprefix; + + if (bi->extra && + decode_rd_type(bi->extra->vnc.import.rd.val) == RD_TYPE_VNC_ETH) + { + /* ethernet */ + + struct rfapi_vn_option *vo; + + vo = XCALLOC (MTYPE_RFAPI_VN_OPTION, sizeof (struct rfapi_vn_option)); + assert (vo); + + vo->type = RFAPI_VN_OPTION_TYPE_L2ADDR; + + memcpy (&vo->v.l2addr.macaddr, &rn->p.u.prefix_eth.octet, + ETHER_ADDR_LEN); + /* only low 3 bytes of this are significant */ + if (bi->attr && bi->attr->extra) + { + (void) rfapiEcommunityGetLNI (bi->attr->extra->ecommunity, + &vo->v.l2addr.logical_net_id); + } + + /* local_nve_id comes from lower byte of RD type */ + vo->v.l2addr.local_nve_id = bi->extra->vnc.import.rd.val[1]; + + /* label comes from MP_REACH_NLRI label */ + vo->v.l2addr.label = decode_label (bi->extra->tag); + + new->vn_options = vo; + + /* + * If there is an auxiliary prefix (i.e., host IP address), + * use it as the nexthop prefix instead of the query prefix + */ + if (bi->extra->vnc.import.aux_prefix.family) + { + rfapiQprefix2Rprefix (&bi->extra->vnc.import.aux_prefix, + &new->prefix); + } + } + + if (bi->attr) + { + bgp_encap_types tun_type; + new->prefix.cost = rfapiRfpCost (bi->attr); + + if (bi->attr->extra) + { + + struct bgp_attr_encap_subtlv *pEncap; + + switch (BGP_MP_NEXTHOP_FAMILY (bi->attr->extra->mp_nexthop_len)) + { + case AF_INET: + new->vn_address.addr_family = AF_INET; + new->vn_address.addr.v4 = bi->attr->extra->mp_nexthop_global_in; + break; + + case AF_INET6: + new->vn_address.addr_family = AF_INET6; + new->vn_address.addr.v6 = bi->attr->extra->mp_nexthop_global; + break; + + default: + zlog_warn ("%s: invalid vpn nexthop length: %d", + __func__, bi->attr->extra->mp_nexthop_len); + rfapi_free_next_hop_list (new); + return NULL; + } + + for (pEncap = bi->attr->extra->vnc_subtlvs; pEncap; + pEncap = pEncap->next) + { + switch (pEncap->type) + { + case BGP_VNC_SUBTLV_TYPE_LIFETIME: + /* use configured lifetime, not attr lifetime */ + break; + + default: + zlog_warn ("%s: unknown VNC option type %d", + __func__, pEncap->type); + + + break; + } + } + + rfapiGetTunnelType (bi->attr, &tun_type); + if (tun_type == BGP_ENCAP_TYPE_MPLS) + { + struct prefix p; + /* MPLS carries UN address in next hop */ + rfapiNexthop2Prefix (bi->attr, &p); + if (p.family != 0) + { + rfapiQprefix2Raddr(&p, &new->un_address); + have_vnc_tunnel_un = 1; + } + } + + for (pEncap = bi->attr->extra->encap_subtlvs; pEncap; + pEncap = pEncap->next) + { + switch (pEncap->type) + { + case BGP_ENCAP_SUBTLV_TYPE_REMOTE_ENDPOINT: + /* + * Overrides ENCAP UN address, if any + */ + switch (pEncap->length) + { + + case 8: + new->un_address.addr_family = AF_INET; + memcpy (&new->un_address.addr.v4, pEncap->value, 4); + have_vnc_tunnel_un = 1; + break; + + case 20: + new->un_address.addr_family = AF_INET6; + memcpy (&new->un_address.addr.v6, pEncap->value, 16); + have_vnc_tunnel_un = 1; + break; + + default: + zlog_warn + ("%s: invalid tunnel subtlv UN addr length (%d) for bi %p", + __func__, pEncap->length, bi); + } + break; + + default: + zlog_warn ("%s: unknown Encap Attribute option type %d", + __func__, pEncap->type); + + + break; + } + } + + new->un_options = rfapi_encap_tlv_to_un_option (bi->attr); + +#if DEBUG_ENCAP_MONITOR + zlog_debug ("%s: line %d: have_vnc_tunnel_un=%d", + __func__, __LINE__, have_vnc_tunnel_un); +#endif + + if 
(!have_vnc_tunnel_un && bi && bi->extra) + { + /* + * use cached UN address from ENCAP route + */ + new->un_address.addr_family = bi->extra->vnc.import.un_family; + switch (new->un_address.addr_family) + { + case AF_INET: + new->un_address.addr.v4 = bi->extra->vnc.import.un.addr4; + break; + case AF_INET6: + new->un_address.addr.v6 = bi->extra->vnc.import.un.addr6; + break; + default: + zlog_warn ("%s: invalid UN addr family (%d) for bi %p", + __func__, new->un_address.addr_family, bi); + rfapi_free_next_hop_list (new); + return NULL; + break; + } + } + } + } + new->lifetime = lifetime; + return new; +} + +int +rfapiHasNonRemovedRoutes (struct route_node *rn) +{ + struct bgp_info *bi; + + for (bi = rn->info; bi; bi = bi->next) + { + struct prefix pfx; + + if (!CHECK_FLAG (bi->flags, BGP_INFO_REMOVED) && + (bi->extra && !rfapiGetUnAddrOfVpnBi (bi, &pfx))) + { + + return 1; + } + } + return 0; +} + +#if DEBUG_IT_NODES +/* + * DEBUG FUNCTION + */ +void +rfapiDumpNode (struct route_node *rn) +{ + struct bgp_info *bi; + + zlog_debug ("%s: rn=%p", __func__, rn); + for (bi = rn->info; bi; bi = bi->next) + { + struct prefix pfx; + int ctrc = rfapiGetUnAddrOfVpnBi (bi, &pfx); + int nr; + + if (!CHECK_FLAG (bi->flags, BGP_INFO_REMOVED) && (bi->extra && !ctrc)) + { + + nr = 1; + } + else + { + nr = 0; + } + + zlog_debug (" bi=%p, nr=%d, flags=0x%x, extra=%p, ctrc=%d", + bi, nr, bi->flags, bi->extra, ctrc); + } +} +#endif + +static int +rfapiNhlAddNodeRoutes ( + struct route_node *rn, /* in */ + struct rfapi_ip_prefix *rprefix, /* in */ + uint32_t lifetime, /* in */ + int removed, /* in */ + struct rfapi_next_hop_entry **head, /* in/out */ + struct rfapi_next_hop_entry **tail, /* in/out */ + struct rfapi_ip_addr *exclude_vnaddr, /* omit routes to same NVE */ + struct route_node *rfd_rib_node,/* preload this NVE rib node */ + struct prefix *pfx_target_original) /* query target */ +{ + struct bgp_info *bi; + struct rfapi_next_hop_entry *new; + struct prefix pfx_un; + struct skiplist *seen_nexthops; + int count = 0; + int is_l2 = (rn->p.family == AF_ETHERNET); + + if (rfapiRibFTDFilterRecentPrefix( + (struct rfapi_descriptor *)(rfd_rib_node->table->info), rn, + pfx_target_original)) + { + return 0; + } + + seen_nexthops = + skiplist_new (0, vnc_prefix_cmp, (void (*)(void *)) prefix_free); + + for (bi = rn->info; bi; bi = bi->next) + { + + struct prefix pfx_vn; + struct prefix *newpfx; + + if (removed && !CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + { +#if DEBUG_RETURNED_NHL + zlog_debug ("%s: want holddown, this route not holddown, skip", + __func__); +#endif + continue; + } + if (!removed && CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + { + continue; + } + + if (!bi->extra) + { + continue; + } + + /* + * Check for excluded VN address + */ + if (rfapiVpnBiNhEqualsPt (bi, exclude_vnaddr)) + continue; + + /* + * Check for VN address (nexthop) copied already + */ + if (is_l2) + { + /* L2 routes: semantic nexthop in aux_prefix; VN addr ain't it */ + pfx_vn = bi->extra->vnc.import.aux_prefix; + } + else + { + rfapiNexthop2Prefix (bi->attr, &pfx_vn); + } + if (!skiplist_search (seen_nexthops, &pfx_vn, NULL)) + { +#if DEBUG_RETURNED_NHL + char buf[BUFSIZ]; + + prefix2str (&pfx_vn, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + zlog_debug ("%s: already put VN/nexthop %s, skip", __func__, buf); +#endif + continue; + } + + if (rfapiGetUnAddrOfVpnBi (bi, &pfx_un)) + { +#if DEBUG_ENCAP_MONITOR + zlog_debug ("%s: failed to get UN address of this VPN bi", + __func__); +#endif + continue; + } + + 
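+      /*
+       * Record this VN address so subsequent routes at this node with the
+       * same nexthop are skipped; the skiplist's delete callback
+       * (prefix_free) releases the copy when seen_nexthops is freed at
+       * the end of this function.
+       */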
newpfx = prefix_new (); + *newpfx = pfx_vn; + skiplist_insert (seen_nexthops, newpfx, newpfx); + + new = rfapiRouteInfo2NextHopEntry(rprefix, bi, lifetime, rn); + if (new) + { + if (rfapiRibPreloadBi(rfd_rib_node, &pfx_vn, &pfx_un, lifetime, bi)) + { + /* duplicate filtered by RIB */ + rfapi_free_next_hop_list (new); + new = NULL; + } + } + + if (new) + { + if (*tail) + { + (*tail)->next = new; + } + else + { + *head = new; + } + *tail = new; + ++count; + } + } + + skiplist_free (seen_nexthops); + + return count; +} + + +/* + * Breadth-first + * + * omit_node is meant for the situation where we are adding a subtree + * of a parent of some original requested node. The response already + * contains the original requested node, and we don't want to duplicate + * its routes in the list, so we skip it if the right or left node + * matches (of course, we still travel down its child subtrees). + */ +static int +rfapiNhlAddSubtree ( + struct route_node *rn, /* in */ + uint32_t lifetime, /* in */ + struct rfapi_next_hop_entry **head, /* in/out */ + struct rfapi_next_hop_entry **tail, /* in/out */ + struct route_node *omit_node, /* in */ + struct rfapi_ip_addr *exclude_vnaddr,/* omit routes to same NVE */ + struct route_table *rfd_rib_table,/* preload here */ + struct prefix *pfx_target_original) /* query target */ +{ + struct rfapi_ip_prefix rprefix; + int rcount = 0; + + if (rn->l_left && rn->l_left != omit_node) + { + if (rn->l_left->info) + { + int count = 0; + struct route_node *rib_rn = NULL; + + rfapiQprefix2Rprefix (&rn->l_left->p, &rprefix); + if (rfd_rib_table) + { + rib_rn = route_node_get(rfd_rib_table, &rn->l_left->p); + } + + count = rfapiNhlAddNodeRoutes (rn->l_left, &rprefix, lifetime, 0, + head, tail, exclude_vnaddr, rib_rn, pfx_target_original); + if (!count) + { + count = rfapiNhlAddNodeRoutes (rn->l_left, &rprefix, lifetime, 1, + head, tail, exclude_vnaddr, rib_rn, pfx_target_original); + } + rcount += count; + if (rib_rn) + route_unlock_node(rib_rn); + } + } + + if (rn->l_right && rn->l_right != omit_node) + { + if (rn->l_right->info) + { + int count = 0; + struct route_node *rib_rn = NULL; + + rfapiQprefix2Rprefix (&rn->l_right->p, &rprefix); + if (rfd_rib_table) + { + rib_rn = route_node_get(rfd_rib_table, &rn->l_right->p); + } + count = rfapiNhlAddNodeRoutes (rn->l_right, &rprefix, lifetime, 0, + head, tail, exclude_vnaddr, rib_rn, pfx_target_original); + if (!count) + { + count = rfapiNhlAddNodeRoutes (rn->l_right, &rprefix, lifetime, 1, + head, tail, exclude_vnaddr, rib_rn, pfx_target_original); + } + rcount += count; + if (rib_rn) + route_unlock_node(rib_rn); + } + } + + if (rn->l_left) + { + rcount += rfapiNhlAddSubtree (rn->l_left, lifetime, head, tail, omit_node, + exclude_vnaddr, rfd_rib_table, pfx_target_original); + } + if (rn->l_right) + { + rcount += rfapiNhlAddSubtree (rn->l_right, lifetime, head, tail, + omit_node, exclude_vnaddr, rfd_rib_table, pfx_target_original); + } + + return rcount; +} + +/* + * Implementation of ROUTE_LIST(node) from RFAPI-Import-Event-Handling.txt + * + * Construct an rfapi nexthop list based on the routes attached to + * the specified node. + * + * If there are any routes that do NOT have BGP_INFO_REMOVED set, + * return those only. If there are ONLY routes with BGP_INFO_REMOVED, + * then return those, and also include all the non-removed routes from the + * next less-specific node (i.e., this node's parent) at the end. 
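+ * Matching routes are also preloaded into the NVE's RIB via rfd_rib_table,
+ * and the subtree below the node (and, when needed, the parent's) is
+ * folded in via rfapiNhlAddSubtree().
+ *
+ * Typical use (illustrative sketch; variable names are hypothetical):
+ *   nhl = rfapiRouteNode2NextHopList (rn, lifetime, &querying_vn_addr,
+ *                                     nve_rib_table, &original_target);
+ *   ... build the response from nhl ...
+ *   rfapi_free_next_hop_list (nhl);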
+ */ +struct rfapi_next_hop_entry * +rfapiRouteNode2NextHopList ( + struct route_node *rn, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr,/* omit routes to same NVE */ + struct route_table *rfd_rib_table,/* preload here */ + struct prefix *pfx_target_original) /* query target */ +{ + struct rfapi_ip_prefix rprefix; + struct rfapi_next_hop_entry *answer = NULL; + struct rfapi_next_hop_entry *last = NULL; + struct route_node *parent; + int count = 0; + struct route_node *rib_rn; + +#if DEBUG_RETURNED_NHL + { + char buf[BUFSIZ]; + + prefix2str (&rn->p, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s: called with node pfx=%s", __func__, buf); + } + rfapiDebugBacktrace (); +#endif + + rfapiQprefix2Rprefix (&rn->p, &rprefix); + + rib_rn = rfd_rib_table? route_node_get(rfd_rib_table, &rn->p): NULL; + + /* + * Add non-withdrawn routes at this node + */ + count = rfapiNhlAddNodeRoutes (rn, &rprefix, lifetime, 0, &answer, &last, + exclude_vnaddr, rib_rn, pfx_target_original); + + /* + * If the list has at least one entry, it's finished + */ + if (count) + { + count += rfapiNhlAddSubtree (rn, lifetime, &answer, &last, NULL, + exclude_vnaddr, rfd_rib_table, pfx_target_original); + zlog_debug ("%s: %d nexthops, answer=%p", __func__, count, answer); +#if DEBUG_RETURNED_NHL + rfapiPrintNhl (NULL, answer); +#endif + if (rib_rn) + route_unlock_node(rib_rn); + return answer; + } + + /* + * Add withdrawn routes at this node + */ + count = rfapiNhlAddNodeRoutes (rn, &rprefix, lifetime, 1, &answer, &last, + exclude_vnaddr, rib_rn, pfx_target_original); + if (rib_rn) + route_unlock_node(rib_rn); + + // rfapiPrintNhl(NULL, answer); + + /* + * walk up the tree until we find a node with non-deleted + * routes, then add them + */ + for (parent = rn->parent; parent; parent = parent->parent) + { + if (rfapiHasNonRemovedRoutes (parent)) + { + break; + } + } + + /* + * Add non-withdrawn routes from less-specific prefix + */ + if (parent) + { + rib_rn = rfd_rib_table? route_node_get(rfd_rib_table, &parent->p): NULL; + rfapiQprefix2Rprefix (&parent->p, &rprefix); + count += rfapiNhlAddNodeRoutes (parent, &rprefix, lifetime, 0, + &answer, &last, exclude_vnaddr, rib_rn, pfx_target_original); + count += rfapiNhlAddSubtree (parent, lifetime, &answer, &last, rn, + exclude_vnaddr, rfd_rib_table, pfx_target_original); + if (rib_rn) + route_unlock_node(rib_rn); + } + else + { + /* + * There is no parent with non-removed routes. Still need to + * add subtree of original node if it contributed routes to the + * answer. 
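+ * (count is nonzero here only if the holddown routes added at this
+ * node just above produced any entries; otherwise there is nothing
+ * for the subtree walk to supplement.)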
+ */ + if (count) + count += rfapiNhlAddSubtree (rn, lifetime, &answer, &last, rn, + exclude_vnaddr, rfd_rib_table, pfx_target_original); + } + + zlog_debug ("%s: %d nexthops, answer=%p", __func__, count, answer); +#if DEBUG_RETURNED_NHL + rfapiPrintNhl (NULL, answer); +#endif + return answer; +} + +/* + * Construct nexthop list of all routes in table + */ +struct rfapi_next_hop_entry * +rfapiRouteTable2NextHopList ( + struct route_table *rt, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr,/* omit routes to same NVE */ + struct route_table *rfd_rib_table, /* preload this NVE rib table */ + struct prefix *pfx_target_original) /* query target */ +{ + struct route_node *rn; + struct rfapi_next_hop_entry *biglist = NULL; + struct rfapi_next_hop_entry *nhl; + struct rfapi_next_hop_entry *tail = NULL; + int count = 0; + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + nhl = rfapiRouteNode2NextHopList (rn, lifetime, exclude_vnaddr, + rfd_rib_table, pfx_target_original); + if (!tail) + { + tail = biglist = nhl; + if (tail) + count = 1; + } + else + { + tail->next = nhl; + } + if (tail) + { + while (tail->next) + { + ++count; + tail = tail->next; + } + } + } + + zlog_debug ("%s: returning %d routes", __func__, count); + return biglist; +} + +struct rfapi_next_hop_entry * +rfapiEthRouteNode2NextHopList ( + struct route_node *rn, + struct rfapi_ip_prefix *rprefix, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr,/* omit routes to same NVE */ + struct route_table *rfd_rib_table,/* preload NVE rib table */ + struct prefix *pfx_target_original) /* query target */ +{ + int count = 0; + struct rfapi_next_hop_entry *answer = NULL; + struct rfapi_next_hop_entry *last = NULL; + struct route_node *rib_rn; + + rib_rn = rfd_rib_table? 
route_node_get(rfd_rib_table, &rn->p): NULL; + + count = rfapiNhlAddNodeRoutes (rn, rprefix, lifetime, 0, &answer, &last, + NULL, rib_rn, pfx_target_original); + +#if DEBUG_ENCAP_MONITOR + zlog_debug ("%s: node %p: %d non-holddown routes", __func__, rn, count); +#endif + + if (!count) + { + count = rfapiNhlAddNodeRoutes (rn, rprefix, lifetime, 1, &answer, &last, + exclude_vnaddr, rib_rn, pfx_target_original); + zlog_debug ("%s: node %p: %d holddown routes", __func__, rn, count); + } + + if (rib_rn) + route_unlock_node(rib_rn); + +#if DEBUG_RETURNED_NHL + rfapiPrintNhl (NULL, answer); +#endif + + return answer; +} + + +/* + * Construct nexthop list of all routes in table + */ +struct rfapi_next_hop_entry * +rfapiEthRouteTable2NextHopList ( + uint32_t logical_net_id, + struct rfapi_ip_prefix *rprefix, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr,/* omit routes to same NVE */ + struct route_table *rfd_rib_table, /* preload NVE rib node */ + struct prefix *pfx_target_original) /* query target */ +{ + struct rfapi_import_table *it; + struct bgp *bgp = bgp_get_default (); + struct route_table *rt; + struct route_node *rn; + struct rfapi_next_hop_entry *biglist = NULL; + struct rfapi_next_hop_entry *nhl; + struct rfapi_next_hop_entry *tail = NULL; + int count = 0; + + + it = rfapiMacImportTableGet (bgp, logical_net_id); + rt = it->imported_vpn[AFI_ETHER]; + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + nhl = rfapiEthRouteNode2NextHopList(rn, rprefix, lifetime, + exclude_vnaddr, rfd_rib_table, pfx_target_original); + if (!tail) + { + tail = biglist = nhl; + if (tail) + count = 1; + } + else + { + tail->next = nhl; + } + if (tail) + { + while (tail->next) + { + ++count; + tail = tail->next; + } + } + } + + zlog_debug ("%s: returning %d routes", __func__, count); + return biglist; +} + +/* + * Insert a new bi to the imported route table node, + * keeping the list of BIs sorted best route first + */ +static void +rfapiBgpInfoAttachSorted ( + struct route_node *rn, + struct bgp_info *info_new, + afi_t afi, + safi_t safi) +{ + struct bgp *bgp; + struct bgp_info *prev; + struct bgp_info *next; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + + if (VNC_DEBUG(IMPORT_BI_ATTACH)) + { + zlog_debug ("%s: info_new->peer=%p", __func__, info_new->peer); + zlog_debug ("%s: info_new->peer->su_remote=%p", __func__, + info_new->peer->su_remote); + } + + for (prev = NULL, next = rn->info; next; prev = next, next = next->next) + { + if (!bgp || + (!CHECK_FLAG (info_new->flags, BGP_INFO_REMOVED) && + CHECK_FLAG (next->flags, BGP_INFO_REMOVED)) || + bgp_info_cmp_compatible (bgp, info_new, next, afi, safi) == -1) + { /* -1 if 1st is better */ + break; + } + } + zlog_debug ("%s: prev=%p, next=%p", __func__, prev, next); + if (prev) + { + prev->next = info_new; + } + else + { + rn->info = info_new; + } + info_new->prev = prev; + info_new->next = next; + if (next) + next->prev = info_new; +} + +static void +rfapiBgpInfoDetach (struct route_node *rn, struct bgp_info *bi) +{ + /* + * Remove the route (doubly-linked) + */ + if (bi->next) + bi->next->prev = bi->prev; + if (bi->prev) + bi->prev->next = bi->next; + else + rn->info = bi->next; +} + +/* + * For L3-indexed import tables + */ +static int +rfapi_bi_peer_rd_cmp (void *b1, void *b2) +{ + struct bgp_info *bi1 = b1; + struct bgp_info *bi2 = b2; + + /* + * Compare peers + */ + if (bi1->peer < bi2->peer) + return -1; + if (bi1->peer > bi2->peer) + return 1; + + /* + * compare RDs + */ + return 
vnc_prefix_cmp ((struct prefix *) &bi1->extra->vnc.import.rd, + (struct prefix *) &bi2->extra->vnc.import.rd); +} + +/* + * For L2-indexed import tables + * The BIs in these tables should ALWAYS have an aux_prefix set because + * they arrive via IPv4 or IPv6 advertisements. + */ +static int +rfapi_bi_peer_rd_aux_cmp (void *b1, void *b2) +{ + struct bgp_info *bi1 = b1; + struct bgp_info *bi2 = b2; + int rc; + + /* + * Compare peers + */ + if (bi1->peer < bi2->peer) + return -1; + if (bi1->peer > bi2->peer) + return 1; + + /* + * compare RDs + */ + rc = vnc_prefix_cmp ((struct prefix *) &bi1->extra->vnc.import.rd, + (struct prefix *) &bi2->extra->vnc.import.rd); + if (rc) + { + return rc; + } + + /* + * L2 import tables can have multiple entries with the + * same MAC address, same RD, but different L3 addresses. + * + * Use presence of aux_prefix with AF=ethernet and prefixlen=1 + * as magic value to signify explicit wildcarding of the aux_prefix. + * This magic value will not appear in bona fide bi entries in + * the import table, but is allowed in the "fake" bi used to + * probe the table when searching. (We have to test both b1 and b2 + * because there is no guarantee of the order the test key and + * the real key will be passed) + */ + if ((bi1->extra->vnc.import.aux_prefix.family == AF_ETHERNET && + (bi1->extra->vnc.import.aux_prefix.prefixlen == 1)) || + (bi2->extra->vnc.import.aux_prefix.family == AF_ETHERNET && + (bi2->extra->vnc.import.aux_prefix.prefixlen == 1))) + { + + /* + * wildcard aux address specified + */ + return 0; + } + + return vnc_prefix_cmp (&bi1->extra->vnc.import.aux_prefix, + &bi2->extra->vnc.import.aux_prefix); +} + + +/* + * Index on RD and Peer + */ +static void +rfapiItBiIndexAdd ( + struct route_node *rn, /* Import table VPN node */ + struct bgp_info *bi) /* new BI */ +{ + struct skiplist *sl; + + assert (rn); + assert (bi); + assert (bi->extra); + + { + char buf[BUFSIZ]; + prefix_rd2str (&bi->extra->vnc.import.rd, buf, BUFSIZ); + zlog_debug ("%s: bi %p, peer %p, rd %s", __func__, bi, bi->peer, buf); + } + + sl = RFAPI_RDINDEX_W_ALLOC (rn); + if (!sl) + { + if (AF_ETHERNET == rn->p.family) + { + sl = skiplist_new (0, rfapi_bi_peer_rd_aux_cmp, NULL); + } + else + { + sl = skiplist_new (0, rfapi_bi_peer_rd_cmp, NULL); + } + RFAPI_IT_EXTRA_GET (rn)->u.vpn.idx_rd = sl; + route_lock_node (rn); /* for skiplist */ + } + assert (!skiplist_insert (sl, (void *) bi, (void *) bi)); + route_lock_node (rn); /* for skiplist entry */ + + /* NB: BIs in import tables are not refcounted */ +} + +static void +rfapiItBiIndexDump (struct route_node *rn) +{ + struct skiplist *sl; + void *cursor = NULL; + struct bgp_info *k; + struct bgp_info *v; + int rc; + + sl = RFAPI_RDINDEX (rn); + if (!sl) + return; + + for (rc = skiplist_next (sl, (void **) &k, (void **) &v, &cursor); + !rc; rc = skiplist_next (sl, (void **) &k, (void **) &v, &cursor)) + { + + char buf[BUFSIZ]; + char buf_aux_pfx[BUFSIZ]; + + prefix_rd2str (&k->extra->vnc.import.rd, buf, BUFSIZ); + buf_aux_pfx[0] = 0; + if (k->extra->vnc.import.aux_prefix.family) + { + prefix2str (&k->extra->vnc.import.aux_prefix, buf_aux_pfx, BUFSIZ); + } + else + { + strncpy (buf_aux_pfx, "(none)", BUFSIZ); + buf_aux_pfx[BUFSIZ - 1] = 0; + } + + zlog_debug ("bi %p, peer %p, rd %s, aux_prefix %s", k, k->peer, buf, + buf_aux_pfx); + } +} + +static struct bgp_info * +rfapiItBiIndexSearch ( + struct route_node *rn, /* Import table VPN node */ + struct prefix_rd *prd, + struct peer *peer, + struct prefix *aux_prefix) /* optional L3 addr for L2 
ITs */ +{ + struct skiplist *sl; + int rc; + struct bgp_info bi_fake; + struct bgp_info_extra bi_extra; + struct bgp_info *bi_result; + + sl = RFAPI_RDINDEX (rn); + if (!sl) + return NULL; + +#if DEBUG_BI_SEARCH + { + char buf[BUFSIZ]; + char buf_aux_pfx[BUFSIZ]; + + prefix_rd2str (prd, buf, BUFSIZ); + if (aux_prefix) + { + prefix2str (aux_prefix, buf_aux_pfx, BUFSIZ); + } + else + { + strncpy (buf_aux_pfx, "(nil)", BUFSIZ - 1); + buf_aux_pfx[BUFSIZ - 1] = 0; + } + + zlog_debug ("%s want prd=%s, peer=%p, aux_prefix=%s", + __func__, buf, peer, buf_aux_pfx); + rfapiItBiIndexDump (rn); + } +#endif + + /* threshold is a WAG */ + if (sl->count < 3) + { +#if DEBUG_BI_SEARCH + zlog_debug ("%s: short list algorithm", __func__); +#endif + /* if short list, linear search might be faster */ + for (bi_result = rn->info; bi_result; bi_result = bi_result->next) + { +#if DEBUG_BI_SEARCH + { + char buf[BUFSIZ]; + prefix_rd2str (&bi_result->extra->vnc.import.rd, buf, BUFSIZ); + zlog_debug ("%s: bi has prd=%s, peer=%p", __func__, + buf, bi_result->peer); + } +#endif + if (peer == bi_result->peer && + !prefix_cmp ((struct prefix *) &bi_result->extra->vnc.import.rd, + (struct prefix *) prd)) + { + +#if DEBUG_BI_SEARCH + zlog_debug ("%s: peer and RD same, doing aux_prefix check", + __func__); +#endif + if (!aux_prefix || + !prefix_cmp (aux_prefix, + &bi_result->extra->vnc.import.aux_prefix)) + { + +#if DEBUG_BI_SEARCH + zlog_debug ("%s: match", __func__); +#endif + break; + } + + } + } + return bi_result; + } + + bi_fake.peer = peer; + bi_fake.extra = &bi_extra; + bi_fake.extra->vnc.import.rd = *(struct prefix_rd *) prd; + if (aux_prefix) + { + bi_fake.extra->vnc.import.aux_prefix = *aux_prefix; + } + else + { + /* wildcard */ + bi_fake.extra->vnc.import.aux_prefix.family = AF_ETHERNET; + bi_fake.extra->vnc.import.aux_prefix.prefixlen = 1; + } + + rc = skiplist_search (sl, (void *) &bi_fake, (void *) &bi_result); + + if (rc) + { +#if DEBUG_BI_SEARCH + zlog_debug ("%s: no match", __func__); +#endif + return NULL; + } + +#if DEBUG_BI_SEARCH + zlog_debug ("%s: matched bi=%p", __func__, bi_result); +#endif + + return bi_result; +} + +static void +rfapiItBiIndexDel ( + struct route_node *rn, /* Import table VPN node */ + struct bgp_info *bi) /* old BI */ +{ + struct skiplist *sl; + int rc; + + { + char buf[BUFSIZ]; + prefix_rd2str (&bi->extra->vnc.import.rd, buf, BUFSIZ); + zlog_debug ("%s: bi %p, peer %p, rd %s", __func__, bi, bi->peer, buf); + } + + sl = RFAPI_RDINDEX (rn); + assert (sl); + + rc = skiplist_delete (sl, (void *) (bi), (void *) bi); + if (rc) + { + rfapiItBiIndexDump (rn); + } + assert (!rc); + + route_unlock_node (rn); /* for skiplist entry */ + + /* NB: BIs in import tables are not refcounted */ +} + +/* + * Add a backreference at the ENCAP node to the VPN route that + * refers to it + */ +static void +rfapiMonitorEncapAdd ( + struct rfapi_import_table *import_table, + struct prefix *p, /* VN address */ + struct route_node *vpn_rn, /* VPN node */ + struct bgp_info *vpn_bi) /* VPN bi/route */ +{ + afi_t afi = family2afi (p->family); + struct route_node *rn; + struct rfapi_monitor_encap *m; + + assert (afi); + rn = route_node_get (import_table->imported_encap[afi], p); /* locks rn */ + assert (rn); + + m = + XCALLOC (MTYPE_RFAPI_MONITOR_ENCAP, sizeof (struct rfapi_monitor_encap)); + assert (m); + + m->node = vpn_rn; + m->bi = vpn_bi; + m->rn = rn; + + /* insert to encap node's list */ + m->next = RFAPI_MONITOR_ENCAP (rn); + if (m->next) + m->next->prev = m; + RFAPI_MONITOR_ENCAP_W_ALLOC (rn) = m; + 
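+  /* the new monitor is pushed onto the head of the encap node's
+     doubly-linked monitor list */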
+ /* for easy lookup when deleting vpn route */ + vpn_bi->extra->vnc.import.hme = m; + + zlog_debug + ("%s: it=%p, vpn_bi=%p, afi=%d, encap rn=%p, setting vpn_bi->extra->vnc.import.hme=%p", + __func__, import_table, vpn_bi, afi, rn, m); + + RFAPI_CHECK_REFCOUNT (rn, SAFI_ENCAP, 0); +} + +static void +rfapiMonitorEncapDelete (struct bgp_info *vpn_bi) +{ + /* + * Remove encap monitor + */ + zlog_debug ("%s: vpn_bi=%p", __func__, vpn_bi); + if (vpn_bi->extra) + { + struct rfapi_monitor_encap *hme = vpn_bi->extra->vnc.import.hme; + + if (hme) + { + + zlog_debug ("%s: hme=%p", __func__, hme); + + /* Refcount checking takes too long here */ + //RFAPI_CHECK_REFCOUNT(hme->rn, SAFI_ENCAP, 0); + if (hme->next) + hme->next->prev = hme->prev; + if (hme->prev) + hme->prev->next = hme->next; + else + RFAPI_MONITOR_ENCAP_W_ALLOC (hme->rn) = hme->next; + /* Refcount checking takes too long here */ + //RFAPI_CHECK_REFCOUNT(hme->rn, SAFI_ENCAP, 1); + + /* see if the struct rfapi_it_extra is empty and can be freed */ + rfapiMonitorExtraPrune (SAFI_ENCAP, hme->rn); + + route_unlock_node (hme->rn); /* decr ref count */ + XFREE (MTYPE_RFAPI_MONITOR_ENCAP, hme); + vpn_bi->extra->vnc.import.hme = NULL; + } + } +} + +/* + * quagga lib/thread.h says this must return int even though + * it doesn't do anything with the return value + */ +static int +rfapiWithdrawTimerVPN (struct thread *t) +{ + struct rfapi_withdraw *wcb = t->arg; + struct bgp_info *bi = wcb->info; + struct bgp *bgp = bgp_get_default (); + + struct rfapi_monitor_vpn *moved; + afi_t afi; + + assert (wcb->node); + assert (bi); + assert (wcb->import_table); + assert (bi->extra); + + RFAPI_CHECK_REFCOUNT (wcb->node, SAFI_MPLS_VPN, wcb->lockoffset); + + { + char buf[BUFSIZ]; + + zlog_debug ("%s: removing bi %p at prefix %s/%d", + __func__, + bi, + rfapi_ntop (wcb->node->p.family, &wcb->node->p.u.prefix, buf, + BUFSIZ), wcb->node->p.prefixlen); + } + + /* + * Remove the route (doubly-linked) + */ + if (CHECK_FLAG (bi->flags, BGP_INFO_VALID) + && VALID_INTERIOR_TYPE (bi->type)) + RFAPI_MONITOR_EXTERIOR (wcb->node)->valid_interior_count--; + + afi = family2afi (wcb->node->p.family); + wcb->import_table->holddown_count[afi] -= 1; /* keep count consistent */ + rfapiItBiIndexDel (wcb->node, bi); + rfapiBgpInfoDetach (wcb->node, bi); /* with removed bi */ + + vnc_import_bgp_exterior_del_route_interior (bgp, wcb->import_table, + wcb->node, bi); + + + /* + * If VNC is configured to send response remove messages, AND + * if the removed route had a UN address, do response removal + * processing. + */ + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE)) + { + + int has_valid_duplicate = 0; + struct bgp_info *bii; + + /* + * First check if there are any OTHER routes at this node + * that have the same nexthop and a valid UN address. If + * there are (e.g., from other peers), then the route isn't + * really gone, so skip sending a response removal message. 
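+ * (rfapiVpnBiSamePtUn() in the loop below is what performs that
+ * same-nexthop / valid-UN comparison.)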
+ */ + for (bii = wcb->node->info; bii; bii = bii->next) + { + if (rfapiVpnBiSamePtUn (bi, bii)) + { + has_valid_duplicate = 1; + break; + } + } + + zlog_debug ("%s: has_valid_duplicate=%d", __func__, + has_valid_duplicate); + + if (!has_valid_duplicate) + { + rfapiRibPendingDeleteRoute (bgp, wcb->import_table, afi, wcb->node); + } + } + + rfapiMonitorEncapDelete (bi); + + /* + * If there are no VPN monitors at this VPN Node A, + * we are done + */ + if (!RFAPI_MONITOR_VPN (wcb->node)) + { + zlog_debug ("%s: no VPN monitors at this node", __func__); + goto done; + } + + /* + * rfapiMonitorMoveShorter only moves monitors if there are + * no remaining valid routes at the current node + */ + moved = rfapiMonitorMoveShorter (wcb->node, 1); + + if (moved) + { + rfapiMonitorMovedUp (wcb->import_table, wcb->node, moved->node, moved); + } + +done: + /* + * Free VPN bi + */ + rfapiBgpInfoFree (bi); + wcb->info = NULL; + + /* + * If route count at this node has gone to 0, withdraw exported prefix + */ + if (!wcb->node->info) + { + /* see if the struct rfapi_it_extra is empty and can be freed */ + rfapiMonitorExtraPrune (SAFI_MPLS_VPN, wcb->node); + vnc_direct_bgp_del_prefix (bgp, wcb->import_table, wcb->node); + vnc_zebra_del_prefix (bgp, wcb->import_table, wcb->node); + } + else + { + /* + * nexthop change event + * vnc_direct_bgp_add_prefix() will recompute the VN addr ecommunity + */ + vnc_direct_bgp_add_prefix (bgp, wcb->import_table, wcb->node); + } + + RFAPI_CHECK_REFCOUNT (wcb->node, SAFI_MPLS_VPN, 1 + wcb->lockoffset); + route_unlock_node (wcb->node); /* decr ref count */ + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + return 0; +} + +/* + * This works for multiprotocol extension, but not for plain ol' + * unicast IPv4 because that nexthop is stored in attr->nexthop + */ +void +rfapiNexthop2Prefix (struct attr *attr, struct prefix *p) +{ + assert (p); + assert (attr); + assert (attr->extra); + + memset (p, 0, sizeof (struct prefix)); + + switch (p->family = BGP_MP_NEXTHOP_FAMILY (attr->extra->mp_nexthop_len)) + { + case AF_INET: + p->u.prefix4 = attr->extra->mp_nexthop_global_in; + p->prefixlen = 32; + break; + + case AF_INET6: + p->u.prefix6 = attr->extra->mp_nexthop_global; + p->prefixlen = 128; + break; + + default: + zlog_debug ("%s: Family is unknown = %d", + __func__, p->family); + } +} + +void +rfapiUnicastNexthop2Prefix (afi_t afi, struct attr *attr, struct prefix *p) +{ + if (afi == AFI_IP) + { + p->family = AF_INET; + p->prefixlen = 32; + p->u.prefix4 = attr->nexthop; + } + else + { + rfapiNexthop2Prefix (attr, p); + } +} + +static int +rfapiAttrNexthopAddrDifferent (struct prefix *p1, struct prefix *p2) +{ + if (!p1 || !p2) + { + zlog_debug ("%s: p1 or p2 is NULL", __func__); + return 1; + } + + /* + * Are address families the same? 
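+ * (a family mismatch, or an unhandled family, is reported as
+ * "different", i.e. the function returns 1)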
+ */ + if (p1->family != p2->family) + { + return 1; + } + + switch (p1->family) + { + case AF_INET: + if (IPV4_ADDR_SAME (&p1->u.prefix4, &p2->u.prefix4)) + return 0; + break; + + case AF_INET6: + if (IPV6_ADDR_SAME (&p1->u.prefix6, &p2->u.prefix6)) + return 0; + break; + + default: + assert (1); + + } + + return 1; +} + +static void +rfapiCopyUnEncap2VPN (struct bgp_info *encap_bi, struct bgp_info *vpn_bi) +{ + struct attr_extra *attre; + + if (!encap_bi->attr || !encap_bi->attr->extra) + { + zlog_warn ("%s: no encap bi attr/extra, can't copy UN address", + __func__); + return; + } + + if (!vpn_bi || !vpn_bi->extra) + { + zlog_warn ("%s: no vpn bi attr/extra, can't copy UN address", + __func__); + return; + } + + attre = encap_bi->attr->extra; + + switch (BGP_MP_NEXTHOP_FAMILY (attre->mp_nexthop_len)) + { + case AF_INET: + + /* + * instrumentation to debug segfault of 091127 + */ + zlog_debug ("%s: vpn_bi=%p", __func__, vpn_bi); + if (vpn_bi) + { + zlog_debug ("%s: vpn_bi->extra=%p", __func__, vpn_bi->extra); + } + + vpn_bi->extra->vnc.import.un_family = AF_INET; + vpn_bi->extra->vnc.import.un.addr4 = attre->mp_nexthop_global_in; + break; + + case AF_INET6: + vpn_bi->extra->vnc.import.un_family = AF_INET6; + vpn_bi->extra->vnc.import.un.addr6 = attre->mp_nexthop_global; + break; + + default: + zlog_warn ("%s: invalid encap nexthop length: %d", + __func__, attre->mp_nexthop_len); + vpn_bi->extra->vnc.import.un_family = 0; + break; + } +} + +/* + * returns 0 on success, nonzero on error + */ +static int +rfapiWithdrawEncapUpdateCachedUn ( + struct rfapi_import_table *import_table, + struct bgp_info *encap_bi, + struct route_node *vpn_rn, + struct bgp_info *vpn_bi) +{ + if (!encap_bi) + { + + /* + * clear cached UN address + */ + if (!vpn_bi || !vpn_bi->extra) + { + zlog_warn ("%s: missing VPN bi/extra, can't clear UN addr", + __func__); + return 1; + } + vpn_bi->extra->vnc.import.un_family = 0; + memset (&vpn_bi->extra->vnc.import.un, 0, + sizeof (vpn_bi->extra->vnc.import.un)); + if (CHECK_FLAG (vpn_bi->flags, BGP_INFO_VALID)) + { + if (rfapiGetVncTunnelUnAddr (vpn_bi->attr, NULL)) + { + UNSET_FLAG (vpn_bi->flags, BGP_INFO_VALID); + if (VALID_INTERIOR_TYPE (vpn_bi->type)) + RFAPI_MONITOR_EXTERIOR (vpn_rn)->valid_interior_count--; + /* signal interior route withdrawal to import-exterior */ + vnc_import_bgp_exterior_del_route_interior (bgp_get_default (), + import_table, + vpn_rn, vpn_bi); + } + } + + } + else + { + if (!vpn_bi) + { + zlog_warn ("%s: missing VPN bi, can't clear UN addr", __func__); + return 1; + } + rfapiCopyUnEncap2VPN (encap_bi, vpn_bi); + if (!CHECK_FLAG (vpn_bi->flags, BGP_INFO_VALID)) + { + SET_FLAG (vpn_bi->flags, BGP_INFO_VALID); + if (VALID_INTERIOR_TYPE (vpn_bi->type)) + RFAPI_MONITOR_EXTERIOR (vpn_rn)->valid_interior_count++; + /* signal interior route withdrawal to import-exterior */ + vnc_import_bgp_exterior_add_route_interior (bgp_get_default (), + import_table, + vpn_rn, vpn_bi); + } + } + return 0; +} + +static int +rfapiWithdrawTimerEncap (struct thread *t) +{ + struct rfapi_withdraw *wcb = t->arg; + struct bgp_info *bi = wcb->info; + int was_first_route = 0; + struct rfapi_monitor_encap *em; + struct skiplist *vpn_node_sl = skiplist_new (0, NULL, NULL); + + assert (wcb->node); + assert (bi); + assert (wcb->import_table); + + RFAPI_CHECK_REFCOUNT (wcb->node, SAFI_ENCAP, 0); + + if (wcb->node->info == bi) + was_first_route = 1; + + /* + * Remove the route/bi and free it + */ + rfapiBgpInfoDetach (wcb->node, bi); + rfapiBgpInfoFree (bi); + + if 
(!was_first_route) + goto done; + + for (em = RFAPI_MONITOR_ENCAP (wcb->node); em; em = em->next) + { + + /* + * Update monitoring VPN BIs with new encap info at the + * head of the encap bi chain (which could be NULL after + * removing the expiring bi above) + */ + if (rfapiWithdrawEncapUpdateCachedUn + (wcb->import_table, wcb->node->info, em->node, em->bi)) + continue; + + /* + * Build a list of unique VPN nodes referenced by these monitors. + * Use a skiplist for speed. + */ + skiplist_insert (vpn_node_sl, em->node, em->node); + } + + + /* + * for each VPN node referenced in the ENCAP monitors: + */ + struct route_node *rn; + while (!skiplist_first (vpn_node_sl, (void **) &rn, NULL)) + { + if (!wcb->node->info) + { + struct rfapi_monitor_vpn *moved; + + moved = rfapiMonitorMoveShorter (rn, 0); + if (moved) + { + //rfapiDoRouteCallback(wcb->import_table, moved->node, moved); + rfapiMonitorMovedUp (wcb->import_table, rn, moved->node, moved); + } + } + else + { + //rfapiDoRouteCallback(wcb->import_table, rn, NULL); + rfapiMonitorItNodeChanged (wcb->import_table, rn, NULL); + } + skiplist_delete_first (vpn_node_sl); + } + +done: + RFAPI_CHECK_REFCOUNT (wcb->node, SAFI_ENCAP, 1); + route_unlock_node (wcb->node); /* decr ref count */ + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + skiplist_free (vpn_node_sl); + return 0; +} + + +/* + * Works for both VPN and ENCAP routes; timer_service_func is different + * in each case + */ +static void +rfapiBiStartWithdrawTimer ( + struct rfapi_import_table *import_table, + struct route_node *rn, + struct bgp_info *bi, + afi_t afi, + safi_t safi, + int (*timer_service_func) (struct thread *)) +{ + uint32_t lifetime; + struct rfapi_withdraw *wcb; + + if CHECK_FLAG + (bi->flags, BGP_INFO_REMOVED) + { + /* + * Already on the path to being withdrawn, + * should already have a timer set up to + * delete it. + */ + zlog_debug ("%s: already being withdrawn, do nothing", __func__); + return; + } + + rfapiGetVncLifetime (bi->attr, &lifetime); + zlog_debug ("%s: VNC lifetime is %u", __func__, lifetime); + + /* + * withdrawn routes get to hang around for a while + */ + SET_FLAG (bi->flags, BGP_INFO_REMOVED); + + /* set timer to remove the route later */ + lifetime = rfapiGetHolddownFromLifetime (lifetime); + zlog_debug ("%s: using timeout %u", __func__, lifetime); + + /* + * Stash import_table, node, and info for use by timer + * service routine, which is supposed to free the wcb. 
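+ * (the wcb is XCALLOC'd just below and passed as the timer argument;
+ * rfapiWithdrawTimerVPN()/rfapiWithdrawTimerEncap() XFREE it when
+ * they run)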
+ */ + wcb = XCALLOC (MTYPE_RFAPI_WITHDRAW, sizeof (struct rfapi_withdraw)); + assert (wcb); + wcb->node = rn; + wcb->info = bi; + wcb->import_table = import_table; + + zlog_debug + ("%s: wcb values: node=%p, info=%p, import_table=%p (bi follows)", + __func__, wcb->node, wcb->info, wcb->import_table); + rfapiPrintBi (NULL, bi); + + + assert (bi->extra); + if (lifetime > UINT32_MAX / 1001) + { + /* sub-optimal case, but will probably never happen */ + bi->extra->vnc.import.timer = thread_add_timer (bm->master, + timer_service_func, + wcb, lifetime); + } + else + { + static uint32_t jitter; + uint32_t lifetime_msec; + + /* + * the goal here is to spread out the timers so they are + * sortable in the skip list + */ + if (++jitter >= 1000) + jitter = 0; + + lifetime_msec = (lifetime * 1000) + jitter; + + bi->extra->vnc.import.timer = thread_add_background (bm->master, + timer_service_func, + wcb, + lifetime_msec); + } + + /* re-sort route list (BGP_INFO_REMOVED routes are last) */ + if (((struct bgp_info *) rn->info)->next) + { + rfapiBgpInfoDetach (rn, bi); + rfapiBgpInfoAttachSorted (rn, bi, afi, safi); + } +} + + +typedef void (rfapi_bi_filtered_import_f) (struct rfapi_import_table *, + int, + struct peer *, + void *, + struct prefix *, + struct prefix *, + afi_t, + struct prefix_rd *, + struct attr *, + u_char, u_char, uint32_t *); + + +static void +rfapiExpireEncapNow ( + struct rfapi_import_table *it, + struct route_node *rn, + struct bgp_info *bi) +{ + struct rfapi_withdraw *wcb; + struct thread t; + + /* + * pretend we're an expiring timer + */ + wcb = XCALLOC (MTYPE_RFAPI_WITHDRAW, sizeof (struct rfapi_withdraw)); + wcb->info = bi; + wcb->node = rn; + wcb->import_table = it; + memset (&t, 0, sizeof (t)); + t.arg = wcb; + rfapiWithdrawTimerEncap (&t); /* frees wcb */ +} + +static int +rfapiGetNexthop (struct attr *attr, struct prefix *prefix) +{ + switch (BGP_MP_NEXTHOP_FAMILY (attr->extra->mp_nexthop_len)) + { + case AF_INET: + prefix->family = AF_INET; + prefix->prefixlen = 32; + prefix->u.prefix4 = attr->extra->mp_nexthop_global_in; + break; + case AF_INET6: + prefix->family = AF_INET6; + prefix->prefixlen = 128; + prefix->u.prefix6 = attr->extra->mp_nexthop_global; + break; + default: + zlog_debug ("%s: unknown attr->extra->mp_nexthop_len %d", __func__, + attr->extra->mp_nexthop_len); + return EINVAL; + } + return 0; +} + +/* + * import a bgp_info if its route target list intersects with the + * import table's route target list + */ +static void +rfapiBgpInfoFilteredImportEncap ( + struct rfapi_import_table *import_table, + int action, + struct peer *peer, + void *rfd, /* set for looped back routes */ + struct prefix *p, + struct prefix *aux_prefix, /* Unused for encap routes */ + afi_t afi, + struct prefix_rd *prd, + struct attr *attr, /* part of bgp_info */ + u_char type, /* part of bgp_info */ + u_char sub_type, /* part of bgp_info */ + uint32_t *label) /* part of bgp_info */ +{ + struct route_table *rt = NULL; + struct route_node *rn; + struct bgp_info *info_new; + struct bgp_info *bi; + struct bgp_info *next; + char buf[BUFSIZ]; + + struct prefix p_firstbi_old; + struct prefix p_firstbi_new; + int replacing = 0; + const char *action_str = NULL; + struct prefix un_prefix; + + struct bgp *bgp; + bgp = bgp_get_default (); /* assume 1 instance for now */ + + switch (action) + { + case FIF_ACTION_UPDATE: + action_str = "update"; + break; + case FIF_ACTION_WITHDRAW: + action_str = "withdraw"; + break; + case FIF_ACTION_KILL: + action_str = "kill"; + break; + default: + assert (0); + 
break; + } + + zlog_debug ("%s: entry: %s: prefix %s/%d", __func__, + action_str, + inet_ntop (p->family, &p->u.prefix, buf, BUFSIZ), p->prefixlen); + + memset (&p_firstbi_old, 0, sizeof (p_firstbi_old)); + memset (&p_firstbi_new, 0, sizeof (p_firstbi_new)); + + if (action == FIF_ACTION_UPDATE) + { + /* + * Compare rt lists. If no intersection, don't import this route + * On a withdraw, peer and RD are sufficient to determine if + * we should act. + */ + if (!attr || !attr->extra || !attr->extra->ecommunity) + { + + zlog_debug ("%s: attr, extra, or ecommunity missing, not importing", + __func__); + return; + } +#if RFAPI_REQUIRE_ENCAP_BEEC + if (!rfapiEcommunitiesMatchBeec (attr->extra->ecommunity)) + { + zlog_debug ("%s: it=%p: no match for BGP Encapsulation ecommunity", + __func__, import_table); + return; + } +#endif + if (!rfapiEcommunitiesIntersect (import_table->rt_import_list, + attr->extra->ecommunity)) + { + + zlog_debug ("%s: it=%p: no ecommunity intersection", + __func__, import_table); + return; + } + + /* + * Updates must also have a nexthop address + */ + memset (&un_prefix, 0, sizeof (un_prefix)); /* keep valgrind happy */ + if (rfapiGetNexthop (attr, &un_prefix)) + { + zlog_debug ("%s: missing nexthop address", __func__); + return; + } + } + + /* + * Figure out which radix tree the route would go into + */ + switch (afi) + { + case AFI_IP: + case AFI_IP6: + rt = import_table->imported_encap[afi]; + break; + + default: + zlog_err ("%s: bad afi %d", __func__, afi); + return; + } + + /* + * route_node_lookup returns a node only if there is at least + * one route attached. + */ + rn = route_node_lookup (rt, p); + +#if DEBUG_ENCAP_MONITOR + zlog_debug ("%s: initial encap lookup (it=%p) rn=%p", + __func__, import_table, rn); +#endif + + if (rn) + { + + RFAPI_CHECK_REFCOUNT (rn, SAFI_ENCAP, 1); + route_unlock_node (rn); /* undo lock in route_node_lookup */ + + + /* + * capture nexthop of first bi + */ + if (rn->info) + { + rfapiNexthop2Prefix (((struct bgp_info *) (rn->info))->attr, + &p_firstbi_old); + } + + for (bi = rn->info; bi; bi = bi->next) + { + + /* + * Does this bgp_info refer to the same route + * as we are trying to add? + */ + zlog_debug ("%s: comparing BI %p", __func__, bi); + + + /* + * Compare RDs + * + * RD of import table bi is in bi->extra->vnc.import.rd + * RD of info_orig is in prd + */ + if (!bi->extra) + { + zlog_debug ("%s: no bi->extra", __func__); + continue; + } + if (prefix_cmp ((struct prefix *) &bi->extra->vnc.import.rd, + (struct prefix *) prd)) + { + + zlog_debug ("%s: prd does not match", __func__); + continue; + } + + /* + * Compare peers + */ + if (bi->peer != peer) + { + zlog_debug ("%s: peer does not match", __func__); + continue; + } + + zlog_debug ("%s: found matching bi", __func__); + + /* Same route. Delete this bi, replace with new one */ + + if (action == FIF_ACTION_WITHDRAW) + { + + zlog_debug ("%s: withdrawing at prefix %s/%d", + __func__, + inet_ntop (rn->p.family, &rn->p.u.prefix, buf, + BUFSIZ), rn->p.prefixlen); + + rfapiBiStartWithdrawTimer (import_table, rn, bi, + afi, SAFI_ENCAP, + rfapiWithdrawTimerEncap); + + } + else + { + zlog_debug ("%s: %s at prefix %s/%d", + __func__, + ((action == + FIF_ACTION_KILL) ? "killing" : "replacing"), + inet_ntop (rn->p.family, &rn->p.u.prefix, buf, + BUFSIZ), rn->p.prefixlen); + + /* + * If this route is waiting to be deleted because of + * a previous withdraw, we must cancel its timer. 
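+ * (thread_cancel() is not expected to free the timer argument, so the
+ * rfapi_withdraw wcb stashed in t->arg is XFREE'd explicitly as well)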
+ */ + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED) + && bi->extra->vnc.import.timer) + { + + struct thread *t = + (struct thread *) bi->extra->vnc.import.timer; + struct rfapi_withdraw *wcb = t->arg; + + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + thread_cancel (t); + } + + if (action == FIF_ACTION_UPDATE) + { + rfapiBgpInfoDetach (rn, bi); + rfapiBgpInfoFree (bi); + replacing = 1; + } + else + { + /* + * Kill: do export stuff when removing bi + */ + struct rfapi_withdraw *wcb; + struct thread t; + + /* + * pretend we're an expiring timer + */ + wcb = + XCALLOC (MTYPE_RFAPI_WITHDRAW, + sizeof (struct rfapi_withdraw)); + wcb->info = bi; + wcb->node = rn; + wcb->import_table = import_table; + memset (&t, 0, sizeof (t)); + t.arg = wcb; + rfapiWithdrawTimerEncap (&t); /* frees wcb */ + } + } + + break; + } + } + + if (rn) + RFAPI_CHECK_REFCOUNT (rn, SAFI_ENCAP, replacing ? 1 : 0); + + if (action == FIF_ACTION_WITHDRAW || action == FIF_ACTION_KILL) + return; + + info_new = rfapiBgpInfoCreate (attr, peer, rfd, prd, type, sub_type, NULL); + + if (rn) + { + if (!replacing) + route_lock_node (rn); /* incr ref count for new BI */ + } + else + { + rn = route_node_get (rt, p); + } + + zlog_debug ("%s: (afi=%d, rn=%p) inserting at prefix %s/%d", + __func__, + afi, + rn, + inet_ntop (rn->p.family, &rn->p.u.prefix, buf, BUFSIZ), + rn->p.prefixlen); + + rfapiBgpInfoAttachSorted (rn, info_new, afi, SAFI_ENCAP); + + /* + * Delete holddown routes from same NVE. See details in + * rfapiBgpInfoFilteredImportVPN() + */ + for (bi = info_new->next; bi; bi = next) + { + + struct prefix pfx_un; + int un_match = 0; + + next = bi->next; + if (!CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + /* + * We already match the VN address (it is the prefix + * of the route node) + */ + + if (!rfapiGetNexthop (bi->attr, &pfx_un) && + prefix_same (&pfx_un, &un_prefix)) + { + + un_match = 1; + } + + if (!un_match) + continue; + + zlog_debug ("%s: removing holddown bi matching NVE of new route", + __func__); + if (bi->extra->vnc.import.timer) + { + struct thread *t = (struct thread *) bi->extra->vnc.import.timer; + struct rfapi_withdraw *wcb = t->arg; + + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + thread_cancel (t); + } + rfapiExpireEncapNow (import_table, rn, bi); + } + + rfapiNexthop2Prefix (((struct bgp_info *) (rn->info))->attr, + &p_firstbi_new); + + /* + * If the nexthop address of the selected Encap route (i.e., + * the UN address) has changed, then we must update the VPN + * routes that refer to this Encap route and possibly force + * rfapi callbacks. + */ + if (rfapiAttrNexthopAddrDifferent (&p_firstbi_old, &p_firstbi_new)) + { + + struct rfapi_monitor_encap *m; + struct rfapi_monitor_encap *mnext; + + struct route_node *referenced_vpn_prefix; + + /* + * Optimized approach: build radix tree on the fly to + * hold list of VPN nodes referenced by the ENCAP monitors + * + * The nodes in this table correspond to prefixes of VPN routes. + * The "info" pointer of the node points to a chain of + * struct rfapi_monitor_encap, each of which refers to a + * specific VPN node. + */ + struct route_table *referenced_vpn_table; + + referenced_vpn_table = route_table_init (); + assert (referenced_vpn_table); + + /* + * iterate over the set of monitors at this ENCAP node. 
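+ * For each monitor: copy the new ENCAP nexthop into the monitoring VPN
+ * bi's cached UN address, mark that bi valid if it wasn't, and collect
+ * the referenced VPN nodes so callbacks can be generated afterward.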
+ */ +#if DEBUG_ENCAP_MONITOR + zlog_debug ("%s: examining monitors at rn=%p", __func__, rn); +#endif + for (m = RFAPI_MONITOR_ENCAP (rn); m; m = m->next) + { + + /* + * For each referenced bi/route, copy the ENCAP route's + * nexthop to the VPN route's cached UN address field and set + * the address family of the cached UN address field. + */ + rfapiCopyUnEncap2VPN (info_new, m->bi); + if (!CHECK_FLAG (m->bi->flags, BGP_INFO_VALID)) + { + SET_FLAG (m->bi->flags, BGP_INFO_VALID); + if (VALID_INTERIOR_TYPE (m->bi->type)) + RFAPI_MONITOR_EXTERIOR (m->node)->valid_interior_count++; + vnc_import_bgp_exterior_add_route_interior (bgp, + import_table, + m->node, m->bi); + } + + /* + * Build a list of unique VPN nodes referenced by these monitors + * + * There could be more than one VPN node here with a given + * prefix. Those are currently in an unsorted linear list + * per prefix. + */ + + referenced_vpn_prefix = + route_node_get (referenced_vpn_table, &m->node->p); + assert (referenced_vpn_prefix); + for (mnext = referenced_vpn_prefix->info; mnext; + mnext = mnext->next) + { + + if (mnext->node == m->node) + break; + } + + if (mnext) + { + /* + * already have an entry for this VPN node + */ + route_unlock_node (referenced_vpn_prefix); + } + else + { + mnext = XCALLOC (MTYPE_RFAPI_MONITOR_ENCAP, + sizeof (struct rfapi_monitor_encap)); + assert (mnext); + mnext->node = m->node; + mnext->next = referenced_vpn_prefix->info; + referenced_vpn_prefix->info = mnext; + } + + } + + /* + * for each VPN node referenced in the ENCAP monitors: + */ + for (referenced_vpn_prefix = route_top (referenced_vpn_table); + referenced_vpn_prefix; + referenced_vpn_prefix = route_next (referenced_vpn_prefix)) + { + + while ((m = referenced_vpn_prefix->info)) + { + + struct route_node *n; + + rfapiMonitorMoveLonger (m->node); + for (n = m->node; n; n = n->parent) + { + //rfapiDoRouteCallback(import_table, n, NULL); + } + rfapiMonitorItNodeChanged (import_table, m->node, NULL); + + referenced_vpn_prefix->info = m->next; + route_unlock_node (referenced_vpn_prefix); + XFREE (MTYPE_RFAPI_MONITOR_ENCAP, m); + } + + } + route_table_finish (referenced_vpn_table); + } + + RFAPI_CHECK_REFCOUNT (rn, SAFI_ENCAP, 0); +} + +static void +rfapiExpireVpnNow ( + struct rfapi_import_table *it, + struct route_node *rn, + struct bgp_info *bi, + int lockoffset) +{ + struct rfapi_withdraw *wcb; + struct thread t; + + /* + * pretend we're an expiring timer + */ + wcb = XCALLOC (MTYPE_RFAPI_WITHDRAW, sizeof (struct rfapi_withdraw)); + wcb->info = bi; + wcb->node = rn; + wcb->import_table = it; + wcb->lockoffset = lockoffset; + memset (&t, 0, sizeof (t)); + t.arg = wcb; + rfapiWithdrawTimerVPN (&t); /* frees wcb */ +} + + +/* + * import a bgp_info if its route target list intersects with the + * import table's route target list + */ +void +rfapiBgpInfoFilteredImportVPN ( + struct rfapi_import_table *import_table, + int action, + struct peer *peer, + void *rfd, /* set for looped back routes */ + struct prefix *p, + struct prefix *aux_prefix, /* AFI_ETHER: optional IP */ + afi_t afi, + struct prefix_rd *prd, + struct attr *attr, /* part of bgp_info */ + u_char type, /* part of bgp_info */ + u_char sub_type, /* part of bgp_info */ + uint32_t *label) /* part of bgp_info */ +{ + struct route_table *rt = NULL; + struct route_node *rn; + struct route_node *n; + struct bgp_info *info_new; + struct bgp_info *bi; + struct bgp_info *next; + char buf[BUFSIZ]; + struct prefix vn_prefix; + struct prefix un_prefix; + int un_prefix_valid = 0; + struct 
route_node *ern; + int replacing = 0; + int original_had_routes = 0; + struct prefix original_nexthop; + const char *action_str = NULL; + int is_it_ce = 0; + + struct bgp *bgp; + bgp = bgp_get_default (); /* assume 1 instance for now */ + + switch (action) + { + case FIF_ACTION_UPDATE: + action_str = "update"; + break; + case FIF_ACTION_WITHDRAW: + action_str = "withdraw"; + break; + case FIF_ACTION_KILL: + action_str = "kill"; + break; + default: + assert (0); + break; + } + + if (import_table == bgp->rfapi->it_ce) + is_it_ce = 1; + + zlog_debug ("%s: entry: %s%s: prefix %s/%d: it %p, afi %s", __func__, + (is_it_ce ? "CE-IT " : ""), + action_str, + rfapi_ntop (p->family, &p->u.prefix, buf, BUFSIZ), + p->prefixlen, import_table, afi2str (afi)); + + VNC_ITRCCK; + + /* + * Compare rt lists. If no intersection, don't import this route + * On a withdraw, peer and RD are sufficient to determine if + * we should act. + */ + if (action == FIF_ACTION_UPDATE) + { + if (!attr || !attr->extra || !attr->extra->ecommunity) + { + + zlog_debug ("%s: attr, extra, or ecommunity missing, not importing", + __func__); + return; + } + if ((import_table != bgp->rfapi->it_ce) && + !rfapiEcommunitiesIntersect (import_table->rt_import_list, + attr->extra->ecommunity)) + { + + zlog_debug ("%s: it=%p: no ecommunity intersection", + __func__, import_table); + return; + } + + memset (&vn_prefix, 0, sizeof (vn_prefix)); /* keep valgrind happy */ + if (rfapiGetNexthop (attr, &vn_prefix)) + { + /* missing nexthop address would be a bad, bad thing */ + zlog_debug ("%s: missing nexthop", __func__); + return; + } + } + + /* + * Figure out which radix tree the route would go into + */ + switch (afi) + { + case AFI_IP: + case AFI_IP6: + case AFI_ETHER: + rt = import_table->imported_vpn[afi]; + break; + + default: + zlog_err ("%s: bad afi %d", __func__, afi); + return; + } + + /* clear it */ + memset (&original_nexthop, 0, sizeof (original_nexthop)); + + /* + * route_node_lookup returns a node only if there is at least + * one route attached. + */ + rn = route_node_lookup (rt, p); + + zlog_debug ("%s: rn=%p", __func__, rn); + + if (rn) + { + + RFAPI_CHECK_REFCOUNT (rn, SAFI_MPLS_VPN, 1); + route_unlock_node (rn); /* undo lock in route_node_lookup */ + + if (rn->info) + original_had_routes = 1; + + /* + * Look for same route (will have same RD and peer) + */ + bi = rfapiItBiIndexSearch (rn, prd, peer, aux_prefix); + + if (bi) + { + + /* + * This was an old test when we iterated over the + * BIs linearly. Since we're now looking up with + * RD and peer, comparing types should not be + * needed. Changed to assertion. + * + * Compare types. Doing so prevents a RFP-originated + * route from matching an imported route, for example. + */ + assert (bi->type == type); + + zlog_debug ("%s: found matching bi", __func__); + + /* + * In the special CE table, withdrawals occur without holddown + */ + if (import_table == bgp->rfapi->it_ce) + { + vnc_direct_bgp_del_route_ce (bgp, rn, bi); + if (action == FIF_ACTION_WITHDRAW) + action = FIF_ACTION_KILL; + } + + if (action == FIF_ACTION_WITHDRAW) + { + + int washolddown = CHECK_FLAG (bi->flags, BGP_INFO_REMOVED); + + zlog_debug ("%s: withdrawing at prefix %s/%d%s", + __func__, + rfapi_ntop (rn->p.family, &rn->p.u.prefix, buf, + BUFSIZ), rn->p.prefixlen, + (washolddown ? 
" (already being withdrawn)" : "")); + + VNC_ITRCCK; + if (!washolddown) + { + rfapiBiStartWithdrawTimer (import_table, rn, bi, + afi, SAFI_MPLS_VPN, + rfapiWithdrawTimerVPN); + + RFAPI_UPDATE_ITABLE_COUNT (bi, import_table, afi, -1); + import_table->holddown_count[afi] += 1; + } + VNC_ITRCCK; + } + else + { + zlog_debug ("%s: %s at prefix %s/%d", + __func__, + ((action == + FIF_ACTION_KILL) ? "killing" : "replacing"), + rfapi_ntop (rn->p.family, &rn->p.u.prefix, buf, + BUFSIZ), rn->p.prefixlen); + + /* + * If this route is waiting to be deleted because of + * a previous withdraw, we must cancel its timer. + */ + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED) && + bi->extra->vnc.import.timer) + { + + struct thread *t = + (struct thread *) bi->extra->vnc.import.timer; + struct rfapi_withdraw *wcb = t->arg; + + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + thread_cancel (t); + + import_table->holddown_count[afi] -= 1; + RFAPI_UPDATE_ITABLE_COUNT (bi, import_table, afi, 1); + } + /* + * decrement remote count (if route is remote) because + * we are going to remove it below + */ + RFAPI_UPDATE_ITABLE_COUNT (bi, import_table, afi, -1); + if (action == FIF_ACTION_UPDATE) + { + replacing = 1; + + /* + * make copy of original nexthop so we can see if it changed + */ + rfapiGetNexthop (bi->attr, &original_nexthop); + + /* + * remove bi without doing any export processing + */ + if (CHECK_FLAG (bi->flags, BGP_INFO_VALID) + && VALID_INTERIOR_TYPE (bi->type)) + RFAPI_MONITOR_EXTERIOR (rn)->valid_interior_count--; + rfapiItBiIndexDel (rn, bi); + rfapiBgpInfoDetach (rn, bi); + rfapiMonitorEncapDelete (bi); + vnc_import_bgp_exterior_del_route_interior (bgp, + import_table, + rn, bi); + rfapiBgpInfoFree (bi); + } + else + { + /* Kill */ + /* + * remove bi and do export processing + */ + import_table->holddown_count[afi] += 1; + rfapiExpireVpnNow (import_table, rn, bi, 0); + } + + } + } + + } + + if (rn) + RFAPI_CHECK_REFCOUNT (rn, SAFI_MPLS_VPN, replacing ? 
1 : 0); + + if (action == FIF_ACTION_WITHDRAW || action == FIF_ACTION_KILL) + { + VNC_ITRCCK; + return; + } + + info_new = rfapiBgpInfoCreate (attr, peer, rfd, prd, type, sub_type, label); + + /* + * lookup un address in encap table + */ + ern = route_node_match (import_table->imported_encap[afi], &vn_prefix); + if (ern) + { + rfapiCopyUnEncap2VPN (ern->info, info_new); + route_unlock_node (ern); /* undo lock in route_note_match */ + } + else + { + char buf[BUFSIZ]; + prefix2str (&vn_prefix, buf, sizeof (buf)); + buf[BUFSIZ - 1] = 0; + /* Not a big deal, just means VPN route got here first */ + zlog_debug ("%s: no encap route for vn addr %s", __func__, buf); + info_new->extra->vnc.import.un_family = 0; + } + + if (rn) + { + if (!replacing) + route_lock_node (rn); + } + else + { + /* + * No need to increment reference count, so only "get" + * if the node is not there already + */ + rn = route_node_get (rt, p); + } + + /* + * For ethernet routes, if there is an accompanying IP address, + * save it in the bi + */ + if ((AFI_ETHER == afi) && aux_prefix) + { + + zlog_debug ("%s: setting BI's aux_prefix", __func__); + info_new->extra->vnc.import.aux_prefix = *aux_prefix; + } + + zlog_debug ("%s: inserting bi %p at prefix %s/%d #%d", + __func__, + info_new, + rfapi_ntop (rn->p.family, &rn->p.u.prefix, buf, BUFSIZ), + rn->p.prefixlen, rn->lock); + + rfapiBgpInfoAttachSorted (rn, info_new, afi, SAFI_MPLS_VPN); + rfapiItBiIndexAdd (rn, info_new); + if (!rfapiGetUnAddrOfVpnBi (info_new, NULL)) + { + if (VALID_INTERIOR_TYPE (info_new->type)) + RFAPI_MONITOR_EXTERIOR (rn)->valid_interior_count++; + SET_FLAG (info_new->flags, BGP_INFO_VALID); + } + RFAPI_UPDATE_ITABLE_COUNT (info_new, import_table, afi, 1); + vnc_import_bgp_exterior_add_route_interior (bgp, import_table, rn, + info_new); + + if (import_table == bgp->rfapi->it_ce) + vnc_direct_bgp_add_route_ce (bgp, rn, info_new); + + zlog_debug ("%s: showing IT node", __func__); + rfapiShowItNode (NULL, rn); /* debug */ + + rfapiMonitorEncapAdd (import_table, &vn_prefix, rn, info_new); + + if (!rfapiGetUnAddrOfVpnBi (info_new, &un_prefix)) + { + + /* + * if we have a valid UN address (either via Encap route + * or via tunnel attribute), then we should attempt + * to move any monitors at less-specific nodes to this node + */ + rfapiMonitorMoveLonger (rn); + + un_prefix_valid = 1; + + } + + /* + * 101129 Enhancement: if we add a route (implication: it is not + * in holddown), delete all other routes from this nve at this + * node that are in holddown, regardless of peer. + * + * Reasons it's OK to do that: + * + * - if the holddown route being deleted originally came from BGP VPN, + * it is already gone from BGP (implication of holddown), so there + * won't be any added inconsistency with the BGP RIB. + * + * - once a fresh route is added at a prefix, any routes in holddown + * at that prefix will not show up in RFP responses, so deleting + * the holddown routes won't affect the contents of responses. + * + * - lifetimes are supposed to be consistent, so there should not + * be a case where the fresh route has a shorter lifetime than + * the holddown route, so we don't expect the fresh route to + * disappear and complete its holddown time before the existing + * holddown routes time out. Therefore, we won't have a situation + * where we expect the existing holddown routes to be hidden and + * then to reappear sometime later (as holddown routes) in a + * RFP response. 
+ * + * Among other things, this would enable us to skirt the problem + * of local holddown routes that refer to NVE descriptors that + * have already been closed (if the same NVE triggers a subsequent + * rfapi_open(), the new peer is different and doesn't match the + * peer of the holddown route, so the stale holddown route still + * hangs around until it times out instead of just being replaced + * by the fresh route). + */ + /* + * We know that the new bi will have been inserted before any routes + * in holddown, so we can skip any that came before it + */ + for (bi = info_new->next; bi; bi = next) + { + + struct prefix pfx_vn; + struct prefix pfx_un; + int un_match = 0; + int remote_peer_match = 0; + + next = bi->next; + + /* + * Must be holddown + */ + if (!CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + /* + * Must match VN address (nexthop of VPN route) + */ + if (rfapiGetNexthop (bi->attr, &pfx_vn)) + continue; + if (!prefix_same (&pfx_vn, &vn_prefix)) + continue; + + if (un_prefix_valid && /* new route UN addr */ + !rfapiGetUnAddrOfVpnBi (bi, &pfx_un) && /* old route UN addr */ + prefix_same (&pfx_un, &un_prefix)) + { /* compare */ + un_match = 1; + } + if (!RFAPI_LOCAL_BI (bi) && !RFAPI_LOCAL_BI (info_new) && + sockunion_same (&bi->peer->su, &info_new->peer->su)) + { + /* old & new are both remote, same peer */ + remote_peer_match = 1; + } + + if (!un_match & !remote_peer_match) + continue; + + zlog_debug ("%s: removing holddown bi matching NVE of new route", + __func__); + if (bi->extra->vnc.import.timer) + { + struct thread *t = (struct thread *) bi->extra->vnc.import.timer; + struct rfapi_withdraw *wcb = t->arg; + + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + thread_cancel (t); + } + rfapiExpireVpnNow (import_table, rn, bi, 0); + } + + if (!original_had_routes) + { + /* + * We went from 0 usable routes to 1 usable route. Perform the + * "Adding a Route" export process. + */ + vnc_direct_bgp_add_prefix (bgp, import_table, rn); + vnc_zebra_add_prefix (bgp, import_table, rn); + } + else + { + /* + * Check for nexthop change event + * Note: the prefix_same() test below detects two situations: + * 1. route is replaced, new route has different nexthop + * 2. 
new route is added (original_nexthop is 0) + */ + struct prefix new_nexthop; + + rfapiGetNexthop (attr, &new_nexthop); + if (!prefix_same (&original_nexthop, &new_nexthop)) + { + /* + * nexthop change event + * vnc_direct_bgp_add_prefix() will recompute VN addr ecommunity + */ + vnc_direct_bgp_add_prefix (bgp, import_table, rn); + } + } + + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)) + { + for (n = rn; n; n = n->parent) + { + //rfapiDoRouteCallback(import_table, n, NULL); + } + rfapiMonitorItNodeChanged (import_table, rn, NULL); + } + RFAPI_CHECK_REFCOUNT (rn, SAFI_MPLS_VPN, 0); + VNC_ITRCCK; +} + +static rfapi_bi_filtered_import_f * +rfapiBgpInfoFilteredImportFunction (safi_t safi) +{ + switch (safi) + { + case SAFI_MPLS_VPN: + case BGP_SAFI_VPN: + return rfapiBgpInfoFilteredImportVPN; + + case SAFI_ENCAP: + return rfapiBgpInfoFilteredImportEncap; + } + zlog_err ("%s: bad safi %d", __func__, safi); + return NULL; +} + +void +rfapiProcessUpdate ( + struct peer *peer, + void *rfd, /* set when looped from RFP/RFAPI */ + struct prefix *p, + struct prefix_rd *prd, + struct attr *attr, + afi_t afi, + safi_t safi, + u_char type, + u_char sub_type, + uint32_t *label) +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_import_table *it; + int has_ip_route = 1; + uint32_t lni = 0; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + assert (bgp); + + h = bgp->rfapi; + assert (h); + + /* + * look at high-order byte of RD. FF means MAC + * address is present (VNC L2VPN) + */ + if ((safi == SAFI_MPLS_VPN) && + (decode_rd_type(prd->val) == RD_TYPE_VNC_ETH)) + { + struct prefix pfx_mac_buf; + struct prefix pfx_nexthop_buf; + int rc; + + /* + * Set flag if prefix and nexthop are the same - don't + * add the route to normal IP-based import tables + */ + if (!rfapiGetNexthop (attr, &pfx_nexthop_buf)) + { + if (!prefix_cmp (&pfx_nexthop_buf, p)) + { + has_ip_route = 0; + } + } + + memset (&pfx_mac_buf, 0, sizeof (pfx_mac_buf)); + pfx_mac_buf.family = AF_ETHERNET; + pfx_mac_buf.prefixlen = 48; + memcpy (&pfx_mac_buf.u.prefix_eth.octet, prd->val + 2, 6); + + /* + * Find rt containing LNI (Logical Network ID), which + * _should_ always be present when mac address is present + */ + rc = rfapiEcommunityGetLNI (attr->extra->ecommunity, &lni); + + zlog_debug + ("%s: rfapiEcommunityGetLNI returned %d, lni=%d, attr=%p, attr->extra=%p", + __func__, rc, lni, attr, attr->extra); + if (attr && attr->extra && !rc) + { + it = rfapiMacImportTableGet (bgp, lni); + + rfapiBgpInfoFilteredImportVPN ( + it, + FIF_ACTION_UPDATE, + peer, + rfd, + &pfx_mac_buf, /* prefix */ + p, /* aux prefix: IP addr */ + AFI_ETHER, + prd, + attr, + type, + sub_type, + label); + } + + } + + if (!has_ip_route) + return; + + /* + * Iterate over all import tables; do a filtered import + * for the afi/safi combination + */ + for (it = h->imports; it; it = it->next) + { + (*rfapiBgpInfoFilteredImportFunction (safi)) ( + it, + FIF_ACTION_UPDATE, + peer, + rfd, + p, /* prefix */ + NULL, + afi, + prd, + attr, + type, + sub_type, + label); + } + + if (safi == SAFI_MPLS_VPN || safi == BGP_SAFI_VPN) + { + vnc_direct_bgp_rh_add_route (bgp, afi, p, peer, attr); + } + + if (safi == SAFI_MPLS_VPN) + { + rfapiBgpInfoFilteredImportVPN ( + bgp->rfapi->it_ce, + FIF_ACTION_UPDATE, + peer, + rfd, + p, /* prefix */ + NULL, + afi, + prd, + attr, + type, + sub_type, + label); + } +} + + +void +rfapiProcessWithdraw ( + struct peer *peer, + void *rfd, + struct prefix *p, + struct prefix_rd *prd, + struct attr *attr, + afi_t afi, + safi_t 
safi, + u_char type, + int kill) +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_import_table *it; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + assert (bgp); + + h = bgp->rfapi; + assert (h); + + /* + * look at high-order byte of RD. FF means MAC + * address is present (VNC L2VPN) + */ + if (h->import_mac != NULL && safi == SAFI_MPLS_VPN && + decode_rd_type(prd->val) == RD_TYPE_VNC_ETH) + { + struct prefix pfx_mac_buf; + void *cursor = NULL; + int rc; + + memset (&pfx_mac_buf, 0, sizeof (pfx_mac_buf)); + pfx_mac_buf.family = AF_ETHERNET; + pfx_mac_buf.prefixlen = 48; + memcpy (&pfx_mac_buf.u.prefix_eth, prd->val + 2, 6); + + /* + * withdraw does not contain attrs, so we don't have + * access to the route's LNI, which would ordinarily + * select the specific mac-based import table. Instead, + * we must iterate over all mac-based tables and rely + * on the RD to match. + * + * If this approach is too slow, add an index where + * key is {RD, peer} and value is the import table + */ + for (rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor); + rc == 0; + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor)) + { + +#if DEBUG_L2_EXTRA + zlog_debug + ("%s: calling rfapiBgpInfoFilteredImportVPN(it=%p, afi=AFI_ETHER)", + __func__, it); +#endif + + rfapiBgpInfoFilteredImportVPN ( + it, + (kill ? FIF_ACTION_KILL : FIF_ACTION_WITHDRAW), + peer, + rfd, + &pfx_mac_buf, /* prefix */ + p, /* aux_prefix: IP */ + AFI_ETHER, + prd, + attr, + type, + 0, + NULL); /* sub_type & label unused for withdraw */ + } + } + + /* + * XXX For the case where the withdraw involves an L2 + * route with no IP information, we rely on the lack + * of RT-list intersection to filter out the withdraw + * from the IP-based import tables below + */ + + /* + * Iterate over all import tables; do a filtered import + * for the afi/safi combination + */ + + for (it = h->imports; it; it = it->next) + { + (*rfapiBgpInfoFilteredImportFunction (safi)) ( + it, + (kill ? FIF_ACTION_KILL : FIF_ACTION_WITHDRAW), + peer, + rfd, + p, /* prefix */ + NULL, + afi, + prd, + attr, + type, + 0, + NULL); /* sub_type & label unused for withdraw */ + } + + /* TBD the deletion should happen after the lifetime expires */ + if (safi == SAFI_MPLS_VPN || safi == BGP_SAFI_VPN) + vnc_direct_bgp_rh_del_route (bgp, afi, p, peer); + + if (safi == SAFI_MPLS_VPN) + { + rfapiBgpInfoFilteredImportVPN ( + bgp->rfapi->it_ce, + (kill ? FIF_ACTION_KILL : FIF_ACTION_WITHDRAW), + peer, + rfd, + p, /* prefix */ + NULL, + afi, + prd, + attr, + type, + 0, + NULL); /* sub_type & label unused for withdraw */ + } +} + +/* + * TBD optimized withdraw timer algorithm for case of many + * routes expiring at the same time due to peer drop. + */ +/* + * 1. Visit all BIs in all ENCAP import tables. + * + * a. If a bi's peer is the failed peer, remove the bi. + * b. If the removed ENCAP bi was first in the list of + * BIs at this ENCAP node, loop over all monitors + * at this node: + * + * (1) for each ENCAP monitor, loop over all its + * VPN node monitors and set their RFAPI_MON_FLAG_NEEDCALLBACK + * flags. + * + * 2. Visit all BIs in all VPN import tables. + * a. If a bi's peer is the failed peer, remove the bi. + * b. loop over all the VPN node monitors and set their + * RFAPI_MON_FLAG_NEEDCALLBACK flags + * c. 
If there are no BIs left at this VPN node, + * + */ + + +/* surprise, this gets called from peer_delete(), from rfapi_close() */ +static void +rfapiProcessPeerDownRt ( + struct peer *peer, + struct rfapi_import_table *import_table, + afi_t afi, + safi_t safi) +{ + struct route_node *rn; + struct bgp_info *bi; + struct route_table *rt; + int (*timer_service_func) (struct thread *); + + assert (afi == AFI_IP || afi == AFI_IP6); + + VNC_ITRCCK; + + switch (safi) + { + case SAFI_MPLS_VPN: + rt = import_table->imported_vpn[afi]; + timer_service_func = rfapiWithdrawTimerVPN; + break; + case SAFI_ENCAP: + rt = import_table->imported_encap[afi]; + timer_service_func = rfapiWithdrawTimerEncap; + break; + default: + assert (0); + } + + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + for (bi = rn->info; bi; bi = bi->next) + { + if (bi->peer == peer) + { + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + { + /* already in holddown, skip */ + continue; + } + + if (safi == SAFI_MPLS_VPN) + { + RFAPI_UPDATE_ITABLE_COUNT (bi, import_table, afi, -1); + import_table->holddown_count[afi] += 1; + } + rfapiBiStartWithdrawTimer (import_table, rn, bi, + afi, safi, + timer_service_func); + } + } + } + VNC_ITRCCK; +} + +/* + * This gets called when a peer connection drops. We have to remove + * all the routes from this peer. + * + * Current approach is crude. TBD Optimize by setting fewer timers and + * grouping withdrawn routes so we can generate callbacks more + * efficiently. + */ +void +rfapiProcessPeerDown (struct peer *peer) +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_import_table *it; + + /* + * If this peer is a "dummy" peer structure attached to an RFAPI + * nve_descriptor, we don't need to walk the import tables + * because the routes are already withdrawn by rfapi_close() + */ + if (CHECK_FLAG (peer->flags, PEER_FLAG_IS_RFAPI_HD)) + return; + + /* + * 1. Visit all BIs in all ENCAP import tables. + * Start withdraw timer on the BIs that match peer. + * + * 2. Visit all BIs in all VPN import tables. + * Start withdraw timer on the BIs that match peer. + */ + + bgp = bgp_get_default (); /* assume 1 instance for now */ + assert (bgp); + + h = bgp->rfapi; + assert (h); + + for (it = h->imports; it; it = it->next) + { + rfapiProcessPeerDownRt (peer, it, AFI_IP, SAFI_ENCAP); + rfapiProcessPeerDownRt (peer, it, AFI_IP6, SAFI_ENCAP); + rfapiProcessPeerDownRt (peer, it, AFI_IP, SAFI_MPLS_VPN); + rfapiProcessPeerDownRt (peer, it, AFI_IP6, SAFI_MPLS_VPN); + } + + if (h->it_ce) + { + rfapiProcessPeerDownRt (peer, h->it_ce, AFI_IP, SAFI_MPLS_VPN); + rfapiProcessPeerDownRt (peer, h->it_ce, AFI_IP6, SAFI_MPLS_VPN); + } +} + +/* + * Import an entire RIB (for an afi/safi) to an import table RIB, + * filtered according to the import table's RT list + * + * TBD: does this function need additions to match rfapiProcessUpdate() + * for, e.g., L2 handling? 
+ */ +static void +rfapiBgpTableFilteredImport ( + struct bgp *bgp, + struct rfapi_import_table *it, + afi_t afi, + safi_t safi) +{ + struct bgp_node *rn1; + struct bgp_node *rn2; + + /* Only these SAFIs have 2-level RIBS */ + assert (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP); + + /* + * Now visit all the rd nodes and the nodes of all the + * route tables attached to them, and import the routes + * if they have matching route targets + */ + for (rn1 = bgp_table_top (bgp->rib[afi][safi]); + rn1; rn1 = bgp_route_next (rn1)) + { + + if (rn1->info) + { + for (rn2 = bgp_table_top (rn1->info); + rn2; rn2 = bgp_route_next (rn2)) + { + + struct bgp_info *bi; + + for (bi = rn2->info; bi; bi = bi->next) + { + u_int32_t label = 0; + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + if (bi->extra) + label = decode_label (bi->extra->tag); + (*rfapiBgpInfoFilteredImportFunction (safi)) ( + it, /* which import table */ + FIF_ACTION_UPDATE, + bi->peer, + NULL, + &rn2->p, /* prefix */ + NULL, + afi, + (struct prefix_rd *) &rn1->p, + bi->attr, + bi->type, + bi->sub_type, + &label); + } + } + } + } +} + + +/* per-bgp-instance rfapi data */ +struct rfapi * +bgp_rfapi_new (struct bgp *bgp) +{ + struct rfapi *h; + int afi; + struct rfapi_rfp_cfg *cfg = NULL; + struct rfapi_rfp_cb_methods *cbm = NULL; + + assert (bgp->rfapi_cfg == NULL); + + h = (struct rfapi *) XCALLOC (MTYPE_RFAPI, sizeof (struct rfapi)); + + for (afi = AFI_IP; afi < AFI_MAX; afi++) + { + /* ugly, to deal with addition of delegates, part of 0.99.24.1 merge */ + h->un[afi].delegate = route_table_get_default_delegate (); + } + + /* + * initialize the ce import table + */ + h->it_ce = + XCALLOC (MTYPE_RFAPI_IMPORTTABLE, sizeof (struct rfapi_import_table)); + h->it_ce->imported_vpn[AFI_IP] = route_table_init (); + h->it_ce->imported_vpn[AFI_IP6] = route_table_init (); + h->it_ce->imported_encap[AFI_IP] = route_table_init (); + h->it_ce->imported_encap[AFI_IP6] = route_table_init (); + rfapiBgpTableFilteredImport (bgp, h->it_ce, AFI_IP, SAFI_MPLS_VPN); + rfapiBgpTableFilteredImport (bgp, h->it_ce, AFI_IP6, SAFI_MPLS_VPN); + + /* + * Set up work queue for deferred rfapi_close operations + */ + h->deferred_close_q = work_queue_new (bm->master, "rfapi deferred close"); + h->deferred_close_q->spec.workfunc = rfapi_deferred_close_workfunc; + h->deferred_close_q->spec.data = h; + + h->rfp = rfp_start (bm->master, &cfg, &cbm); + bgp->rfapi_cfg = bgp_rfapi_cfg_new (cfg); + if (cbm != NULL) + { + h->rfp_methods = *cbm; + } + return h; +} + +void +bgp_rfapi_destroy (struct bgp *bgp, struct rfapi *h) +{ + if (bgp == NULL || h == NULL) + return; + + if (h->resolve_nve_nexthop) + { + skiplist_free (h->resolve_nve_nexthop); + h->resolve_nve_nexthop = NULL; + } + + route_table_finish (h->it_ce->imported_vpn[AFI_IP]); + route_table_finish (h->it_ce->imported_vpn[AFI_IP6]); + route_table_finish (h->it_ce->imported_encap[AFI_IP]); + route_table_finish (h->it_ce->imported_encap[AFI_IP6]); + + if (h->import_mac) + { + struct rfapi_import_table *it; + void *cursor; + int rc; + + for (cursor = NULL, + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor); + !rc; + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor)) + { + + rfapiImportTableFlush (it); + XFREE (MTYPE_RFAPI_IMPORTTABLE, it); + } + skiplist_free (h->import_mac); + h->import_mac = NULL; + } + + work_queue_free (h->deferred_close_q); + + if (h->rfp != NULL) + rfp_stop (h->rfp); + XFREE (MTYPE_RFAPI_IMPORTTABLE, h->it_ce); + XFREE (MTYPE_RFAPI, h); +} + +struct 
rfapi_import_table * +rfapiImportTableRefAdd (struct bgp *bgp, struct ecommunity *rt_import_list) +{ + struct rfapi *h; + struct rfapi_import_table *it; + afi_t afi; + + h = bgp->rfapi; + assert (h); + + for (it = h->imports; it; it = it->next) + { + if (ecommunity_cmp (it->rt_import_list, rt_import_list)) + break; + } + + zlog_debug ("%s: matched it=%p", __func__, it); + + if (!it) + { + it = + XCALLOC (MTYPE_RFAPI_IMPORTTABLE, sizeof (struct rfapi_import_table)); + assert (it); + it->next = h->imports; + h->imports = it; + + it->rt_import_list = ecommunity_dup (rt_import_list); + it->monitor_exterior_orphans = + skiplist_new (0, NULL, (void (*)(void *)) prefix_free); + + /* + * fill import route tables from RIBs + * + * Potential area for optimization. If this occurs when + * tables are large (e.g., the operator adds a nve group + * with a new RT list to a running system), it could take + * a while. + * + */ + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + it->imported_vpn[afi] = route_table_init (); + it->imported_encap[afi] = route_table_init (); + + rfapiBgpTableFilteredImport (bgp, it, afi, SAFI_MPLS_VPN); + rfapiBgpTableFilteredImport (bgp, it, afi, SAFI_ENCAP); + + vnc_import_bgp_exterior_redist_enable_it (bgp, afi, it); + } + } + + it->refcount += 1; + + return it; +} + +/* + * skiplist element free function + */ +static void +delete_rem_pfx_na_free (void *na) +{ + uint32_t *pCounter = ((struct rfapi_nve_addr *) na)->info; + + *pCounter += 1; + XFREE (MTYPE_RFAPI_NVE_ADDR, na); +} + +/* + * Common deleter for IP and MAC import tables + */ +static void +rfapiDeleteRemotePrefixesIt ( + struct bgp *bgp, + struct rfapi_import_table *it, + struct prefix *un, + struct prefix *vn, + struct prefix *p, + int delete_active, + int delete_holddown, + uint32_t *pARcount, + uint32_t *pAHcount, + uint32_t *pHRcount, + uint32_t *pHHcount, + struct skiplist *uniq_active_nves, + struct skiplist *uniq_holddown_nves) +{ + afi_t afi; + +#if DEBUG_L2_EXTRA + { + char buf_pfx[BUFSIZ]; + + if (p) + { + prefix2str (p, buf_pfx, BUFSIZ); + } + else + { + buf_pfx[0] = '*'; + buf_pfx[1] = 0; + } + + zlog_debug ("%s: entry, p=%s, delete_active=%d, delete_holddown=%d", + __func__, buf_pfx, delete_active, delete_holddown); + } +#endif + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + struct route_table *rt; + struct route_node *rn; + + if (p && (family2afi (p->family) != afi)) + { + continue; + } + + rt = it->imported_vpn[afi]; + if (!rt) + continue; + + zlog_debug ("%s: scanning rt for afi=%d", __func__, afi); + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + struct bgp_info *bi; + struct bgp_info *next; + + if (VNC_DEBUG(IMPORT_DEL_REMOTE)) + { + char p1line[BUFSIZ]; + char p2line[BUFSIZ]; + + prefix2str (p, p1line, BUFSIZ); + prefix2str (&rn->p, p2line, BUFSIZ); + zlog_debug ("%s: want %s, have %s", __func__, p1line, p2line); + } + + if (p && prefix_cmp (p, &rn->p)) + continue; + + { + char buf_pfx[BUFSIZ]; + prefix2str (&rn->p, buf_pfx, BUFSIZ); + zlog_debug ("%s: rn pfx=%s", __func__, buf_pfx); + } + + /* TBD is this valid for afi == AFI_ETHER? 
*/ + RFAPI_CHECK_REFCOUNT (rn, SAFI_MPLS_VPN, 1); + + for (bi = rn->info; bi; bi = next) + { + next = bi->next; + + struct prefix qpt; + struct prefix qct; + int qpt_valid = 0; + int qct_valid = 0; + int is_active = 0; + + zlog_debug ("%s: examining bi %p", __func__, bi); + + if (bi->attr) + { + if (!rfapiGetNexthop (bi->attr, &qpt)) + qpt_valid = 1; + } + if (vn) + { + if (!qpt_valid || !prefix_match (vn, &qpt)) + { +#if DEBUG_L2_EXTRA + zlog_debug + ("%s: continue at vn && !qpt_valid || !prefix_match(vn, &qpt)", + __func__); +#endif + continue; + } + } + + if (!rfapiGetUnAddrOfVpnBi (bi, &qct)) + qct_valid = 1; + + if (un) + { + if (!qct_valid || !prefix_match (un, &qct)) + { +#if DEBUG_L2_EXTRA + zlog_debug + ("%s: continue at un && !qct_valid || !prefix_match(un, &qct)", + __func__); +#endif + continue; + } + } + + + /* + * Blow bi away + */ + /* + * If this route is waiting to be deleted because of + * a previous withdraw, we must cancel its timer. + */ + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + { + if (!delete_holddown) + continue; + if (bi->extra->vnc.import.timer) + { + + struct thread *t = + (struct thread *) bi->extra->vnc.import.timer; + struct rfapi_withdraw *wcb = t->arg; + + wcb->import_table->holddown_count[afi] -= 1; + RFAPI_UPDATE_ITABLE_COUNT (bi, wcb->import_table, afi, + 1); + XFREE (MTYPE_RFAPI_WITHDRAW, wcb); + thread_cancel (t); + } + } + else + { + if (!delete_active) + continue; + is_active = 1; + } + + zlog_debug + ("%s: deleting bi %p (qct_valid=%d, qpt_valid=%d, delete_holddown=%d, delete_active=%d)", + __func__, bi, qct_valid, qpt_valid, delete_holddown, + delete_active); + + + /* + * add nve to list + */ + if (qct_valid && qpt_valid) + { + + struct rfapi_nve_addr na; + struct rfapi_nve_addr *nap; + + memset (&na, 0, sizeof (na)); + assert (!rfapiQprefix2Raddr (&qct, &na.un)); + assert (!rfapiQprefix2Raddr (&qpt, &na.vn)); + + if (skiplist_search ((is_active ? uniq_active_nves : + uniq_holddown_nves), &na, + (void **) &nap)) + { + char line[BUFSIZ]; + + nap = XCALLOC (MTYPE_RFAPI_NVE_ADDR, + sizeof (struct rfapi_nve_addr)); + assert (nap); + *nap = na; + nap->info = is_active ? pAHcount : pHHcount; + skiplist_insert ((is_active ? 
uniq_active_nves : + uniq_holddown_nves), nap, nap); + + rfapiNveAddr2Str (nap, line, BUFSIZ); + } + } + + vnc_direct_bgp_rh_del_route (bgp, afi, &rn->p, bi->peer); + + RFAPI_UPDATE_ITABLE_COUNT (bi, it, afi, -1); + it->holddown_count[afi] += 1; + rfapiExpireVpnNow (it, rn, bi, 1); + + zlog_debug ("%s: incrementing count (is_active=%d)", + __func__, is_active); + + if (is_active) + ++ * pARcount; + else + ++ * pHRcount; + } + } + } +} + + +/* + * For use by the "clear vnc prefixes" command + */ +/*------------------------------------------ + * rfapiDeleteRemotePrefixes + * + * UI helper: For use by the "clear vnc prefixes" command + * + * input: + * un if set, tunnel must match this prefix + * vn if set, nexthop prefix must match this prefix + * p if set, prefix must match this prefix + * + * output + * pARcount number of active routes deleted + * pAHcount number of active nves deleted + * pHRcount number of holddown routes deleted + * pHHcount number of holddown nves deleted + * + * return value: + * void + --------------------------------------------*/ +void +rfapiDeleteRemotePrefixes ( + struct prefix *un, + struct prefix *vn, + struct prefix *p, + int delete_active, + int delete_holddown, + uint32_t *pARcount, + uint32_t *pAHcount, + uint32_t *pHRcount, + uint32_t *pHHcount) +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_import_table *it; + uint32_t deleted_holddown_route_count = 0; + uint32_t deleted_active_route_count = 0; + uint32_t deleted_holddown_nve_count = 0; + uint32_t deleted_active_nve_count = 0; + struct skiplist *uniq_holddown_nves; + struct skiplist *uniq_active_nves; + + VNC_ITRCCK; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + /* If no bgp instantiated yet, no vnc prefixes exist */ + if (!bgp) + return; + + h = bgp->rfapi; + assert (h); + + uniq_holddown_nves = + skiplist_new (0, rfapi_nve_addr_cmp, delete_rem_pfx_na_free); + uniq_active_nves = + skiplist_new (0, rfapi_nve_addr_cmp, delete_rem_pfx_na_free); + + /* + * Iterate over all import tables; do a filtered import + * for the afi/safi combination + */ + + for (it = h->imports; it; it = it->next) + { + + zlog_debug + ("%s: calling rfapiDeleteRemotePrefixesIt() on (IP) import %p", + __func__, it); + + rfapiDeleteRemotePrefixesIt ( + bgp, + it, + un, + vn, + p, + delete_active, + delete_holddown, + &deleted_active_route_count, + &deleted_active_nve_count, + &deleted_holddown_route_count, + &deleted_holddown_nve_count, + uniq_active_nves, + uniq_holddown_nves); + } + + /* + * Now iterate over L2 import tables + */ + if (h->import_mac && !(p && (p->family != AF_ETHERNET))) + { + + void *cursor = NULL; + int rc; + + for (cursor = NULL, + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor); + !rc; + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor)) + { + + zlog_debug + ("%s: calling rfapiDeleteRemotePrefixesIt() on import_mac %p", + __func__, it); + + rfapiDeleteRemotePrefixesIt ( + bgp, + it, + un, + vn, + p, + delete_active, + delete_holddown, + &deleted_active_route_count, + &deleted_active_nve_count, + &deleted_holddown_route_count, + &deleted_holddown_nve_count, + uniq_active_nves, + uniq_holddown_nves); + } + } + + /* + * our custom element freeing function above counts as it deletes + */ + skiplist_free (uniq_holddown_nves); + skiplist_free (uniq_active_nves); + + if (pARcount) + *pARcount = deleted_active_route_count; + if (pAHcount) + *pAHcount = deleted_active_nve_count; + if (pHRcount) + *pHRcount = deleted_holddown_route_count; + if (pHHcount) + 
*pHHcount = deleted_holddown_nve_count; + + VNC_ITRCCK; +} + +/*------------------------------------------ + * rfapiCountRemoteRoutes + * + * UI helper: count VRF routes from BGP side + * + * input: + * + * output + * pALRcount count of active local routes + * pARRcount count of active remote routes + * pHRcount count of holddown routes + * pIRcount count of direct imported routes + * + * return value: + * void + --------------------------------------------*/ +void +rfapiCountAllItRoutes (int *pALRcount, /* active local routes */ + int *pARRcount, /* active remote routes */ + int *pHRcount, /* holddown routes */ + int *pIRcount) /* imported routes */ +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_import_table *it; + afi_t afi; + + int total_active_local = 0; + int total_active_remote = 0; + int total_holddown = 0; + int total_imported = 0; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + assert (bgp); + + h = bgp->rfapi; + assert (h); + + /* + * Iterate over all import tables; do a filtered import + * for the afi/safi combination + */ + + for (it = h->imports; it; it = it->next) + { + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + total_active_local += it->local_count[afi]; + total_active_remote += it->remote_count[afi]; + total_holddown += it->holddown_count[afi]; + total_imported += it->imported_count[afi]; + + } + } + + void *cursor; + int rc; + + if (h->import_mac) + { + for (cursor = NULL, + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor); + !rc; + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor)) + { + + total_active_local += it->local_count[AFI_ETHER]; + total_active_remote += it->remote_count[AFI_ETHER]; + total_holddown += it->holddown_count[AFI_ETHER]; + total_imported += it->imported_count[AFI_ETHER]; + + } + } + + + if (pALRcount) + { + *pALRcount = total_active_local; + } + if (pARRcount) + { + *pARRcount = total_active_remote; + } + if (pHRcount) + { + *pHRcount = total_holddown; + } + if (pIRcount) + { + *pIRcount = total_imported; + } +} + +/*------------------------------------------ + * rfapiGetHolddownFromLifetime + * + * calculate holddown value based on lifetime + * + * input: + * lifetime lifetime + * + * return value: + * Holddown value based on lifetime, holddown_factor, + * and RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY + * + --------------------------------------------*/ +/* hold down time maxes out at RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY */ +uint32_t +rfapiGetHolddownFromLifetime (uint32_t lifetime) +{ + uint32_t factor; + struct bgp *bgp; + + bgp = bgp_get_default (); + if (bgp && bgp->rfapi_cfg) + factor = bgp->rfapi_cfg->rfp_cfg.holddown_factor; + else + factor = RFAPI_RFP_CFG_DEFAULT_HOLDDOWN_FACTOR; + + if (factor < 100 || lifetime < RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY) + lifetime = lifetime * factor / 100; + if (lifetime < RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY) + return lifetime; + else + return RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY; +} diff --git a/bgpd/rfapi/rfapi_import.h b/bgpd/rfapi/rfapi_import.h new file mode 100644 index 0000000000..3a1ae3573e --- /dev/null +++ b/bgpd/rfapi/rfapi_import.h @@ -0,0 +1,283 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * File: rfapi_import.h + * Purpose: Handle import of routes from BGP to RFAPI + */ + +#ifndef QUAGGA_HGP_RFAPI_IMPORT_H +#define QUAGGA_HGP_RFAPI_IMPORT_H + +#include "lib/thread.h" + +/* + * These are per-rt-import-list + * + * routes are not segregated by RD - the RD is stored in bgp_info_extra + * and is needed to determine if two prefixes are the same. + */ +struct rfapi_import_table +{ + struct rfapi_import_table *next; + struct ecommunity *rt_import_list; /* copied from nve grp */ + int refcount; /* nve grps and nves */ + uint32_t l2_logical_net_id; /* L2 only: EVPN Eth Seg Id */ + struct route_table *imported_vpn[AFI_MAX]; + struct rfapi_monitor_vpn *vpn0_queries[AFI_MAX]; + struct rfapi_monitor_eth *eth0_queries; + struct route_table *imported_encap[AFI_MAX]; + struct skiplist *monitor_exterior_orphans; + int local_count[AFI_MAX]; + int remote_count[AFI_MAX]; + int holddown_count[AFI_MAX]; + int imported_count[AFI_MAX]; +}; + +#define RFAPI_LOCAL_BI(bi) \ + (((bi)->type == ZEBRA_ROUTE_BGP) && ((bi)->sub_type == BGP_ROUTE_RFP)) + +#define RFAPI_DIRECT_IMPORT_BI(bi) \ + (((bi)->type == ZEBRA_ROUTE_BGP_DIRECT) || ((bi)->type == ZEBRA_ROUTE_BGP_DIRECT_EXT)) + +#define RFAPI_UPDATE_ITABLE_COUNT(bi, itable, afi, cnt) \ + if (RFAPI_LOCAL_BI(bi)) { \ + (itable)->local_count[(afi)] += (cnt); \ + } else { \ + if (RFAPI_DIRECT_IMPORT_BI(bi)) \ + (itable)->imported_count[(afi)] += (cnt); \ + else \ + (itable)->remote_count[(afi)] += (cnt); \ + } + +extern uint8_t +rfapiRfpCost (struct attr *attr); + +extern void +rfapiDebugBacktrace (void); + +extern void +rfapiCheckRouteCount (void); + +/* + * Print BI in an Import Table + */ +extern void +rfapiPrintBi (void *stream, struct bgp_info *bi); + +extern void +rfapiShowImportTable ( + void *stream, + const char *label, + struct route_table *rt, + int isvpn); + + +extern void +rfapiImportTableRefDelByIt ( + struct bgp *bgp, + struct rfapi_import_table *it_target); + + +/* + * Construct an rfapi nexthop list based on the routes attached to + * the specified node. + * + * If there are any routes that do NOT have BGP_INFO_REMOVED set, + * return those only. If there are ONLY routes with BGP_INFO_REMOVED, + * then return those, and also include all the non-removed routes from the + * next less-specific node (i.e., this node's parent) at the end. 
+ */ +extern struct rfapi_next_hop_entry * +rfapiRouteNode2NextHopList ( + struct route_node *rn, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr, /* omit routes to same NVE */ + struct route_table *rfd_rib_table, /* preload this NVE rib table */ + struct prefix *pfx_target_original); /* query target */ + +extern struct rfapi_next_hop_entry * +rfapiRouteTable2NextHopList ( + struct route_table *rt, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr, /* omit routes to same NVE */ + struct route_table *rfd_rib_table, /* preload this NVE rib table */ + struct prefix *pfx_target_original); /* query target */ + +extern struct rfapi_next_hop_entry * +rfapiEthRouteTable2NextHopList ( + uint32_t logical_net_id, + struct rfapi_ip_prefix *rprefix, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr, /* omit routes to same NVE */ + struct route_table *rib_route_table,/* preload NVE rib node */ + struct prefix *pfx_target_original); /* query target */ + +extern int +rfapiEcommunitiesIntersect (struct ecommunity *e1, struct ecommunity *e2); + +extern void +rfapiCheckRefcount (struct route_node *rn, safi_t safi, int lockoffset); + +extern int +rfapiHasNonRemovedRoutes (struct route_node *rn); + +extern int +rfapiProcessDeferredClose (struct thread *t); + +extern int +rfapiGetUnAddrOfVpnBi (struct bgp_info *bi, struct prefix *p); + +extern void +rfapiNexthop2Prefix (struct attr *attr, struct prefix *p); + +extern void +rfapiUnicastNexthop2Prefix ( + afi_t afi, + struct attr *attr, + struct prefix *p); + +/* Filtered Import Function actions */ +#define FIF_ACTION_UPDATE 0 +#define FIF_ACTION_WITHDRAW 1 +#define FIF_ACTION_KILL 2 + +extern void +rfapiBgpInfoFilteredImportVPN ( + struct rfapi_import_table *import_table, + int action, + struct peer *peer, + void *rfd, /* set for looped back routes */ + struct prefix *p, + struct prefix *aux_prefix, /* AFI_ETHER: optional IP */ + afi_t afi, + struct prefix_rd *prd, + struct attr *attr, /* part of bgp_info */ + u_char type, /* part of bgp_info */ + u_char sub_type, /* part of bgp_info */ + uint32_t *label); /* part of bgp_info */ + +extern struct rfapi_next_hop_entry * +rfapiEthRouteNode2NextHopList ( + struct route_node *rn, + struct rfapi_ip_prefix *rprefix, + uint32_t lifetime, /* put into nexthop entries */ + struct rfapi_ip_addr *exclude_vnaddr, /* omit routes to same NVE */ + struct route_table *rib_route_table,/* preload NVE rib table */ + struct prefix *pfx_target_original); /* query target */ + +extern struct rfapi_import_table * +rfapiMacImportTableGetNoAlloc ( + struct bgp *bgp, + uint32_t lni); + +extern struct rfapi_import_table * +rfapiMacImportTableGet ( + struct bgp *bgp, + uint32_t lni); + +extern int +rfapiGetL2o ( + struct attr *attr, + struct rfapi_l2address_option *l2o); + +extern int rfapiEcommunityGetLNI ( + struct ecommunity *ecom, + uint32_t *lni); + + +/* enable for debugging; disable for performance */ +#if 0 +#define RFAPI_CHECK_REFCOUNT(rn, safi, lo) rfapiCheckRefcount((rn),(safi),(lo)) +#else +#define RFAPI_CHECK_REFCOUNT(rn, safi, lo) {} +#endif + +/*------------------------------------------ + * rfapiDeleteRemotePrefixes + * + * UI helper: For use by the "clear vnc prefixes" command + * + * input: + * un if set, tunnel must match this prefix + * vn if set, nexthop prefix must match this prefix + * p if set, prefix must match this prefix + * + * output + * pARcount number of active routes deleted + * pAHcount 
number of active nves deleted + * pHRcount number of holddown routes deleted + * pHHcount number of holddown nves deleted + * + * return value: + * void + --------------------------------------------*/ +extern void +rfapiDeleteRemotePrefixes ( + struct prefix *un, + struct prefix *vn, + struct prefix *p, + int delete_active, + int delete_holddown, + uint32_t *pARcount, /* active routes */ + uint32_t *pAHcount, /* active nves */ + uint32_t *pHRcount, /* holddown routes */ + uint32_t *pHHcount); /* holddown nves */ + +/*------------------------------------------ + * rfapiCountAllItRoutes + * + * UI helper: count VRF routes from BGP side + * + * input: + * + * output + * pARcount count of active routes + * pHRcount count of holddown routes + * pIRcount count of direct imported routes + * + * return value: + * void + --------------------------------------------*/ +extern void +rfapiCountAllItRoutes ( + int *pALRcount, /* active local routes */ + int *pARRcount, /* active remote routes */ + int *pHRcount, /* holddown routes */ + int *pIRcount); /* direct imported routes */ + +/*------------------------------------------ + * rfapiGetHolddownFromLifetime + * + * calculate holddown value based on lifetime + * + * input: + * lifetime lifetime + * + * return value: + * Holddown value based on lifetime, holddown_factor, + * and RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY + * + --------------------------------------------*/ +extern uint32_t +rfapiGetHolddownFromLifetime (uint32_t lifetime); + +#endif /* QUAGGA_HGP_RFAPI_IMPORT_H */ diff --git a/bgpd/rfapi/rfapi_monitor.c b/bgpd/rfapi/rfapi_monitor.c new file mode 100644 index 0000000000..216b45eec8 --- /dev/null +++ b/bgpd/rfapi/rfapi_monitor.c @@ -0,0 +1,1701 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +/* + * File: rfapi_monitor.c + */ + +/* TBD remove unneeded includes */ + +#include + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/log.h" +#include "lib/table.h" +#include "lib/skiplist.h" + +#include "bgpd/bgpd.h" + +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_backend.h" + +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/vnc_import_bgp.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/rfapi_rib.h" + +#define DEBUG_L2_EXTRA 0 +#define DEBUG_DUP_CHECK 0 +#define DEBUG_ETH_SL 0 + +static void +rfapiMonitorTimerRestart (struct rfapi_monitor_vpn *m); + +static void +rfapiMonitorEthTimerRestart (struct rfapi_monitor_eth *m); + +/* + * Forward declarations + */ +static void +rfapiMonitorEthDetachImport (struct bgp *bgp, struct rfapi_monitor_eth *mon); + +#if DEBUG_ETH_SL +/* + * Debug function, special case + */ +void +rfapiMonitorEthSlCheck( + struct route_node *rn, + const char *tag1, + const char *tag2) +{ + struct route_node *rn_saved = NULL; + static struct skiplist *sl_saved = NULL; + struct skiplist *sl; + + if (!rn) + return; + + if (rn_saved && (rn != rn_saved)) + return; + + if (!rn_saved) + rn_saved = rn; + + sl = RFAPI_MONITOR_ETH(rn); + if (sl || sl_saved) + { + zlog_debug("%s[%s%s]: rn=%p, rn->lock=%d, old sl=%p, new sl=%p", + __func__, (tag1? tag1: ""), (tag2? tag2: ""), rn, rn->lock, + sl_saved, sl); + sl_saved = sl; + } +} +#endif + +/* + * Debugging function that aborts when it finds monitors whose + * "next" pointer * references themselves + */ +void +rfapiMonitorLoopCheck (struct rfapi_monitor_vpn *mchain) +{ + struct rfapi_monitor_vpn *m; + + for (m = mchain; m; m = m->next) + assert (m != m->next); +} + +#if DEBUG_DUP_CHECK +/* + * Debugging code: see if a monitor is mentioned more than once + * in a HD's monitor list + */ +void +rfapiMonitorDupCheck (struct bgp *bgp) +{ + struct listnode *hnode; + struct rfapi_descriptor *rfd; + + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, hnode, rfd)) + { + struct route_node *mrn; + + if (!rfd->mon) + continue; + + for (mrn = route_top (rfd->mon); mrn; mrn = route_next (mrn)) + { + struct rfapi_monitor_vpn *m; + for (m = (struct rfapi_monitor_vpn *) (mrn->info); m; m = m->next) + m->dcount = 0; + } + } + + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, hnode, rfd)) + { + struct route_node *mrn; + + if (!rfd->mon) + continue; + + for (mrn = route_top (rfd->mon); mrn; mrn = route_next (mrn)) + { + struct rfapi_monitor_vpn *m; + + for (m = (struct rfapi_monitor_vpn *) (mrn->info); m; m = m->next) + assert (++m->dcount == 1); + } + } +} +#endif + +/* debug */ +void +rfapiMonitorCleanCheck (struct bgp *bgp) +{ + struct listnode *hnode; + struct rfapi_descriptor *rfd; + + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, hnode, rfd)) + { + assert (!rfd->import_table->vpn0_queries[AFI_IP]); + assert (!rfd->import_table->vpn0_queries[AFI_IP6]); + + struct route_node *rn; + + for (rn = route_top (rfd->import_table->imported_vpn[AFI_IP]); rn; + rn = route_next (rn)) + { + + assert (!RFAPI_MONITOR_VPN (rn)); + } + for (rn = route_top (rfd->import_table->imported_vpn[AFI_IP6]); rn; + rn = route_next (rn)) + { + + assert (!RFAPI_MONITOR_VPN (rn)); + } + } +} + +/* debug */ +void +rfapiMonitorCheckAttachAllowed (void) +{ + struct bgp *bgp = 
bgp_get_default (); + assert (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)); +} + +void +rfapiMonitorExtraFlush (safi_t safi, struct route_node *rn) +{ + struct rfapi_it_extra *hie; + struct rfapi_monitor_vpn *v; + struct rfapi_monitor_vpn *v_next; + struct rfapi_monitor_encap *e = NULL; + struct rfapi_monitor_encap *e_next = NULL; + + if (!rn) + return; + + if (!rn->aggregate) + return; + + hie = (struct rfapi_it_extra *) (rn->aggregate); + + switch (safi) + { + case SAFI_ENCAP: + for (e = hie->u.encap.e; e; e = e_next) + { + e_next = e->next; + e->next = NULL; + XFREE (MTYPE_RFAPI_MONITOR_ENCAP, e); + route_unlock_node (rn); + } + hie->u.encap.e = NULL; + break; + + case SAFI_MPLS_VPN: + for (v = hie->u.vpn.v; v; v = v_next) + { + v_next = v->next; + v->next = NULL; + XFREE (MTYPE_RFAPI_MONITOR, v); + route_unlock_node (rn); + } + hie->u.vpn.v = NULL; + if (hie->u.vpn.e.source) + { + while (!skiplist_delete_first (hie->u.vpn.e.source)) + { + route_unlock_node (rn); + } + skiplist_free (hie->u.vpn.e.source); + hie->u.vpn.e.source = NULL; + route_unlock_node (rn); + } + if (hie->u.vpn.idx_rd) + { + /* looping through bi->extra->vnc.import.rd is tbd */ + while (!skiplist_delete_first (hie->u.vpn.idx_rd)) + { + route_unlock_node (rn); + } + skiplist_free (hie->u.vpn.idx_rd); + hie->u.vpn.idx_rd = NULL; + route_unlock_node (rn); + } + if (hie->u.vpn.mon_eth) + { + while (!skiplist_delete_first (hie->u.vpn.mon_eth)) + { + route_unlock_node (rn); + } + skiplist_free (hie->u.vpn.mon_eth); + hie->u.vpn.mon_eth = NULL; + route_unlock_node (rn); + } + break; + + default: + assert (0); + } + XFREE (MTYPE_RFAPI_IT_EXTRA, hie); + rn->aggregate = NULL; + route_unlock_node (rn); +} + +/* + * If the child lists are empty, release the rfapi_it_extra struct + */ +void +rfapiMonitorExtraPrune (safi_t safi, struct route_node *rn) +{ + struct rfapi_it_extra *hie; + + if (!rn) + return; + + if (!rn->aggregate) + return; + + hie = (struct rfapi_it_extra *) (rn->aggregate); + + switch (safi) + { + case SAFI_ENCAP: + if (hie->u.encap.e) + return; + break; + + case SAFI_MPLS_VPN: + if (hie->u.vpn.v) + return; + if (hie->u.vpn.mon_eth) + { + if (skiplist_count (hie->u.vpn.mon_eth)) + return; + skiplist_free (hie->u.vpn.mon_eth); + hie->u.vpn.mon_eth = NULL; + route_unlock_node (rn); /* uncount skiplist */ + } + if (hie->u.vpn.e.source) + { + if (skiplist_count (hie->u.vpn.e.source)) + return; + skiplist_free (hie->u.vpn.e.source); + hie->u.vpn.e.source = NULL; + route_unlock_node (rn); + } + if (hie->u.vpn.idx_rd) + { + if (skiplist_count (hie->u.vpn.idx_rd)) + return; + skiplist_free (hie->u.vpn.idx_rd); + hie->u.vpn.idx_rd = NULL; + route_unlock_node (rn); + } + if (hie->u.vpn.mon_eth) + { + if (skiplist_count (hie->u.vpn.mon_eth)) + return; + skiplist_free (hie->u.vpn.mon_eth); + hie->u.vpn.mon_eth = NULL; + route_unlock_node (rn); + } + break; + + default: + assert (0); + } + XFREE (MTYPE_RFAPI_IT_EXTRA, hie); + rn->aggregate = NULL; + route_unlock_node (rn); +} + +/* + * returns locked node + */ +struct route_node * +rfapiMonitorGetAttachNode (struct rfapi_descriptor *rfd, struct prefix *p) +{ + afi_t afi; + struct route_node *rn; + + if (RFAPI_0_PREFIX (p)) + { + assert (1); + } + + afi = family2afi (p->family); + assert (afi); + + /* + * It's possible that even though there is a route at this node, + * there are no routes with valid UN addresses (i.e., with no + * valid tunnel routes). Check for that and walk back up the + * tree if necessary. 
+ * + * When the outer loop completes, the matched node, if any, is + * locked (i.e., its reference count has been incremented) to + * account for the VPN monitor we are about to attach. + * + * if a monitor is moved to another node, there must be + * corresponding unlock/locks + */ + for (rn = route_node_match (rfd->import_table->imported_vpn[afi], p); rn;) + { + + struct bgp_info *bi; + struct prefix pfx_dummy; + + /* TBD update this code to use new valid_interior_count */ + for (bi = rn->info; bi; bi = bi->next) + { + /* + * If there is a cached ENCAP UN address, it's a usable + * VPN route + */ + if (bi->extra && bi->extra->vnc.import.un_family) + { + break; + } + + /* + * Or if there is a valid Encap Attribute tunnel subtlv address, + * it's a usable VPN route. + */ + if (!rfapiGetVncTunnelUnAddr (bi->attr, &pfx_dummy)) + { + break; + } + } + if (bi) + break; + + route_unlock_node (rn); + if ((rn = rn->parent)) + { + route_lock_node (rn); + } + } + + if (!rn) + { + struct prefix pfx_default; + + memset (&pfx_default, 0, sizeof (pfx_default)); + pfx_default.family = p->family; + + /* creates default node if none exists, and increments ref count */ + rn = + route_node_get (rfd->import_table->imported_vpn[afi], &pfx_default); + } + + return rn; +} + +/* + * If this function happens to attach the monitor to a radix tree + * node (as opposed to the 0-prefix list), the node pointer is + * returned (for the benefit of caller which might like to use it + * to generate an immediate query response). + */ +static struct route_node * +rfapiMonitorAttachImport (struct rfapi_descriptor *rfd, + struct rfapi_monitor_vpn *m) +{ + struct route_node *rn; + + rfapiMonitorCheckAttachAllowed (); + + if (RFAPI_0_PREFIX (&m->p)) + { + /* + * Add new monitor entry to vpn0 list + */ + afi_t afi; + + afi = family2afi (m->p.family); + assert (afi); + + m->next = rfd->import_table->vpn0_queries[afi]; + rfd->import_table->vpn0_queries[afi] = m; + zlog_debug ("%s: attached monitor %p to vpn0 list", __func__, m); + return NULL; + } + + /* + * Attach new monitor entry to import table node + */ + rn = rfapiMonitorGetAttachNode (rfd, &m->p); /* returns locked rn */ + m->node = rn; + m->next = RFAPI_MONITOR_VPN (rn); + RFAPI_MONITOR_VPN_W_ALLOC (rn) = m; + RFAPI_CHECK_REFCOUNT (rn, SAFI_MPLS_VPN, 0); + zlog_debug ("%s: attached monitor %p to rn %p", __func__, m, rn); + return rn; +} + + +/* + * reattach monitors for this HD to import table + */ +void +rfapiMonitorAttachImportHd (struct rfapi_descriptor *rfd) +{ + struct route_node *mrn; + + if (!rfd->mon) + { + /* + * No monitors for this HD + */ + return; + } + + for (mrn = route_top (rfd->mon); mrn; mrn = route_next (mrn)) + { + + if (!mrn->info) + continue; + + (void) rfapiMonitorAttachImport (rfd, + (struct rfapi_monitor_vpn + *) (mrn->info)); + } +} + +/* + * Adds a monitor for a query to the NVE descriptor's list + * and, if callbacks are enabled, attaches it to the import table. + * + * If we happened to locate the import table radix tree attachment + * point, return it so the caller can use it to generate a query + * response without repeating the lookup. Note that when callbacks + * are disabled, this function will not perform a lookup, and the + * caller will have to do its own lookup. 
+ */ +struct route_node * +rfapiMonitorAdd ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *p) +{ + struct rfapi_monitor_vpn *m; + struct route_node *rn; + + /* + * Initialize nve's monitor list if needed + * NB use the same radix tree for IPv4 and IPv6 targets. + * The prefix will always have full-length mask (/32, /128) + * or be 0/0 so they won't get mixed up. + */ + if (!rfd->mon) + { + rfd->mon = route_table_init (); + } + rn = route_node_get (rfd->mon, p); + if (rn->info) + { + /* + * received this query before, no further action needed + */ + rfapiMonitorTimerRestart ((struct rfapi_monitor_vpn *) rn->info); + route_unlock_node (rn); + return NULL; + } + + /* + * New query for this nve, record it in the HD + */ + rn->info = XCALLOC (MTYPE_RFAPI_MONITOR, sizeof (struct rfapi_monitor_vpn)); + m = (struct rfapi_monitor_vpn *) (rn->info); + m->rfd = rfd; + prefix_copy (&m->p, p); + + ++rfd->monitor_count; + ++bgp->rfapi->monitor_count; + + rfapiMonitorTimerRestart (m); + + if (bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE) + { + /* + * callbacks turned off, so don't attach monitor to import table + */ + return NULL; + } + + + /* + * attach to import table + */ + return rfapiMonitorAttachImport (rfd, m); +} + +/* + * returns monitor pointer if found, NULL if not + */ +static struct rfapi_monitor_vpn * +rfapiMonitorDetachImport (struct rfapi_monitor_vpn *m) +{ + struct rfapi_monitor_vpn *prev; + struct rfapi_monitor_vpn *this = NULL; + + if (RFAPI_0_PREFIX (&m->p)) + { + afi_t afi; + + /* + * 0-prefix monitors are stored in a special list and not + * in the import VPN tree + */ + + afi = family2afi (m->p.family); + assert (afi); + + if (m->rfd->import_table) + { + for (prev = NULL, this = m->rfd->import_table->vpn0_queries[afi]; + this; prev = this, this = this->next) + { + + if (this == m) + break; + } + if (this) + { + if (!prev) + { + m->rfd->import_table->vpn0_queries[afi] = this->next; + } + else + { + prev->next = this->next; + } + } + } + } + else + { + + if (m->node) + { + for (prev = NULL, + this = RFAPI_MONITOR_VPN (m->node); + this; prev = this, this = this->next) + { + + if (this == m) + break; + } + if (this) + { + if (prev) + { + prev->next = this->next; + } + else + { + RFAPI_MONITOR_VPN_W_ALLOC (m->node) = this->next; + } + RFAPI_CHECK_REFCOUNT (m->node, SAFI_MPLS_VPN, 1); + route_unlock_node (m->node); + } + m->node = NULL; + } + } + return this; +} + + +void +rfapiMonitorDetachImportHd (struct rfapi_descriptor *rfd) +{ + struct route_node *rn; + + if (!rfd->mon) + return; + + for (rn = route_top (rfd->mon); rn; rn = route_next (rn)) + { + if (rn->info) + { + rfapiMonitorDetachImport ((struct rfapi_monitor_vpn *) (rn->info)); + } + } +} + +void +rfapiMonitorDel ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *p) +{ + struct route_node *rn; + struct rfapi_monitor_vpn *m; + + assert (rfd->mon); + rn = route_node_get (rfd->mon, p); /* locks node */ + m = rn->info; + + assert (m); + + /* + * remove from import table + */ + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)) + { + rfapiMonitorDetachImport (m); + } + + if (m->timer) + { + thread_cancel (m->timer); + m->timer = NULL; + } + + /* + * remove from rfd list + */ + XFREE (MTYPE_RFAPI_MONITOR, m); + rn->info = NULL; + route_unlock_node (rn); /* undo original lock when created */ + route_unlock_node (rn); /* undo lock in route_node_get */ + + --rfd->monitor_count; + --bgp->rfapi->monitor_count; +} + +/* + * returns count of monitors deleted + */ +int 
+rfapiMonitorDelHd (struct rfapi_descriptor *rfd) +{ + struct route_node *rn; + struct bgp *bgp; + int count = 0; + + zlog_debug ("%s: entry rfd=%p", __func__, rfd); + + bgp = bgp_get_default (); + + if (rfd->mon) + { + for (rn = route_top (rfd->mon); rn; rn = route_next (rn)) + { + struct rfapi_monitor_vpn *m; + if ((m = rn->info)) + { + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)) + { + rfapiMonitorDetachImport (m); + } + + if (m->timer) + { + thread_cancel (m->timer); + m->timer = NULL; + } + + XFREE (MTYPE_RFAPI_MONITOR, m); + rn->info = NULL; + route_unlock_node (rn); /* undo original lock when created */ + ++count; + --rfd->monitor_count; + --bgp->rfapi->monitor_count; + } + } + route_table_finish (rfd->mon); + rfd->mon = NULL; + } + + if (rfd->mon_eth) + { + + struct rfapi_monitor_eth *mon_eth; + + while (!skiplist_first (rfd->mon_eth, NULL, (void **) &mon_eth)) + { + + int rc; + + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)) + { + rfapiMonitorEthDetachImport (bgp, mon_eth); + } + else + { +#if DEBUG_L2_EXTRA + zlog_debug + ("%s: callbacks disabled, not attempting to detach mon_eth %p", + __func__, mon_eth); +#endif + } + + if (mon_eth->timer) + { + thread_cancel (mon_eth->timer); + mon_eth->timer = NULL; + } + + /* + * remove from rfd list + */ + rc = skiplist_delete (rfd->mon_eth, mon_eth, mon_eth); + assert (!rc); + + zlog_debug ("%s: freeing mon_eth %p", __func__, mon_eth); + XFREE (MTYPE_RFAPI_MONITOR_ETH, mon_eth); + + ++count; + --rfd->monitor_count; + --bgp->rfapi->monitor_count; + } + skiplist_free (rfd->mon_eth); + rfd->mon_eth = NULL; + + } + + return count; +} + +void +rfapiMonitorResponseRemovalOff (struct bgp *bgp) +{ + if (bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE) + { + return; + } + bgp->rfapi_cfg->flags |= BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE; +} + +void +rfapiMonitorResponseRemovalOn (struct bgp *bgp) +{ + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE)) + { + return; + } + bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE; +} + +static int +rfapiMonitorTimerExpire (struct thread *t) +{ + struct rfapi_monitor_vpn *m = t->arg; + + /* forget reference to thread, it's gone */ + m->timer = NULL; + + /* delete the monitor */ + rfapiMonitorDel (bgp_get_default (), m->rfd, &m->p); + + return 0; +} + +static void +rfapiMonitorTimerRestart (struct rfapi_monitor_vpn *m) +{ + if (m->timer) + { + unsigned long remain = thread_timer_remain_second (m->timer); + + /* unexpected case, but avoid wraparound problems below */ + if (remain > m->rfd->response_lifetime) + return; + + /* don't restart if we just restarted recently */ + if (m->rfd->response_lifetime - remain < 2) + return; + + thread_cancel (m->timer); + m->timer = NULL; + } + + { + char buf[BUFSIZ]; + + zlog_debug ("%s: target %s life %u", __func__, + rfapi_ntop (m->p.family, m->p.u.val, buf, BUFSIZ), + m->rfd->response_lifetime); + } + m->timer = thread_add_timer (bm->master, rfapiMonitorTimerExpire, m, + m->rfd->response_lifetime); +} + +/* + * called when an updated response is sent to the NVE. Per + * ticket 255, restart timers for any monitors that could have + * been responsible for the response, i.e., any monitors for + * the exact prefix or a parent of it. 
+ */ +void +rfapiMonitorTimersRestart (struct rfapi_descriptor *rfd, struct prefix *p) +{ + struct route_node *rn; + + if (AF_ETHERNET == p->family) + { + struct rfapi_monitor_eth *mon_eth; + int rc; + void *cursor; + + /* + * XXX match any LNI + */ + for (cursor = NULL, + rc = + skiplist_next (rfd->mon_eth, NULL, (void **) &mon_eth, &cursor); + rc == 0; + rc = + skiplist_next (rfd->mon_eth, NULL, (void **) &mon_eth, &cursor)) + { + + if (!memcmp (mon_eth->macaddr.octet, p->u.prefix_eth.octet, + ETHER_ADDR_LEN)) + { + + rfapiMonitorEthTimerRestart (mon_eth); + + } + } + + } + else + { + for (rn = route_top (rfd->mon); rn; rn = route_next (rn)) + { + struct rfapi_monitor_vpn *m; + + if (!((m = rn->info))) + continue; + + /* NB order of test is significant ! */ + if (!m->node || prefix_match (&m->node->p, p)) + { + rfapiMonitorTimerRestart (m); + } + } + } +} + +/* + * Find monitors at this node and all its parents. Call + * rfapiRibUpdatePendingNode with this node and all corresponding NVEs. + */ +void +rfapiMonitorItNodeChanged ( + struct rfapi_import_table *import_table, + struct route_node *it_node, + struct rfapi_monitor_vpn *monitor_list) /* for base it node, NULL=all */ +{ + struct skiplist *nves_seen; + struct route_node *rn = it_node; + struct bgp *bgp = bgp_get_default (); + afi_t afi = family2afi (rn->p.family); +#if DEBUG_L2_EXTRA + char buf_prefix[BUFSIZ]; +#endif + + assert (bgp); + assert (import_table); + + nves_seen = skiplist_new (0, NULL, NULL); + +#if DEBUG_L2_EXTRA + prefix2str (&it_node->p, buf_prefix, BUFSIZ); + zlog_debug ("%s: it=%p, it_node=%p, it_node->prefix=%s", + __func__, import_table, it_node, buf_prefix); +#endif + + if (AFI_ETHER == afi) + { + struct rfapi_monitor_eth *m; + struct skiplist *sl; + void *cursor; + int rc; + + if ((sl = RFAPI_MONITOR_ETH (rn))) + { + + for (cursor = NULL, + rc = skiplist_next (sl, NULL, (void **) &m, (void **) &cursor); + !rc; + rc = skiplist_next (sl, NULL, (void **) &m, (void **) &cursor)) + { + + if (skiplist_search (nves_seen, m->rfd, NULL)) + { + /* + * Haven't done this NVE yet. Add to "seen" list. + */ + assert (!skiplist_insert (nves_seen, m->rfd, NULL)); + + /* + * update its RIB + */ + rfapiRibUpdatePendingNode(bgp, m->rfd, import_table, + it_node, m->rfd->response_lifetime); + } + } + } + + } + else + { + + struct rfapi_monitor_vpn *m; + + if (monitor_list) + { + m = monitor_list; + } + else + { + m = RFAPI_MONITOR_VPN (rn); + } + + do + { + /* + * If we have reached the root node (parent==NULL) and there + * are no routes here (info==NULL), and the IT node that + * changed was not the root node (it_node->parent != NULL), + * then any monitors at this node are here because they had + * no match at all. Therefore, do not send route updates to them + * because we haven't sent them an initial route. + */ + if (!rn->parent && !rn->info && it_node->parent) + break; + + for (; m; m = m->next) + { + + if (RFAPI_0_PREFIX (&m->p)) + { + /* shouldn't happen, but be safe */ + continue; + } + if (skiplist_search (nves_seen, m->rfd, NULL)) + { + /* + * Haven't done this NVE yet. Add to "seen" list. 
+ */ + assert (!skiplist_insert (nves_seen, m->rfd, NULL)); + + { + char buf_attach_pfx[BUFSIZ]; + char buf_target_pfx[BUFSIZ]; + + prefix2str (&m->node->p, buf_attach_pfx, BUFSIZ); + prefix2str (&m->p, buf_target_pfx, BUFSIZ); + zlog_debug + ("%s: update rfd %p attached to pfx %s (targ=%s)", + __func__, m->rfd, buf_attach_pfx, buf_target_pfx); + } + + /* + * update its RIB + */ + rfapiRibUpdatePendingNode(bgp, m->rfd, import_table, + it_node, m->rfd->response_lifetime); + } + } + rn = rn->parent; + if (rn) + m = RFAPI_MONITOR_VPN (rn); + } + while (rn); + } + + /* + * All-routes L2 monitors + */ + if (AFI_ETHER == afi) + { + struct rfapi_monitor_eth *e; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: checking L2 all-routes monitors", __func__); +#endif + + for (e = import_table->eth0_queries; e; e = e->next) + { +#if DEBUG_L2_EXTRA + zlog_debug ("%s: checking eth0 mon=%p", __func__, e); +#endif + if (skiplist_search (nves_seen, e->rfd, NULL)) + { + /* + * Haven't done this NVE yet. Add to "seen" list. + */ + assert (!skiplist_insert (nves_seen, e->rfd, NULL)); + + /* + * update its RIB + */ +#if DEBUG_L2_EXTRA + zlog_debug ("%s: found L2 all-routes monitor %p", __func__, e); +#endif + rfapiRibUpdatePendingNode (bgp, e->rfd, import_table, it_node, + e->rfd->response_lifetime); + } + } + } + else + { + struct rfapi_monitor_vpn *m; + + /* + * All-routes IPv4. IPv6 monitors + */ + for (m = import_table->vpn0_queries[afi]; m; m = m->next) + { + if (skiplist_search (nves_seen, m->rfd, NULL)) + { + /* + * Haven't done this NVE yet. Add to "seen" list. + */ + assert (!skiplist_insert (nves_seen, m->rfd, NULL)); + + /* + * update its RIB + */ + rfapiRibUpdatePendingNode (bgp, m->rfd, import_table, it_node, + m->rfd->response_lifetime); + } + } + } + + skiplist_free (nves_seen); +} + +/* + * For the listed monitors, update new node and its subtree, but + * omit old node and its subtree + */ +void +rfapiMonitorMovedUp ( + struct rfapi_import_table *import_table, + struct route_node *old_node, + struct route_node *new_node, + struct rfapi_monitor_vpn *monitor_list) +{ + struct bgp *bgp = bgp_get_default (); + struct rfapi_monitor_vpn *m; + + assert (new_node); + assert (old_node); + assert (new_node != old_node); + + /* + * If new node is 0/0 and there is no route there, don't + * generate an update because it will not contain any + * routes including the target. 
+ */ + if (!new_node->parent && !new_node->info) + { + zlog_debug ("%s: new monitor at 0/0 and no routes, no updates", + __func__); + return; + } + + for (m = monitor_list; m; m = m->next) + { + rfapiRibUpdatePendingNode (bgp, m->rfd, import_table, new_node, + m->rfd->response_lifetime); + rfapiRibUpdatePendingNodeSubtree (bgp, m->rfd, import_table, new_node, + old_node, m->rfd->response_lifetime); + } +} + +static int +rfapiMonitorEthTimerExpire (struct thread *t) +{ + struct rfapi_monitor_eth *m = t->arg; + + /* forget reference to thread, it's gone */ + m->timer = NULL; + + /* delete the monitor */ + rfapiMonitorEthDel (bgp_get_default (), m->rfd, &m->macaddr, + m->logical_net_id); + + return 0; +} + +static void +rfapiMonitorEthTimerRestart (struct rfapi_monitor_eth *m) +{ + if (m->timer) + { + unsigned long remain = thread_timer_remain_second (m->timer); + + /* unexpected case, but avoid wraparound problems below */ + if (remain > m->rfd->response_lifetime) + return; + + /* don't restart if we just restarted recently */ + if (m->rfd->response_lifetime - remain < 2) + return; + + thread_cancel (m->timer); + m->timer = NULL; + } + + { + char buf[BUFSIZ]; + + zlog_debug ("%s: target %s life %u", __func__, + rfapiEthAddr2Str (&m->macaddr, buf, BUFSIZ), + m->rfd->response_lifetime); + } + m->timer = thread_add_timer (bm->master, rfapiMonitorEthTimerExpire, m, + m->rfd->response_lifetime); +} + +static int +mon_eth_cmp (void *a, void *b) +{ + struct rfapi_monitor_eth *m1; + struct rfapi_monitor_eth *m2; + + int i; + + m1 = (struct rfapi_monitor_eth *) a; + m2 = (struct rfapi_monitor_eth *) b; + + /* + * compare ethernet addresses + */ + for (i = 0; i < ETHER_ADDR_LEN; ++i) + { + if (m1->macaddr.octet[i] != m2->macaddr.octet[i]) + return (m1->macaddr.octet[i] - m2->macaddr.octet[i]); + } + + /* + * compare LNIs + */ + return (m1->logical_net_id - m2->logical_net_id); +} + +static void +rfapiMonitorEthAttachImport ( + struct rfapi_import_table *it, + struct route_node *rn, /* it node attach point if non-0 */ + struct rfapi_monitor_eth *mon) /* monitor struct to attach */ +{ + struct skiplist *sl; + int rc; + + zlog_debug ("%s: it=%p", __func__, it); + + rfapiMonitorCheckAttachAllowed (); + + if (RFAPI_0_ETHERADDR (&mon->macaddr)) + { + /* + * These go on a different list + */ + mon->next = it->eth0_queries; + it->eth0_queries = mon; +#if DEBUG_L2_EXTRA + zlog_debug ("%s: attached monitor %p to eth0 list", __func__, mon); +#endif + return; + } + + if (rn == NULL) + { +#if DEBUG_L2_EXTRA + zlog_debug ("%s: rn is null!", __func__); +#endif + return; + } + + /* + * Get sl to attach to + */ + sl = RFAPI_MONITOR_ETH_W_ALLOC (rn); + if (!sl) + { + sl = RFAPI_MONITOR_ETH_W_ALLOC (rn) = skiplist_new (0, NULL, NULL); + route_lock_node(rn); /* count skiplist mon_eth */ + } + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: rn=%p, rn->lock=%d, sl=%p, attaching eth mon %p", + __func__, rn, rn->lock, sl, mon); +#endif + + rc = skiplist_insert (sl, (void *) mon, (void *) mon); + assert (!rc); + + /* count eth monitor */ + route_lock_node(rn); +} + +/* + * reattach monitors for this HD to import table + */ +static void +rfapiMonitorEthAttachImportHd (struct bgp *bgp, struct rfapi_descriptor *rfd) +{ + void *cursor; + struct rfapi_monitor_eth *mon; + int rc; + + if (!rfd->mon_eth) + { + /* + * No monitors for this HD + */ + return; + } + + for (cursor = NULL, + rc = skiplist_next (rfd->mon_eth, NULL, (void **) &mon, &cursor); + rc == 0; + rc = skiplist_next (rfd->mon_eth, NULL, (void **) &mon, &cursor)) + { + + struct 
rfapi_import_table *it; + struct prefix pfx_mac_buf; + struct route_node *rn; + + it = rfapiMacImportTableGet (bgp, mon->logical_net_id); + assert (it); + + memset ((void *) &pfx_mac_buf, 0, sizeof (struct prefix)); + pfx_mac_buf.family = AF_ETHERNET; + pfx_mac_buf.prefixlen = 48; + pfx_mac_buf.u.prefix_eth = mon->macaddr; + + rn = route_node_get (it->imported_vpn[AFI_ETHER], &pfx_mac_buf); + assert (rn); + + (void) rfapiMonitorEthAttachImport (it, rn, mon); + } +} + +static void +rfapiMonitorEthDetachImport ( + struct bgp *bgp, + struct rfapi_monitor_eth *mon) /* monitor struct to detach */ +{ + struct rfapi_import_table *it; + struct prefix pfx_mac_buf; + struct skiplist *sl; + struct route_node *rn; + int rc; + + it = rfapiMacImportTableGet (bgp, mon->logical_net_id); + assert (it); + + if (RFAPI_0_ETHERADDR (&mon->macaddr)) + { + struct rfapi_monitor_eth *prev; + struct rfapi_monitor_eth *this = NULL; + + for (prev = NULL, + this = it->eth0_queries; this; prev = this, this = this->next) + { + + if (this == mon) + break; + } + if (this) + { + if (!prev) + { + it->eth0_queries = this->next; + } + else + { + prev->next = this->next; + } + } +#if DEBUG_L2_EXTRA + zlog_debug ("%s: it=%p, LNI=%d, detached eth0 mon %p", + __func__, it, mon->logical_net_id, mon); +#endif + return; + } + + memset ((void *) &pfx_mac_buf, 0, sizeof (struct prefix)); + pfx_mac_buf.family = AF_ETHERNET; + pfx_mac_buf.prefixlen = 48; + pfx_mac_buf.u.prefix_eth = mon->macaddr; + + rn = route_node_get (it->imported_vpn[AFI_ETHER], &pfx_mac_buf); + assert (rn); + +#if DEBUG_L2_EXTRA + char buf_prefix[BUFSIZ]; + prefix2str (&rn->p, buf_prefix, BUFSIZ); +#endif + + /* + * Get sl to detach from + */ + sl = RFAPI_MONITOR_ETH (rn); +#if DEBUG_L2_EXTRA + zlog_debug ("%s: it=%p, rn=%p, rn->lock=%d, sl=%p, pfx=%s, LNI=%d, detaching eth mon %p", + __func__, it, rn, rn->lock, sl, buf_prefix, mon->logical_net_id, mon); +#endif + assert (sl); + + + rc = skiplist_delete (sl, (void *) mon, (void *) mon); + assert (!rc); + + /* uncount eth monitor */ + route_unlock_node(rn); +} + +struct route_node * +rfapiMonitorEthAdd ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct ethaddr *macaddr, + uint32_t logical_net_id) +{ + int rc; + struct rfapi_monitor_eth mon_buf; + struct rfapi_monitor_eth *val; + struct rfapi_import_table *it; + struct route_node *rn = NULL; + struct prefix pfx_mac_buf; + + if (!rfd->mon_eth) + { + rfd->mon_eth = skiplist_new (0, mon_eth_cmp, NULL); + } + + it = rfapiMacImportTableGet (bgp, logical_net_id); + assert (it); + + /* + * Get route node in import table. Here is where we attach the + * monitor. + * + * Look it up now because we return it to caller regardless of + * whether we create a new monitor or not. 
+ */ + memset ((void *) &pfx_mac_buf, 0, sizeof (struct prefix)); + pfx_mac_buf.family = AF_ETHERNET; + pfx_mac_buf.prefixlen = 48; + pfx_mac_buf.u.prefix_eth = *macaddr; + + if (!RFAPI_0_ETHERADDR (macaddr)) + { + rn = route_node_get (it->imported_vpn[AFI_ETHER], &pfx_mac_buf); + assert (rn); + } + + memset ((void *) &mon_buf, 0, sizeof (mon_buf)); + mon_buf.rfd = rfd; + mon_buf.macaddr = *macaddr; + mon_buf.logical_net_id = logical_net_id; + + { + char buf[BUFSIZ]; + + zlog_debug ("%s: LNI=%d: rfd=%p, pfx=%s", + __func__, logical_net_id, rfd, + rfapi_ntop (pfx_mac_buf.family, pfx_mac_buf.u.val, buf, + BUFSIZ)); + } + + + /* + * look up query + */ + rc = skiplist_search (rfd->mon_eth, (void *) &mon_buf, (void **) &val); + if (!rc) + { + /* + * Found monitor - we have seen this query before + * restart timer + */ + zlog_debug ("%s: already present in rfd->mon_eth, not adding", + __func__); + rfapiMonitorEthTimerRestart (val); + return rn; + } + + /* + * New query + */ + val = XCALLOC (MTYPE_RFAPI_MONITOR_ETH, sizeof (struct rfapi_monitor_eth)); + assert (val); + *val = mon_buf; + + ++rfd->monitor_count; + ++bgp->rfapi->monitor_count; + + rc = skiplist_insert (rfd->mon_eth, val, val); + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: inserted rfd=%p mon_eth=%p, rc=%d", __func__, rfd, val, + rc); +#endif + + /* + * start timer + */ + rfapiMonitorEthTimerRestart (val); + + if (bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE) + { + /* + * callbacks turned off, so don't attach monitor to import table + */ +#if DEBUG_L2_EXTRA + zlog_debug + ("%s: callbacks turned off, not attaching mon_eth %p to import table", + __func__, val); +#endif + return rn; + } + + /* + * attach to import table + */ + rfapiMonitorEthAttachImport (it, rn, val); + + return rn; +} + +void +rfapiMonitorEthDel ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct ethaddr *macaddr, + uint32_t logical_net_id) +{ + struct rfapi_monitor_eth *val; + struct rfapi_monitor_eth mon_buf; + int rc; + + zlog_debug ("%s: entry rfd=%p", __func__, rfd); + + assert (rfd->mon_eth); + + memset ((void *) &mon_buf, 0, sizeof (mon_buf)); + mon_buf.macaddr = *macaddr; + mon_buf.logical_net_id = logical_net_id; + + rc = skiplist_search (rfd->mon_eth, (void *) &mon_buf, (void **) &val); + assert (!rc); + + /* + * remove from import table + */ + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)) + { + rfapiMonitorEthDetachImport (bgp, val); + } + + if (val->timer) + { + thread_cancel (val->timer); + val->timer = NULL; + } + + /* + * remove from rfd list + */ + rc = skiplist_delete (rfd->mon_eth, val, val); + assert (!rc); + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: freeing mon_eth %p", __func__, val); +#endif + XFREE (MTYPE_RFAPI_MONITOR_ETH, val); + + --rfd->monitor_count; + --bgp->rfapi->monitor_count; +} + + +void +rfapiMonitorCallbacksOff (struct bgp *bgp) +{ + struct rfapi_import_table *it; + afi_t afi; + struct route_table *rt; + struct route_node *rn; + void *cursor; + int rc; + struct rfapi *h = bgp->rfapi; + + if (bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE) + { + /* + * Already off. + */ + return; + } + bgp->rfapi_cfg->flags |= BGP_VNC_CONFIG_CALLBACK_DISABLE; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: turned off callbacks", __func__); +#endif + + if (h == NULL) + return; + /* + * detach monitors from import VPN tables. The monitors + * will still be linked in per-nve monitor lists. 
+ */ + for (it = h->imports; it; it = it->next) + { + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + struct rfapi_monitor_vpn *m; + struct rfapi_monitor_vpn *next; + + rt = it->imported_vpn[afi]; + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + m = RFAPI_MONITOR_VPN (rn); + if (RFAPI_MONITOR_VPN (rn)) + RFAPI_MONITOR_VPN_W_ALLOC (rn) = NULL; + for (; m; m = next) + { + next = m->next; + m->next = NULL; /* gratuitous safeness */ + m->node = NULL; + route_unlock_node (rn); /* uncount */ + } + } + + for (m = it->vpn0_queries[afi]; m; m = next) + { + next = m->next; + m->next = NULL; /* gratuitous safeness */ + m->node = NULL; + } + it->vpn0_queries[afi] = NULL; /* detach first monitor */ + } + } + + /* + * detach monitors from import Eth tables. The monitors + * will still be linked in per-nve monitor lists. + */ + + /* + * Loop over ethernet import tables + */ + for (cursor = NULL, + rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor); + !rc; rc = skiplist_next (h->import_mac, NULL, (void **) &it, &cursor)) + { + struct rfapi_monitor_eth *e; + struct rfapi_monitor_eth *enext; + + /* + * The actual route table + */ + rt = it->imported_vpn[AFI_ETHER]; + + /* + * Find non-0 monitors (i.e., actual addresses, not FTD monitors) + */ + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + struct skiplist *sl; + + sl = RFAPI_MONITOR_ETH (rn); + while (!skiplist_delete_first(sl)) + { + route_unlock_node (rn); /* uncount monitor */ + } + } + + /* + * Find 0-monitors (FTD queries) + */ + for (e = it->eth0_queries; e; e = enext) + { +#if DEBUG_L2_EXTRA + zlog_debug ("%s: detaching eth0 mon %p", __func__, e); +#endif + enext = e->next; + e->next = NULL; /* gratuitous safeness */ + } + it->eth0_queries = NULL; /* detach first monitor */ + } +} + +void +rfapiMonitorCallbacksOn (struct bgp *bgp) +{ + struct listnode *hnode; + struct rfapi_descriptor *rfd; + + if (!(bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_CALLBACK_DISABLE)) + { + /* + * Already on. It's important that we don't try to reattach + * monitors that are already attached because, in the interest + * of performance, there is no checking at the lower level + * whether a monitor is already attached. It leads to + * corrupted chains (e.g., looped pointers) + */ + return; + } + bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_CALLBACK_DISABLE; +#if DEBUG_L2_EXTRA + zlog_debug ("%s: turned on callbacks", __func__); +#endif + if (bgp->rfapi == NULL) + return; + + /* + * reattach monitors + */ + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, hnode, rfd)) + { + + rfapiMonitorAttachImportHd (rfd); + rfapiMonitorEthAttachImportHd (bgp, rfd); + } +} diff --git a/bgpd/rfapi/rfapi_monitor.h b/bgpd/rfapi/rfapi_monitor.h new file mode 100644 index 0000000000..be04b0f09d --- /dev/null +++ b/bgpd/rfapi/rfapi_monitor.h @@ -0,0 +1,217 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef QUAGGA_HGP_RFAPI_MONITOR_H +#define QUAGGA_HGP_RFAPI_MONITOR_H + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" + +/* + * These get attached to the nodes in an import table (using "aggregate" ptr) + * to indicate which nves are interested in a prefix/target + */ +struct rfapi_monitor_vpn +{ + struct rfapi_monitor_vpn *next; /* chain from struct route_node */ + struct rfapi_descriptor *rfd; /* which NVE requested the route */ + struct prefix p; /* constant: pfx in original request */ + struct route_node *node; /* node we're currently attached to */ + uint32_t flags; +#define RFAPI_MON_FLAG_NEEDCALLBACK 0x00000001 /* deferred callback */ + + //int dcount; /* debugging counter */ + void *timer; +}; + +struct rfapi_monitor_encap +{ + struct rfapi_monitor_encap *next; + struct rfapi_monitor_encap *prev; + struct route_node *node; /* VPN node */ + struct bgp_info *bi; /* VPN bi */ + struct route_node *rn; /* parent node */ +}; + +struct rfapi_monitor_eth +{ + struct rfapi_monitor_eth *next; /* for use in vpn0_queries list */ + struct rfapi_descriptor *rfd; /* which NVE requested the route */ + struct ethaddr macaddr; + uint32_t logical_net_id; + void *timer; +}; + +/* + * This is referenced by the "aggregate" field of a route node + * in an RFAPI import table. + * + * node lock/unlock: + * - one lock increment for this structure itself + * - one lock per chained struct rfapi_monitor_vpn + * - one lock for the mon_eth skiplist itself + * - one lock per mon_eth skiplist entry + * - one lock for the ext skiplist itself + * - one lock for each ext skiplist entry + * remember to free skiplist when freeing rfapi_it_extra + * - one lock per chained struct rfapi_monitor_encap + * + */ +struct rfapi_it_extra +{ + union + { + struct + { + struct rfapi_monitor_vpn *v; + struct skiplist *idx_rd; /* RD index */ + struct skiplist *mon_eth; /* ether queries */ + struct + { + /* routes with UN addrs, either cached encap or Encap TLV */ + int valid_interior_count; + + /* unicast exterior routes, key=bi, val=allocated prefix */ + struct skiplist *source; + } e; + } vpn; + struct + { + struct rfapi_monitor_encap *e; + } encap; + } u; +}; + +#define RFAPI_IT_EXTRA_GET(rn) ((struct rfapi_it_extra *)( \ + (rn)->aggregate? (rn)->aggregate: \ + (route_lock_node(rn), (rn)->aggregate = \ + XCALLOC(MTYPE_RFAPI_IT_EXTRA,sizeof(struct rfapi_it_extra))))) + +#define RFAPI_RDINDEX(rn) \ + ((rn)->aggregate ? RFAPI_IT_EXTRA_GET(rn)->u.vpn.idx_rd : NULL) + +#define RFAPI_RDINDEX_W_ALLOC(rn) (RFAPI_IT_EXTRA_GET(rn)->u.vpn.idx_rd) + +#define RFAPI_MONITOR_ETH(rn) \ + ((rn)->aggregate ? RFAPI_IT_EXTRA_GET(rn)->u.vpn.mon_eth : NULL) + +#define RFAPI_MONITOR_ETH_W_ALLOC(rn) (RFAPI_IT_EXTRA_GET(rn)->u.vpn.mon_eth) + +#define RFAPI_MONITOR_VPN(rn) \ + ((rn)->aggregate ? RFAPI_IT_EXTRA_GET(rn)->u.vpn.v : NULL) + +#define RFAPI_MONITOR_VPN_W_ALLOC(rn) (RFAPI_IT_EXTRA_GET(rn)->u.vpn.v) + +#define RFAPI_MONITOR_ENCAP(rn) \ + ((rn)->aggregate ? 
RFAPI_IT_EXTRA_GET(rn)->u.encap.e : NULL) + +#define RFAPI_MONITOR_ENCAP_W_ALLOC(rn) (RFAPI_IT_EXTRA_GET(rn)->u.encap.e) + +#define RFAPI_MONITOR_EXTERIOR(rn) (&(RFAPI_IT_EXTRA_GET(rn)->u.vpn.e)) + +#define RFAPI_HAS_MONITOR_EXTERIOR(rn) (rn && rn->aggregate && \ + ((struct rfapi_it_extra *)(rn->aggregate))->u.vpn.e.source && \ + !skiplist_first(((struct rfapi_it_extra *)(rn->aggregate))-> \ + u.vpn.e.source, NULL, NULL)) + +extern void +rfapiMonitorLoopCheck (struct rfapi_monitor_vpn *mchain); + +extern void +rfapiMonitorCleanCheck (struct bgp *bgp); + +extern void +rfapiMonitorCheckAttachAllowed (void); + +extern void +rfapiMonitorExtraFlush (safi_t safi, struct route_node *rn); + +extern struct route_node * +rfapiMonitorGetAttachNode (struct rfapi_descriptor *rfd, struct prefix *p); + +extern void +rfapiMonitorAttachImportHd (struct rfapi_descriptor *rfd); + +extern struct route_node * +rfapiMonitorAdd ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *p); + +extern void +rfapiMonitorDetachImportHd (struct rfapi_descriptor *rfd); + +extern void +rfapiMonitorDel ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *p); + +extern int +rfapiMonitorDelHd (struct rfapi_descriptor *rfd); + +extern void +rfapiMonitorCallbacksOff (struct bgp *bgp); + +extern void +rfapiMonitorCallbacksOn (struct bgp *bgp); + +extern void +rfapiMonitorResponseRemovalOff (struct bgp *bgp); + +extern void +rfapiMonitorResponseRemovalOn (struct bgp *bgp); + +extern void +rfapiMonitorExtraPrune (safi_t safi, struct route_node *rn); + +extern void +rfapiMonitorTimersRestart (struct rfapi_descriptor *rfd, struct prefix *p); + +extern void +rfapiMonitorItNodeChanged ( + struct rfapi_import_table *import_table, + struct route_node *it_node, + struct rfapi_monitor_vpn *monitor_list); + +extern void +rfapiMonitorMovedUp ( + struct rfapi_import_table *import_table, + struct route_node *old_node, + struct route_node *new_node, + struct rfapi_monitor_vpn *monitor_list); + +extern struct route_node * +rfapiMonitorEthAdd ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct ethaddr *macaddr, + uint32_t logical_net_id); + +extern void +rfapiMonitorEthDel ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct ethaddr *macaddr, + uint32_t logical_net_id); + +#endif /* QUAGGA_HGP_RFAPI_MONITOR_H */ diff --git a/bgpd/rfapi/rfapi_nve_addr.c b/bgpd/rfapi/rfapi_nve_addr.c new file mode 100644 index 0000000000..ad34ff26c8 --- /dev/null +++ b/bgpd/rfapi/rfapi_nve_addr.c @@ -0,0 +1,175 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/skiplist.h" + + +#include "bgpd/bgpd.h" + +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_backend.h" + +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_nve_addr.h" +#include "bgpd/rfapi/rfapi_vty.h" + +#define DEBUG_NVE_ADDR 0 + +void rfapiNveAddr2Str (struct rfapi_nve_addr *, char *, int); + + +#if DEBUG_NVE_ADDR +static void +logdifferent (const char *tag, + struct rfapi_nve_addr *a, struct rfapi_nve_addr *b) +{ + char a_str[BUFSIZ]; + char b_str[BUFSIZ]; + + rfapiNveAddr2Str (a, a_str, BUFSIZ); + rfapiNveAddr2Str (b, b_str, BUFSIZ); + zlog_debug ("%s: [%s] [%s]", tag, a_str, b_str); +} +#endif + + +int +rfapi_nve_addr_cmp (void *k1, void *k2) +{ + struct rfapi_nve_addr *a = (struct rfapi_nve_addr *) k1; + struct rfapi_nve_addr *b = (struct rfapi_nve_addr *) k2; + int ret = 0; + + if (!a || !b) + { +#if DEBUG_NVE_ADDR + zlog_debug ("%s: missing address a=%p b=%p", __func__, a, b); +#endif + return (a - b); + } + if (a->un.addr_family != b->un.addr_family) + { +#if DEBUG_NVE_ADDR + zlog_debug ("diff: UN addr fam a->un.af=%d, b->un.af=%d", + a->un.addr_family, b->un.addr_family); +#endif + return (a->un.addr_family - b->un.addr_family); + } + if (a->un.addr_family == AF_INET) + { + ret = IPV4_ADDR_CMP (&a->un.addr.v4, &b->un.addr.v4); + if (ret != 0) + { +#if DEBUG_NVE_ADDR + logdifferent ("diff: UN addr", a, b); +#endif + return ret; + } + } + else if (a->un.addr_family == AF_INET6) + { + ret = IPV6_ADDR_CMP (&a->un.addr.v6, &b->un.addr.v6); + if (ret != 0) + { +#if DEBUG_NVE_ADDR + logdifferent ("diff: UN addr", a, b); +#endif + return ret; + } + } + else + { + assert (0); + } + if (a->vn.addr_family != b->vn.addr_family) + { +#if DEBUG_NVE_ADDR + zlog_debug ("diff: pT addr fam a->vn.af=%d, b->vn.af=%d", + a->vn.addr_family, b->vn.addr_family); +#endif + return (a->vn.addr_family - b->vn.addr_family); + } + if (a->vn.addr_family == AF_INET) + { + ret = IPV4_ADDR_CMP (&a->vn.addr.v4, &b->vn.addr.v4); + if (ret != 0) + { +#if DEBUG_NVE_ADDR + logdifferent ("diff: VN addr", a, b); +#endif + return ret; + } + } + else if (a->vn.addr_family == AF_INET6) + { + ret = IPV6_ADDR_CMP (&a->vn.addr.v6, &b->vn.addr.v6); + if (ret != 0) + { +#if DEBUG_NVE_ADDR + logdifferent ("diff: VN addr", a, b); +#endif + return ret; + } + } + else + { + assert (0); + } + return 0; +} + +void +rfapiNveAddr2Str (struct rfapi_nve_addr *na, char *buf, int bufsize) +{ + char *p = buf; + int r; + +#define REMAIN (bufsize - (p-buf)) +#define INCP {p += (r > REMAIN)? REMAIN: r;} + + if (bufsize < 1) + return; + + r = snprintf (p, REMAIN, "VN="); + INCP; + + if (!rfapiRfapiIpAddr2Str (&na->vn, p, REMAIN)) + goto done; + + buf[bufsize - 1] = 0; + p = buf + strlen (buf); + + r = snprintf (p, REMAIN, ", UN="); + INCP; + + rfapiRfapiIpAddr2Str (&na->un, p, REMAIN); + +done: + buf[bufsize - 1] = 0; +} diff --git a/bgpd/rfapi/rfapi_nve_addr.h b/bgpd/rfapi/rfapi_nve_addr.h new file mode 100644 index 0000000000..2b2d2b50d4 --- /dev/null +++ b/bgpd/rfapi/rfapi_nve_addr.h @@ -0,0 +1,43 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. 
+ * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _QUAGGA_BGP_RFAPI_NVE_ADDR_H +#define _QUAGGA_BGP_RFAPI_NVE_ADDR_H + +#include "rfapi.h" + +struct rfapi_nve_addr +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + void *info; +}; + + +extern int +rfapi_nve_addr_cmp (void *k1, void *k2); + +extern void +rfapiNveAddr2Str (struct rfapi_nve_addr *na, char *buf, int bufsize); + + + +#endif /* _QUAGGA_BGP_RFAPI_NVE_ADDR_H */ diff --git a/bgpd/rfapi/rfapi_private.h b/bgpd/rfapi/rfapi_private.h new file mode 100644 index 0000000000..33390c4f55 --- /dev/null +++ b/bgpd/rfapi/rfapi_private.h @@ -0,0 +1,455 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * Internal definitions for RFAPI. Not for use by other code + */ + +#ifndef _QUAGGA_BGP_RFAPI_PRIVATE_H +#define _QUAGGA_BGP_RFAPI_PRIVATE_H + +#include "lib/linklist.h" +#include "lib/skiplist.h" +#include "lib/workqueue.h" + +#include "bgpd/bgp_attr.h" + +#include "rfapi.h" + +/* + * RFAPI Advertisement Data Block + * + * Holds NVE prefix advertisement information + */ +struct rfapi_adb +{ + struct prefix prefix_ip; + struct prefix prefix_eth; /* now redundant with l2o */ + struct prefix_rd prd; + uint32_t lifetime; + uint8_t cost; + struct rfapi_l2address_option l2o; +}; + +/* + * Lists of rfapi_adb. Each rfapi_adb is referenced twice: + * + * 1. each is referenced in by_lifetime + * 2. 
each is referenced by exactly one of: ipN_by_prefix, ip0_by_ether + */ +struct rfapi_advertised_prefixes +{ + struct skiplist *ipN_by_prefix; /* all except 0/32, 0/128 */ + struct skiplist *ip0_by_ether; /* ip prefix 0/32, 0/128 */ + struct skiplist *by_lifetime; /* all */ +}; + + +struct rfapi_descriptor +{ + struct route_node *un_node; /* backref to un table */ + + struct rfapi_descriptor *next; /* next vn_addr */ + + /* supplied by client */ + struct bgp *bgp; /* from rfp_start_val */ + struct rfapi_ip_addr vn_addr; + struct rfapi_ip_addr un_addr; + rfapi_response_cb_t *response_cb; /* override per-bgp response_cb */ + void *cookie; /* for callbacks */ + struct rfapi_tunneltype_option default_tunneltype_option; + + /* supplied by matched configuration */ + struct prefix_rd rd; + struct ecommunity *rt_export_list; + uint32_t response_lifetime; + + /* list of prefixes currently being advertised by this nve */ + struct rfapi_advertised_prefixes advertised; + + time_t open_time; + + uint32_t max_prefix_lifetime; + uint32_t min_prefix_lifetime; + + /* reference to this nve's import table */ + struct rfapi_import_table *import_table; + + uint32_t monitor_count; + struct route_table *mon; /* rfapi_monitors */ + struct skiplist *mon_eth; /* ethernet monitors */ + + /* + * rib RIB as seen by NVE + * rib_pending RIB containing nodes with updated info chains + * rsp_times last time we sent response containing pfx + */ + uint32_t rib_prefix_count; /* pfxes with routes */ + struct route_table *rib[AFI_MAX]; + struct route_table *rib_pending[AFI_MAX]; + struct work_queue *updated_responses_queue; + struct route_table *rsp_times[AFI_MAX]; + + uint32_t rsp_counter; /* dedup initial rsp */ + time_t rsp_time; /* dedup initial rsp */ + time_t ftd_last_allowed_time; /* FTD filter */ + + unsigned int stat_count_nh_reachable; + unsigned int stat_count_nh_removal; + + /* + * points to the original nve group structure that matched + * when this nve_descriptor was created. We use this pointer + * in rfapi_close() to find the nve group structure and + * delete its reference back to us. + * + * If the nve group structure is deleted (via configuration + * change) while this nve_descriptor exists, this rfg pointer + * will be set to NULL. + */ + struct rfapi_nve_group_cfg *rfg; + + /* + * This ~7kB structure is here to permit multiple routes for + * a prefix to be injected to BGP. There are at least two + * situations where such conditions obtain: + * + * When a VNC route is exported to BGP on behalf of the set of + * NVEs that belong to the export NVE group, it is replicated + * so that there is one route per NVE (and the route's nexthop + * is the NVE's VN address). + * + * Each of these routes being injected to BGP must have a distinct + * peer pointer (otherwise, if they have the same peer pointer, each + * route will be considered an implicit withdraw of the previous + * route injected from that peer, and the new route will replace + * rather than augment the old one(s)). + */ + struct peer *peer; + + uint32_t flags; +#define RFAPI_HD_FLAG_CALLBACK_SCHEDULED_AFI_IP 0x00000001 +#define RFAPI_HD_FLAG_CALLBACK_SCHEDULED_AFI_IP6 0x00000002 +#define RFAPI_HD_FLAG_CALLBACK_SCHEDULED_AFI_ETHER 0x00000004 +#define RFAPI_HD_FLAG_PROVISIONAL 0x00000008 +#define RFAPI_HD_FLAG_CLOSING_ADMINISTRATIVELY 0x00000010 +}; + +#define RFAPI_QUEUED_FLAG(afi) ( \ + ((afi) == AFI_IP)? RFAPI_HD_FLAG_CALLBACK_SCHEDULED_AFI_IP: \ + (((afi) == AFI_IP6)? RFAPI_HD_FLAG_CALLBACK_SCHEDULED_AFI_IP6: \ + (((afi) == AFI_ETHER)? 
RFAPI_HD_FLAG_CALLBACK_SCHEDULED_AFI_ETHER: \ + (assert(0), 0) ))) + + +struct rfapi_global_stats +{ + time_t last_reset; + unsigned int max_descriptors; + + unsigned int count_unknown_nves; + + unsigned int count_queries; + unsigned int count_queries_failed; + + unsigned int max_responses; /* semantics? */ + + unsigned int count_registrations; + unsigned int count_registrations_failed; + + unsigned int count_updated_response_updates; + unsigned int count_updated_response_deletes; +}; + +/* + * There is one of these per BGP instance. + * + * Radix tree is indexed by un address; follow chain and + * check vn address to get exact match. + */ +struct rfapi +{ + struct route_table un[AFI_MAX]; + struct rfapi_import_table *imports; /* IPv4, IPv6 */ + struct list descriptors;/* debug & resolve-nve imports */ + + struct rfapi_global_stats stat; + + /* + * callbacks into RFP, set at startup time (bgp_rfapi_new() gets + * values from rfp_start()) or via rfapi_rfp_set_cb_methods() + * (otherwise NULL). Note that the response_cb method can also + * be overridden per-rfd (currently used only for debug/test scenarios) + */ + struct rfapi_rfp_cb_methods rfp_methods; + + /* + * Import tables for Ethernet over IPSEC + * + * The skiplist keys are LNIs. Values are pointers + * to struct rfapi_import_table. + */ + struct skiplist *import_mac; /* L2 */ + + /* + * when exporting plain routes ("registered-nve" mode) to + * bgp unicast or zebra, we need to keep track of information + * related to expiring the routes according to the VNC lifetime + */ + struct route_table *rt_export_bgp[AFI_MAX]; + struct route_table *rt_export_zebra[AFI_MAX]; + + /* + * For VNC->BGP unicast exports in CE mode, we need a + * routing table that collects all of the VPN routes + * in a single tree. The VPN rib is split up according + * to RD first, so we can't use that. This is an import + * table that matches all RTs. + */ + struct rfapi_import_table *it_ce; + + /* + * when importing bgp-direct routes in resolve-nve mode, + * this list maps unicast route nexthops to their bgp_infos + * in the unicast table + */ + struct skiplist *resolve_nve_nexthop; + + /* + * Descriptors for which rfapi_close() was called during a callback. + * They will be closed after the callback finishes. + */ + struct work_queue *deferred_close_q; + + /* + * For "show vnc responses" + */ + uint32_t response_immediate_count; + uint32_t response_updated_count; + uint32_t monitor_count; + + uint32_t rib_prefix_count_total; + uint32_t rib_prefix_count_total_max; + + uint32_t flags; +#define RFAPI_INCALLBACK 0x00000001 + void *rfp; /* from rfp_start */ +}; + +#define RFAPI_RIB_PREFIX_COUNT_INCR(rfd, rfapi) do { \ + ++(rfd)->rib_prefix_count; \ + ++(rfapi)->rib_prefix_count_total; \ + if ((rfapi)->rib_prefix_count_total > (rfapi)->rib_prefix_count_total_max) \ + ++(rfapi)->rib_prefix_count_total_max; \ + } while (0) + +#define RFAPI_RIB_PREFIX_COUNT_DECR(rfd, rfapi) do { \ + --(rfd)->rib_prefix_count; \ + --(rfapi)->rib_prefix_count_total; \ + } while (0) + +#define RFAPI_0_PREFIX(prefix) ( \ + (((prefix)->family == AF_INET)? (prefix)->u.prefix4.s_addr == 0: \ + (((prefix)->family == AF_INET6)? \ + (IN6_IS_ADDR_UNSPECIFIED(&(prefix)->u.prefix6)) : 0)) \ +) + +#define RFAPI_0_ETHERADDR(ea) ( \ + ((ea)->octet[0] | (ea)->octet[1] | (ea)->octet[2] | \ + (ea)->octet[3] | (ea)->octet[4] | (ea)->octet[5]) == 0) + +#define RFAPI_HOST_PREFIX(prefix) ( \ + ((prefix)->family == AF_INET)? ((prefix)->prefixlen == 32): \ + (((prefix)->family == AF_INET6)? 
((prefix)->prefixlen == 128): 0) ) + +extern void +rfapiQprefix2Rprefix ( + struct prefix *qprefix, + struct rfapi_ip_prefix *rprefix); + +extern int +rfapi_find_rfd ( + struct bgp *bgp, + struct rfapi_ip_addr *vn_addr, + struct rfapi_ip_addr *un_addr, + struct rfapi_descriptor **rfd); + +extern void +add_vnc_route ( + struct rfapi_descriptor *rfd, /* cookie + UN addr for VPN */ + struct bgp *bgp, + int safi, + struct prefix *p, + struct prefix_rd *prd, + struct rfapi_ip_addr *nexthop, + uint32_t *local_pref, /* host byte order */ + uint32_t *lifetime, /* host byte order */ + struct bgp_tea_options *rfp_options, + struct rfapi_un_option *options_un, + struct rfapi_vn_option *options_vn, + struct ecommunity *rt_export_list, + uint32_t *med, + uint32_t *label, + uint8_t type, + uint8_t sub_type, + int flags); +#define RFAPI_AHR_NO_TUNNEL_SUBTLV 0x00000001 +#define RFAPI_AHR_RFPOPT_IS_VNCTLV 0x00000002 /* hack! */ +#if 0 /* unused? */ +# define RFAPI_AHR_SET_PFX_TO_NEXTHOP 0x00000004 +#endif + +extern void +del_vnc_route ( + struct rfapi_descriptor *rfd, + struct peer *peer, + struct bgp *bgp, + safi_t safi, + struct prefix *p, + struct prefix_rd *prd, + uint8_t type, + uint8_t sub_type, + struct rfapi_nexthop *lnh, + int kill); + +extern int +rfapiCliGetPrefixAddr (struct vty *vty, const char *str, struct prefix *p); + +extern int +rfapiGetVncLifetime (struct attr *attr, uint32_t * lifetime); + +extern int +rfapiGetTunnelType (struct attr *attr, bgp_encap_types *type); + +extern int +rfapiGetVncTunnelUnAddr (struct attr *attr, struct prefix *p); + +extern int +rfapi_reopen (struct rfapi_descriptor *rfd, struct bgp *bgp); + +extern void +vnc_import_bgp_add_rfp_host_route_mode_resolve_nve ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *prefix); + +extern void +vnc_import_bgp_del_rfp_host_route_mode_resolve_nve ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct prefix *prefix); + +extern void +rfapiFreeBgpTeaOptionChain (struct bgp_tea_options *p); + +extern struct rfapi_vn_option * +rfapiVnOptionsDup (struct rfapi_vn_option *orig); + +extern struct rfapi_un_option * +rfapiUnOptionsDup (struct rfapi_un_option *orig); + +extern struct bgp_tea_options * +rfapiOptionsDup (struct bgp_tea_options *orig); + +extern int +rfapi_ip_addr_cmp (struct rfapi_ip_addr *a1, struct rfapi_ip_addr *a2); + +extern uint32_t +rfp_cost_to_localpref (uint8_t cost); + +extern int +rfapi_set_autord_from_vn (struct prefix_rd *rd, struct rfapi_ip_addr *vn); + +extern void +rfapiAdbFree (struct rfapi_adb *adb); + +extern struct rfapi_nexthop * +rfapi_nexthop_new (struct rfapi_nexthop *copyme); + +extern void +rfapi_nexthop_free (void *goner); + +extern struct rfapi_vn_option * +rfapi_vn_options_dup (struct rfapi_vn_option *existing); + +extern void +rfapi_un_options_free (struct rfapi_un_option *goner); + +extern void +rfapi_vn_options_free (struct rfapi_vn_option *goner); + +/*------------------------------------------ + * rfapi_extract_l2o + * + * Find Layer 2 options in an option chain + * + * input: + * pHop option chain + * + * output: + * l2o layer 2 options extracted + * + * return value: + * 0 OK + * 1 no options found + * + --------------------------------------------*/ +extern int +rfapi_extract_l2o ( + struct bgp_tea_options *pHop, /* chain of options */ + struct rfapi_l2address_option *l2o); /* return extracted value */ + +/* + * compaitibility to old quagga_time call + * time_t value in terms of stabilised absolute time. 
+ * replacement for POSIX time() + */ +extern time_t rfapi_time (time_t *t); + +DECLARE_MGROUP(RFAPI) +DECLARE_MTYPE(RFAPI_CFG) +DECLARE_MTYPE(RFAPI_GROUP_CFG) +DECLARE_MTYPE(RFAPI_L2_CFG) +DECLARE_MTYPE(RFAPI_RFP_GROUP_CFG) +DECLARE_MTYPE(RFAPI) +DECLARE_MTYPE(RFAPI_DESC) +DECLARE_MTYPE(RFAPI_IMPORTTABLE) +DECLARE_MTYPE(RFAPI_MONITOR) +DECLARE_MTYPE(RFAPI_MONITOR_ENCAP) +DECLARE_MTYPE(RFAPI_NEXTHOP) +DECLARE_MTYPE(RFAPI_VN_OPTION) +DECLARE_MTYPE(RFAPI_UN_OPTION) +DECLARE_MTYPE(RFAPI_WITHDRAW) +DECLARE_MTYPE(RFAPI_RFG_NAME) +DECLARE_MTYPE(RFAPI_ADB) +DECLARE_MTYPE(RFAPI_ETI) +DECLARE_MTYPE(RFAPI_NVE_ADDR) +DECLARE_MTYPE(RFAPI_PREFIX_BAG) +DECLARE_MTYPE(RFAPI_IT_EXTRA) +DECLARE_MTYPE(RFAPI_INFO) +DECLARE_MTYPE(RFAPI_ADDR) +DECLARE_MTYPE(RFAPI_UPDATED_RESPONSE_QUEUE) +DECLARE_MTYPE(RFAPI_RECENT_DELETE) +DECLARE_MTYPE(RFAPI_L2ADDR_OPT) +DECLARE_MTYPE(RFAPI_AP) +DECLARE_MTYPE(RFAPI_MONITOR_ETH) + +#endif /* _QUAGGA_BGP_RFAPI_PRIVATE_H */ diff --git a/bgpd/rfapi/rfapi_rib.c b/bgpd/rfapi/rfapi_rib.c new file mode 100644 index 0000000000..896b5f50a8 --- /dev/null +++ b/bgpd/rfapi/rfapi_rib.c @@ -0,0 +1,2531 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * File: rfapi_rib.c + * Purpose: maintain per-nve ribs and generate change lists + */ + +#include + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/log.h" +#include "lib/skiplist.h" +#include "lib/workqueue.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_mplsvpn.h" +#include "bgpd/bgp_vnc_types.h" + +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/vnc_import_bgp.h" +#include "bgpd/rfapi/rfapi_rib.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_encap_tlv.h" + +#define DEBUG_PROCESS_PENDING_NODE 0 +#define DEBUG_PENDING_DELETE_ROUTE 0 +#define DEBUG_NHL 0 +#define DEBUG_RIB_SL_RD 0 + +/* forward decl */ +#if DEBUG_NHL +static void +rfapiRibShowRibSl (void *stream, struct prefix *pfx, struct skiplist *sl); +#endif + +/* + * RIB + * --- + * Model of the set of routes currently in the NVE's RIB. + * + * node->info ptr to "struct skiplist". + * MUST be NULL if there are no routes. + * key = ptr to struct prefix {vn} + * val = ptr to struct rfapi_info + * skiplist.del = NULL + * skiplist.cmp = vnc_prefix_cmp + * + * node->aggregate ptr to "struct skiplist". 
+ * key = ptr to struct prefix {vn} + * val = ptr to struct rfapi_info + * skiplist.del = rfapi_info_free + * skiplist.cmp = vnc_prefix_cmp + * + * This skiplist at "aggregate" + * contains the routes recently + * deleted + * + * + * Pending RIB + * ----------- + * Sparse list of prefixes that need to be updated. Each node + * will have the complete set of routes for the prefix. + * + * node->info ptr to "struct list" (lib/linklist.h) + * "Cost List" + * List of routes sorted lowest cost first. + * This list is how the new complete set + * of routes should look. + * Set if there are updates to the prefix; + * MUST be NULL if there are no updates. + * + * .data = ptr to struct rfapi_info + * list.cmp = NULL (sorted manually) + * list.del = rfapi_info_free + * + * Special case: if node->info is 1, it means + * "delete all routes at this prefix". + * + * node->aggregate ptr to struct skiplist + * key = ptr to struct prefix {vn} (part of ri) + * val = struct rfapi_info + * skiplist.cmp = vnc_prefix_cmp + * skiplist.del = NULL + * + * ptlist is rewritten anew each time + * rfapiRibUpdatePendingNode() is called + * + * THE ptlist VALUES ARE REFERENCES TO THE + * rfapi_info STRUCTS IN THE node->info LIST. + */ + +/* + * iterate over RIB to count responses, compare with running counters + */ +void +rfapiRibCheckCounts ( + int checkstats, /* validate rfd & global counts */ + unsigned int offset) /* number of ri's held separately */ +{ + struct rfapi_descriptor *rfd; + struct listnode *node; + + struct bgp *bgp = bgp_get_default (); + + uint32_t t_pfx_active = 0; + uint32_t t_pfx_deleted = 0; + + uint32_t t_ri_active = 0; + uint32_t t_ri_deleted = 0; + uint32_t t_ri_pend = 0; + + unsigned int alloc_count; + + /* + * loop over NVEs + */ + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, node, rfd)) + { + + afi_t afi; + uint32_t pfx_active = 0; + uint32_t pfx_deleted = 0; + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + struct route_node *rn; + + for (rn = route_top (rfd->rib[afi]); rn; rn = route_next (rn)) + { + + struct skiplist *sl = rn->info; + struct skiplist *dsl = rn->aggregate; + uint32_t ri_active = 0; + uint32_t ri_deleted = 0; + + if (sl) + { + ri_active = skiplist_count (sl); + assert (ri_active); + t_ri_active += ri_active; + ++pfx_active; + ++t_pfx_active; + } + + if (dsl) + { + ri_deleted = skiplist_count (dsl); + t_ri_deleted += ri_deleted; + ++pfx_deleted; + ++t_pfx_deleted; + } + } + for (rn = route_top (rfd->rib_pending[afi]); rn; + rn = route_next (rn)) + { + + struct list *l = rn->info; /* sorted by cost */ + struct skiplist *sl = rn->aggregate; + uint32_t ri_pend_cost = 0; + uint32_t ri_pend_uniq = 0; + + if (sl) + { + ri_pend_uniq = skiplist_count (sl); + } + + if (l && (l != (void *) 1)) + { + ri_pend_cost = l->count; + t_ri_pend += l->count; + } + + assert (ri_pend_uniq == ri_pend_cost); + } + } + + if (checkstats) + { + if (pfx_active != rfd->rib_prefix_count) + { + zlog_debug ("%s: rfd %p actual pfx count %u != running %u", + __func__, rfd, pfx_active, rfd->rib_prefix_count); + assert (0); + } + } + } + + if (checkstats && bgp && bgp->rfapi) + { + if (t_pfx_active != bgp->rfapi->rib_prefix_count_total) + { + zlog_debug ("%s: actual total pfx count %u != running %u", + __func__, t_pfx_active, + bgp->rfapi->rib_prefix_count_total); + assert (0); + } + } + + /* + * Check against memory allocation count + */ + alloc_count = mtype_stats_alloc (MTYPE_RFAPI_INFO); + assert (t_ri_active + t_ri_deleted + t_ri_pend + offset == alloc_count); +} + +static struct rfapi_info * 
+rfapi_info_new () +{ + return XCALLOC (MTYPE_RFAPI_INFO, sizeof (struct rfapi_info)); +} + +void +rfapiFreeRfapiUnOptionChain (struct rfapi_un_option *p) +{ + while (p) + { + struct rfapi_un_option *next; + + next = p->next; + XFREE (MTYPE_RFAPI_UN_OPTION, p); + p = next; + } +} + +void +rfapiFreeRfapiVnOptionChain (struct rfapi_vn_option *p) +{ + while (p) + { + struct rfapi_vn_option *next; + + next = p->next; + XFREE (MTYPE_RFAPI_VN_OPTION, p); + p = next; + } +} + + +static void +rfapi_info_free (struct rfapi_info *goner) +{ + if (goner) + { + if (goner->tea_options) + { + rfapiFreeBgpTeaOptionChain (goner->tea_options); + goner->tea_options = NULL; + } + if (goner->un_options) + { + rfapiFreeRfapiUnOptionChain (goner->un_options); + goner->un_options = NULL; + } + if (goner->vn_options) + { + rfapiFreeRfapiVnOptionChain (goner->vn_options); + goner->vn_options = NULL; + } + if (goner->timer) + { + struct rfapi_rib_tcb *tcb; + + tcb = ((struct thread *) goner->timer)->arg; + thread_cancel ((struct thread *) goner->timer); + XFREE (MTYPE_RFAPI_RECENT_DELETE, tcb); + goner->timer = NULL; + } + XFREE (MTYPE_RFAPI_INFO, goner); + } +} + +/* + * Timer control block for recently-deleted and expired routes + */ +struct rfapi_rib_tcb +{ + struct rfapi_descriptor *rfd; + struct skiplist *sl; + struct rfapi_info *ri; + struct route_node *rn; + int flags; +#define RFAPI_RIB_TCB_FLAG_DELETED 0x00000001 +}; + +/* + * remove route from rib + */ +static int +rfapiRibExpireTimer (struct thread *t) +{ + struct rfapi_rib_tcb *tcb = t->arg; + + RFAPI_RIB_CHECK_COUNTS (1, 0); + + /* + * Forget reference to thread. Otherwise rfapi_info_free() will + * attempt to free thread pointer as an option chain + */ + tcb->ri->timer = NULL; + + /* "deleted" skiplist frees ri, "active" doesn't */ + assert (!skiplist_delete (tcb->sl, &tcb->ri->rk, NULL)); + if (!tcb->sl->del) + { + /* + * XXX in this case, skiplist has no delete function: we must + * therefore delete rfapi_info explicitly. 
+ */ + rfapi_info_free (tcb->ri); + } + + if (skiplist_empty (tcb->sl)) + { + if (CHECK_FLAG (tcb->flags, RFAPI_RIB_TCB_FLAG_DELETED)) + tcb->rn->aggregate = NULL; + else + { + struct bgp *bgp = bgp_get_default (); + tcb->rn->info = NULL; + RFAPI_RIB_PREFIX_COUNT_DECR (tcb->rfd, bgp->rfapi); + } + skiplist_free (tcb->sl); + route_unlock_node (tcb->rn); + } + + XFREE (MTYPE_RFAPI_RECENT_DELETE, tcb); + + RFAPI_RIB_CHECK_COUNTS (1, 0); + + return 0; +} + +static void +rfapiRibStartTimer ( + struct rfapi_descriptor *rfd, + struct rfapi_info *ri, + struct route_node *rn, /* route node attached to */ + int deleted) +{ + struct thread *t = ri->timer; + struct rfapi_rib_tcb *tcb = NULL; + char buf_prefix[BUFSIZ]; + + if (t) + { + tcb = t->arg; + thread_cancel (t); + ri->timer = NULL; + } + else + { + tcb = + XCALLOC (MTYPE_RFAPI_RECENT_DELETE, sizeof (struct rfapi_rib_tcb)); + } + tcb->rfd = rfd; + tcb->ri = ri; + tcb->rn = rn; + if (deleted) + { + tcb->sl = (struct skiplist *) rn->aggregate; + SET_FLAG (tcb->flags, RFAPI_RIB_TCB_FLAG_DELETED); + } + else + { + tcb->sl = (struct skiplist *) rn->info; + UNSET_FLAG (tcb->flags, RFAPI_RIB_TCB_FLAG_DELETED); + } + + prefix2str (&rn->p, buf_prefix, BUFSIZ); + zlog_debug ("%s: rfd %p pfx %s life %u", __func__, rfd, buf_prefix, + ri->lifetime); + ri->timer = thread_add_timer (bm->master, rfapiRibExpireTimer, + tcb, ri->lifetime); + assert (ri->timer); +} + +/* + * Compares two struct rfapi_rib_key keys + */ +static int +rfapi_rib_key_cmp (void *k1, void *k2) +{ + struct rfapi_rib_key *a = (struct rfapi_rib_key *) k1; + struct rfapi_rib_key *b = (struct rfapi_rib_key *) k2; + int ret; + + if (!a || !b) + return (a - b); + + ret = vnc_prefix_cmp (&a->vn, &b->vn); + if (ret) + return ret; + + ret = vnc_prefix_cmp(&a->rd, &b->rd); + if (ret) + return ret; + + ret = vnc_prefix_cmp (&a->aux_prefix, &b->aux_prefix); + + return ret; +} + + +/* + * Note: this function will claim that two option chains are + * different unless their option items are in identical order. + * The consequence is that RFP updated responses can be sent + * unnecessarily, or that they might contain nexthop items + * that are not strictly needed. + * + * This function could be modified to compare option chains more + * thoroughly, but it's not clear that the extra computation would + * be worth it. 
+ */ +static int +bgp_tea_options_cmp (struct bgp_tea_options *a, struct bgp_tea_options *b) +{ + int rc; + + if (!a || !b) + { + return (a - b); + } + + if (a->type != b->type) + return (a->type - b->type); + if (a->length != b->length) + return (a->length - b->length); + if ((rc = memcmp (a->value, b->value, a->length))) + return rc; + if (!a->next != !b->next) + { /* logical xor */ + return (a->next - b->next); + } + if (a->next) + return bgp_tea_options_cmp (a->next, b->next); + return 0; + +} + +static int +rfapi_info_cmp (struct rfapi_info *a, struct rfapi_info *b) +{ + int rc; + + if (!a || !b) + return (a - b); + + if ((rc = rfapi_rib_key_cmp (&a->rk, &b->rk))) + return rc; + + if ((rc = vnc_prefix_cmp (&a->un, &b->un))) + return rc; + + if (a->cost != b->cost) + return (a->cost - b->cost); + + if (a->lifetime != b->lifetime) + return (a->lifetime - b->lifetime); + + if ((rc = bgp_tea_options_cmp (a->tea_options, b->tea_options))) + return rc; + + return 0; +} + +void +rfapiRibClear (struct rfapi_descriptor *rfd) +{ + struct bgp *bgp = bgp_get_default (); + afi_t afi; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: rfd=%p", __func__, rfd); +#endif + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + struct route_node *pn; + struct route_node *rn; + + if (rfd->rib_pending[afi]) + { + for (pn = route_top (rfd->rib_pending[afi]); pn; + pn = route_next (pn)) + { + if (pn->aggregate) + { + /* + * free references into the rfapi_info structures before + * freeing the structures themselves + */ + skiplist_free ((struct skiplist *) (pn->aggregate)); + pn->aggregate = NULL; + route_unlock_node (pn); /* skiplist deleted */ + } + /* + * free the rfapi_info structures + */ + if (pn->info) + { + if (pn->info != (void *) 1) + { + list_delete ((struct list *) (pn->info)); + } + pn->info = NULL; + route_unlock_node (pn); /* linklist or 1 deleted */ + } + } + } + if (rfd->rib[afi]) + { + for (rn = route_top (rfd->rib[afi]); rn; rn = route_next (rn)) + { + if (rn->info) + { + + struct rfapi_info *ri; + + while (0 == + skiplist_first ((struct skiplist *) rn->info, NULL, + (void **) &ri)) + { + + rfapi_info_free (ri); + skiplist_delete_first ((struct skiplist *) rn->info); + } + skiplist_free ((struct skiplist *) rn->info); + rn->info = NULL; + route_unlock_node (rn); + RFAPI_RIB_PREFIX_COUNT_DECR (rfd, bgp->rfapi); + } + if (rn->aggregate) + { + + struct rfapi_info *ri_del; + + /* delete skiplist & contents */ + while (!skiplist_first ((struct skiplist *) (rn->aggregate), + NULL, (void **) &ri_del)) + { + + /* sl->del takes care of ri_del */ + skiplist_delete_first ( + (struct skiplist *) (rn->aggregate)); + } + skiplist_free ((struct skiplist *) (rn->aggregate)); + + rn->aggregate = NULL; + route_unlock_node (rn); + } + } + } + } + if (rfd->updated_responses_queue) + { + work_queue_free (rfd->updated_responses_queue); + rfd->updated_responses_queue = NULL; + } +} + +/* + * Release all dynamically-allocated memory that is part of an HD's RIB + */ +void +rfapiRibFree (struct rfapi_descriptor *rfd) +{ + afi_t afi; + + + /* + * NB rfd is typically detached from master list, so is not included + * in the count performed by RFAPI_RIB_CHECK_COUNTS + */ + + /* + * Free routes attached to radix trees + */ + rfapiRibClear (rfd); + + /* Now the uncounted rfapi_info's are freed, so the check should succeed */ + RFAPI_RIB_CHECK_COUNTS (1, 0); + + /* + * Free radix trees + */ + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + route_table_finish (rfd->rib_pending[afi]); + rfd->rib_pending[afi] = NULL; + + route_table_finish 
(rfd->rib[afi]); + rfd->rib[afi] = NULL; + + /* NB route_table_finish frees only prefix nodes, not chained info */ + route_table_finish (rfd->rsp_times[afi]); + rfd->rib[afi] = NULL; + } +} + +/* + * Copies struct bgp_info to struct rfapi_info, except for rk fields and un + */ +static void +rfapiRibBi2Ri( + struct bgp_info *bi, + struct rfapi_info *ri, + uint32_t lifetime) +{ + struct bgp_attr_encap_subtlv *pEncap; + + ri->cost = rfapiRfpCost (bi->attr); + ri->lifetime = lifetime; + + /* This loop based on rfapiRouteInfo2NextHopEntry() */ + for (pEncap = bi->attr->extra->vnc_subtlvs; pEncap; pEncap = pEncap->next) + { + struct bgp_tea_options *hop; + + switch (pEncap->type) + { + case BGP_VNC_SUBTLV_TYPE_LIFETIME: + /* use configured lifetime, not attr lifetime */ + break; + + case BGP_VNC_SUBTLV_TYPE_RFPOPTION: + hop = XCALLOC (MTYPE_BGP_TEA_OPTIONS, + sizeof (struct bgp_tea_options)); + assert (hop); + hop->type = pEncap->value[0]; + hop->length = pEncap->value[1]; + hop->value = XCALLOC (MTYPE_BGP_TEA_OPTIONS_VALUE, + pEncap->length - 2); + assert (hop->value); + memcpy (hop->value, pEncap->value + 2, pEncap->length - 2); + if (hop->length > pEncap->length - 2) + { + zlog_warn ("%s: VNC subtlv length mismatch: " + "RFP option says %d, attr says %d " + "(shrinking)", + __func__, hop->length, pEncap->length - 2); + hop->length = pEncap->length - 2; + } + hop->next = ri->tea_options; + ri->tea_options = hop; + break; + + default: + break; + } + } + + rfapi_un_options_free (ri->un_options); /* maybe free old version */ + ri->un_options = rfapi_encap_tlv_to_un_option (bi->attr); + + /* + * VN options + */ + if (bi->extra && + decode_rd_type(bi->extra->vnc.import.rd.val) == RD_TYPE_VNC_ETH) + { + /* ethernet route */ + + struct rfapi_vn_option *vo; + + vo = XCALLOC (MTYPE_RFAPI_VN_OPTION, sizeof (struct rfapi_vn_option)); + assert (vo); + + vo->type = RFAPI_VN_OPTION_TYPE_L2ADDR; + + /* copy from RD already stored in bi, so we don't need it_node */ + memcpy (&vo->v.l2addr.macaddr, bi->extra->vnc.import.rd.val+2, + ETHER_ADDR_LEN); + + if (bi->attr && bi->attr->extra) + { + (void) rfapiEcommunityGetLNI (bi->attr->extra->ecommunity, + &vo->v.l2addr.logical_net_id); + } + + /* local_nve_id comes from RD */ + vo->v.l2addr.local_nve_id = bi->extra->vnc.import.rd.val[1]; + + /* label comes from MP_REACH_NLRI label */ + vo->v.l2addr.label = decode_label (bi->extra->tag); + + rfapi_vn_options_free (ri->vn_options); /* maybe free old version */ + ri->vn_options = vo; + } + + /* + * If there is an auxiliary IP address (L2 can have it), copy it + */ + if (bi && bi->extra && bi->extra->vnc.import.aux_prefix.family) + { + ri->rk.aux_prefix = bi->extra->vnc.import.aux_prefix; + } +} + +/* + * rfapiRibPreloadBi + * + * Install route into NVE RIB model so as to be consistent with + * caller's response to rfapi_query(). + * + * Also: return indication to caller whether this specific route + * should be included in the response to the NVE according to + * the following tests: + * + * 1. If there were prior duplicates of this route in this same + * query response, don't include the route. 
+ * + * RETURN VALUE: + * + * 0 OK to include route in response + * !0 do not include route in response + */ +int +rfapiRibPreloadBi( + struct route_node *rfd_rib_node, /* NULL = don't preload or filter */ + struct prefix *pfx_vn, + struct prefix *pfx_un, + uint32_t lifetime, + struct bgp_info *bi) +{ + struct rfapi_descriptor *rfd; + struct skiplist *slRibPt = NULL; + struct rfapi_info *ori = NULL; + struct rfapi_rib_key rk; + struct route_node *trn; + afi_t afi; + + if (!rfd_rib_node) + return 0; + + afi = family2afi(rfd_rib_node->p.family); + + rfd = (struct rfapi_descriptor *)(rfd_rib_node->table->info); + + memset((void *)&rk, 0, sizeof(rk)); + rk.vn = *pfx_vn; + rk.rd = bi->extra->vnc.import.rd; + + /* + * If there is an auxiliary IP address (L2 can have it), copy it + */ + if (bi->extra->vnc.import.aux_prefix.family) + { + rk.aux_prefix = bi->extra->vnc.import.aux_prefix; + } + + /* + * is this route already in NVE's RIB? + */ + slRibPt = (struct skiplist *) rfd_rib_node->info; + + if (slRibPt && !skiplist_search (slRibPt, &rk, (void **) &ori)) + { + + if ((ori->rsp_counter == rfd->rsp_counter) && + (ori->last_sent_time == rfd->rsp_time)) + { + return -1; /* duplicate in this response */ + } + + /* found: update contents of existing route in RIB */ + ori->un = *pfx_un; + rfapiRibBi2Ri(bi, ori, lifetime); + } + else + { + /* not found: add new route to RIB */ + ori = rfapi_info_new (); + ori->rk = rk; + ori->un = *pfx_un; + rfapiRibBi2Ri(bi, ori, lifetime); + + if (!slRibPt) + { + slRibPt = skiplist_new (0, rfapi_rib_key_cmp, NULL); + rfd_rib_node->info = slRibPt; + route_lock_node (rfd_rib_node); + RFAPI_RIB_PREFIX_COUNT_INCR (rfd, rfd->bgp->rfapi); + } + skiplist_insert (slRibPt, &ori->rk, ori); + } + + ori->last_sent_time = rfapi_time (NULL); + + /* + * poke timer + */ + RFAPI_RIB_CHECK_COUNTS (0, 0); + rfapiRibStartTimer (rfd, ori, rfd_rib_node, 0); + RFAPI_RIB_CHECK_COUNTS (0, 0); + + /* + * Update last sent time for prefix + */ + trn = route_node_get (rfd->rsp_times[afi], &rfd_rib_node->p); /* locks trn */ + trn->info = (void *) (uintptr_t) bgp_clock (); + if (trn->lock > 1) + route_unlock_node (trn); + + return 0; +} + +/* + * Frees rfapi_info items at node + * + * Adjust 'rib' and 'rib_pending' as follows: + * + * If rib_pending node->info is 1 (magic value): + * callback: NHL = RIB NHL with lifetime = withdraw_lifetime_value + * RIB = remove all routes at the node + * DONE + * + * For each item at rib node: + * if not present in pending node, move RIB item to "delete list" + * + * For each item at pending rib node: + * if present (same vn/un) in rib node with same lifetime & options, drop + * matching item from pending node + * + * For each remaining item at pending rib node, add or replace item + * at rib node. 
+ * + * Construct NHL as concatenation of pending list + delete list + * + * Clear pending node + */ +static void +process_pending_node ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + afi_t afi, + struct route_node *pn, /* pending node */ + struct rfapi_next_hop_entry **head, + struct rfapi_next_hop_entry **tail) +{ + struct listnode *node = NULL; + struct listnode *nnode = NULL; + struct rfapi_info *ri = NULL; /* happy valgrind */ + struct rfapi_ip_prefix hp = { 0 }; /* pfx to put in NHE */ + struct route_node *rn = NULL; + struct skiplist *slRibPt = NULL; /* rib list */ + struct skiplist *slPendPt = NULL; + struct list *lPendCost = NULL; + struct list *delete_list = NULL; + int printedprefix = 0; + char buf_prefix[BUFSIZ]; + int rib_node_started_nonempty = 0; + int sendingsomeroutes = 0; + +#if DEBUG_PROCESS_PENDING_NODE + unsigned int count_rib_initial = 0; + unsigned int count_pend_vn_initial = 0; + unsigned int count_pend_cost_initial = 0; +#endif + + assert (pn); + prefix2str (&pn->p, buf_prefix, BUFSIZ); + zlog_debug ("%s: afi=%d, %s pn->info=%p", + __func__, afi, buf_prefix, pn->info); + + if (AFI_ETHER != afi) + { + rfapiQprefix2Rprefix (&pn->p, &hp); + } + + RFAPI_RIB_CHECK_COUNTS (1, 0); + + /* + * Find corresponding RIB node + */ + rn = route_node_get (rfd->rib[afi], &pn->p); /* locks rn */ + + /* + * RIB skiplist has key=rfapi_addr={vn,un}, val = rfapi_info, + * skiplist.del = NULL + */ + slRibPt = (struct skiplist *) rn->info; + if (slRibPt) + rib_node_started_nonempty = 1; + + slPendPt = (struct skiplist *) (pn->aggregate); + lPendCost = (struct list *) (pn->info); + +#if DEBUG_PROCESS_PENDING_NODE + /* debugging */ + if (slRibPt) + count_rib_initial = skiplist_count (slRibPt); + + if (slPendPt) + count_pend_vn_initial = skiplist_count (slPendPt); + + if (lPendCost && lPendCost != (struct list *) 1) + count_pend_cost_initial = lPendCost->count; +#endif + + + /* + * Handle special case: delete all routes at prefix + */ + if (lPendCost == (struct list *) 1) + { + zlog_debug ("%s: lPendCost=1 => delete all", __func__); + if (slRibPt && !skiplist_empty (slRibPt)) + { + delete_list = list_new (); + while (0 == skiplist_first (slRibPt, NULL, (void **) &ri)) + { + + char buf[BUFSIZ]; + char buf2[BUFSIZ]; + + listnode_add (delete_list, ri); + zlog_debug ("%s: after listnode_add, delete_list->count=%d", + __func__, delete_list->count); + rfapiFreeBgpTeaOptionChain (ri->tea_options); + ri->tea_options = NULL; + + if (ri->timer) + { + struct rfapi_rib_tcb *tcb; + + tcb = ((struct thread *) ri->timer)->arg; + thread_cancel (ri->timer); + XFREE (MTYPE_RFAPI_RECENT_DELETE, tcb); + ri->timer = NULL; + } + + prefix2str (&ri->rk.vn, buf, BUFSIZ); + prefix2str (&ri->un, buf2, BUFSIZ); + zlog_debug + ("%s: put dl pfx=%s vn=%s un=%s cost=%d life=%d vn_options=%p", + __func__, buf_prefix, buf, buf2, ri->cost, ri->lifetime, + ri->vn_options); + + skiplist_delete_first (slRibPt); + } + + assert (skiplist_empty (slRibPt)); + + skiplist_free (slRibPt); + rn->info = slRibPt = NULL; + route_unlock_node (rn); + + lPendCost = pn->info = NULL; + route_unlock_node (pn); + + goto callback; + } + if (slRibPt) + { + skiplist_free (slRibPt); + rn->info = NULL; + route_unlock_node (rn); + } + + assert (!slPendPt); + if (slPendPt) + { /* TBD I think we can toss this block */ + skiplist_free (slPendPt); + pn->aggregate = NULL; + route_unlock_node (pn); + } + + pn->info = NULL; + route_unlock_node (pn); + + route_unlock_node (rn); /* route_node_get() */ + + if (rib_node_started_nonempty) + { + 
RFAPI_RIB_PREFIX_COUNT_DECR (rfd, bgp->rfapi); + } + + RFAPI_RIB_CHECK_COUNTS (1, 0); + + return; + } + + zlog_debug ("%s: lPendCost->count=%d, slRibPt->count=%d", + __func__, + (lPendCost ? (int) lPendCost->count : -1), + (slRibPt ? (int) slRibPt->count : -1)); + + /* + * Iterate over routes at RIB Node. + * If not found at Pending Node, delete from RIB Node and add to deletelist + * If found at Pending Node + * If identical rfapi_info, delete from Pending Node + */ + if (slRibPt) + { + void *cursor = NULL; + struct rfapi_info *ori; + + /* + * Iterate over RIB List + * + */ + while (!skiplist_next (slRibPt, NULL, (void **) &ori, &cursor)) + { + + if (skiplist_search (slPendPt, &ori->rk, (void **) &ri)) + { + /* + * Not in Pending list, so it should be deleted + */ + if (!delete_list) + delete_list = list_new (); + listnode_add (delete_list, ori); + rfapiFreeBgpTeaOptionChain (ori->tea_options); + ori->tea_options = NULL; + if (ori->timer) + { + struct rfapi_rib_tcb *tcb; + + tcb = ((struct thread *) ori->timer)->arg; + thread_cancel (ori->timer); + XFREE (MTYPE_RFAPI_RECENT_DELETE, tcb); + ori->timer = NULL; + } + +#if DEBUG_PROCESS_PENDING_NODE + /* deleted from slRibPt below, after we're done iterating */ + zlog_debug + ("%s: slRibPt ri %p not matched in pending list, delete", + __func__, ori); +#endif + + } + else + { + /* + * Found in pending list. If same lifetime, cost, options, + * then remove from pending list because the route + * hasn't changed. + */ + if (!rfapi_info_cmp (ori, ri)) + { + skiplist_delete (slPendPt, &ri->rk, NULL); + assert (lPendCost); + if (lPendCost) + { + /* linear walk: might need optimization */ + listnode_delete (lPendCost, ri); /* XXX doesn't free data! bug? */ + rfapi_info_free (ri); /* grr... */ + } + } +#if DEBUG_PROCESS_PENDING_NODE + zlog_debug ("%s: slRibPt ri %p matched in pending list, %s", + __func__, ori, + (same ? "same info" : "different info")); +#endif + } + } + /* + * Go back and delete items from RIB + */ + if (delete_list) + { + for (ALL_LIST_ELEMENTS_RO (delete_list, node, ri)) + { + zlog_debug ("%s: deleting ri %p from slRibPt", __func__, ri); + assert (!skiplist_delete (slRibPt, &ri->rk, NULL)); + } + if (skiplist_empty (slRibPt)) + { + skiplist_free (slRibPt); + slRibPt = rn->info = NULL; + route_unlock_node (rn); + } + } + } + + RFAPI_RIB_CHECK_COUNTS (0, (delete_list ? 
delete_list->count : 0)); + + /* + * Iterate over routes at Pending Node + * + * If {vn} found at RIB Node, update RIB Node route contents to match PN + * If {vn} NOT found at RIB Node, add copy to RIB Node + */ + if (lPendCost) + { + for (ALL_LIST_ELEMENTS_RO (lPendCost, node, ri)) + { + + struct rfapi_info *ori; + + if (slRibPt && !skiplist_search (slRibPt, &ri->rk, (void **) &ori)) + { + + /* found: update contents of existing route in RIB */ + ori->un = ri->un; + ori->cost = ri->cost; + ori->lifetime = ri->lifetime; + rfapiFreeBgpTeaOptionChain (ori->tea_options); + ori->tea_options = rfapiOptionsDup (ri->tea_options); + ori->last_sent_time = rfapi_time (NULL); + + rfapiFreeRfapiVnOptionChain (ori->vn_options); + ori->vn_options = rfapiVnOptionsDup (ri->vn_options); + + rfapiFreeRfapiUnOptionChain (ori->un_options); + ori->un_options = rfapiUnOptionsDup (ri->un_options); + + zlog_debug + ("%s: matched lPendCost item %p in slRibPt, rewrote", + __func__, ri); + + } + else + { + + char buf_rd[BUFSIZ]; + + /* not found: add new route to RIB */ + ori = rfapi_info_new (); + ori->rk = ri->rk; + ori->un = ri->un; + ori->cost = ri->cost; + ori->lifetime = ri->lifetime; + ori->tea_options = rfapiOptionsDup (ri->tea_options); + ori->last_sent_time = rfapi_time (NULL); + ori->vn_options = rfapiVnOptionsDup (ri->vn_options); + ori->un_options = rfapiUnOptionsDup (ri->un_options); + + if (!slRibPt) + { + slRibPt = skiplist_new (0, rfapi_rib_key_cmp, NULL); + rn->info = slRibPt; + route_lock_node (rn); + } + skiplist_insert (slRibPt, &ori->rk, ori); + +#if DEBUG_RIB_SL_RD + prefix_rd2str(&ori->rk.rd, buf_rd, sizeof(buf_rd)); +#else + buf_rd[0] = 0; +#endif + + zlog_debug ("%s: nomatch lPendCost item %p in slRibPt, added (rd=%s)", + __func__, ri, buf_rd); + } + + /* + * poke timer + */ + RFAPI_RIB_CHECK_COUNTS (0, (delete_list ? delete_list->count : 0)); + rfapiRibStartTimer (rfd, ori, rn, 0); + RFAPI_RIB_CHECK_COUNTS (0, (delete_list ? delete_list->count : 0)); + } + } + + +callback: + /* + * Construct NHL as concatenation of pending list + delete list + */ + + + RFAPI_RIB_CHECK_COUNTS (0, (delete_list ? delete_list->count : 0)); + + if (lPendCost) + { + + char buf[BUFSIZ]; + char buf2[BUFSIZ]; + + zlog_debug ("%s: lPendCost->count now %d", __func__, lPendCost->count); + zlog_debug ("%s: For prefix %s (a)", __func__, buf_prefix); + printedprefix = 1; + + for (ALL_LIST_ELEMENTS (lPendCost, node, nnode, ri)) + { + + struct rfapi_next_hop_entry *new; + struct route_node *trn; + + new = + XCALLOC (MTYPE_RFAPI_NEXTHOP, + sizeof (struct rfapi_next_hop_entry)); + assert (new); + + if (ri->rk.aux_prefix.family) + { + rfapiQprefix2Rprefix (&ri->rk.aux_prefix, &new->prefix); + } + else + { + new->prefix = hp; + if (AFI_ETHER == afi) + { + /* hp is 0; need to set length to match AF of vn */ + new->prefix.length = + (ri->rk.vn.family == AF_INET) ? 
32 : 128; + } + } + new->prefix.cost = ri->cost; + new->lifetime = ri->lifetime; + rfapiQprefix2Raddr (&ri->rk.vn, &new->vn_address); + rfapiQprefix2Raddr (&ri->un, &new->un_address); + /* free option chain from ri */ + rfapiFreeBgpTeaOptionChain (ri->tea_options); + + ri->tea_options = NULL; /* option chain was transferred to NHL */ + + new->vn_options = ri->vn_options; + ri->vn_options = NULL; /* option chain was transferred to NHL */ + + new->un_options = ri->un_options; + ri->un_options = NULL; /* option chain was transferred to NHL */ + + if (*tail) + (*tail)->next = new; + *tail = new; + if (!*head) + { + *head = new; + } + sendingsomeroutes = 1; + + ++rfd->stat_count_nh_reachable; + ++bgp->rfapi->stat.count_updated_response_updates; + + /* + * update this NVE's timestamp for this prefix + */ + trn = route_node_get (rfd->rsp_times[afi], &pn->p); /* locks trn */ + trn->info = (void *) (uintptr_t) bgp_clock (); + if (trn->lock > 1) + route_unlock_node (trn); + + rfapiRfapiIpAddr2Str (&new->vn_address, buf, BUFSIZ); + rfapiRfapiIpAddr2Str (&new->un_address, buf2, BUFSIZ); + zlog_debug ("%s: add vn=%s un=%s cost=%d life=%d", __func__, + buf, buf2, new->prefix.cost, new->lifetime); + } + } + + RFAPI_RIB_CHECK_COUNTS (0, (delete_list ? delete_list->count : 0)); + + if (delete_list) + { + + char buf[BUFSIZ]; + char buf2[BUFSIZ]; + + if (!printedprefix) + { + zlog_debug ("%s: For prefix %s (d)", __func__, buf_prefix); + printedprefix = 1; + } + zlog_debug ("%s: delete_list has %d elements", + __func__, delete_list->count); + + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + if (!CHECK_FLAG (bgp->rfapi_cfg->flags, + BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE)) + { + + for (ALL_LIST_ELEMENTS (delete_list, node, nnode, ri)) + { + + struct rfapi_next_hop_entry *new; + struct rfapi_info *ri_del; + + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + new = XCALLOC (MTYPE_RFAPI_NEXTHOP, + sizeof (struct rfapi_next_hop_entry)); + assert (new); + + if (ri->rk.aux_prefix.family) + { + rfapiQprefix2Rprefix (&ri->rk.aux_prefix, &new->prefix); + } + else + { + new->prefix = hp; + if (AFI_ETHER == afi) + { + /* hp is 0; need to set length to match AF of vn */ + new->prefix.length = + (ri->rk.vn.family == AF_INET) ? 
32 : 128; + } + } + + new->prefix.cost = ri->cost; + new->lifetime = RFAPI_REMOVE_RESPONSE_LIFETIME; + rfapiQprefix2Raddr (&ri->rk.vn, &new->vn_address); + rfapiQprefix2Raddr (&ri->un, &new->un_address); + + new->vn_options = ri->vn_options; + ri->vn_options = NULL; /* option chain was transferred to NHL */ + + new->un_options = ri->un_options; + ri->un_options = NULL; /* option chain was transferred to NHL */ + + if (*tail) + (*tail)->next = new; + *tail = new; + if (!*head) + { + *head = new; + } + ++rfd->stat_count_nh_removal; + ++bgp->rfapi->stat.count_updated_response_deletes; + + rfapiRfapiIpAddr2Str (&new->vn_address, buf, BUFSIZ); + rfapiRfapiIpAddr2Str (&new->un_address, buf2, BUFSIZ); + zlog_debug ("%s: DEL vn=%s un=%s cost=%d life=%d", __func__, + buf, buf2, new->prefix.cost, new->lifetime); + + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + /* + * Update/add to list of recent deletions at this prefix + */ + if (!rn->aggregate) + { + rn->aggregate = skiplist_new (0, rfapi_rib_key_cmp, + (void (*)(void *)) + rfapi_info_free); + route_lock_node (rn); + } + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + + /* sanity check lifetime */ + if (ri->lifetime > RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY) + ri->lifetime = RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY; + + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + /* cancel normal expire timer */ + if (ri->timer) + { + struct rfapi_rib_tcb *tcb; + + tcb = ((struct thread *) ri->timer)->arg; + thread_cancel ((struct thread *) ri->timer); + XFREE (MTYPE_RFAPI_RECENT_DELETE, tcb); + ri->timer = NULL; + } + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + + /* + * Look in "recently-deleted" list + */ + if (skiplist_search ((struct skiplist *) (rn->aggregate), + &ri->rk, (void **) &ri_del)) + { + + int rc; + + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + /* + * NOT in "recently-deleted" list + */ + list_delete_node (delete_list, node); /* does not free ri */ + rc = skiplist_insert ((struct skiplist *) (rn->aggregate), + &ri->rk, ri); + assert (!rc); + + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + rfapiRibStartTimer (rfd, ri, rn, 1); + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + ri->last_sent_time = rfapi_time (NULL); +#if DEBUG_RIB_SL_RD + { + char buf_rd[BUFSIZ]; + prefix_rd2str(&ri->rk.rd, buf_rd, sizeof(buf_rd)); + zlog_debug("%s: move route to recently deleted list, rd=%s", + __func__, buf_rd); + } +#endif + + } + else + { + /* + * IN "recently-deleted" list + */ + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + rfapiRibStartTimer (rfd, ri_del, rn, 1); + RFAPI_RIB_CHECK_COUNTS (0, delete_list->count); + ri->last_sent_time = rfapi_time (NULL); + + } + } + } + else + { + zlog_debug ("%s: response removal disabled, omitting removals", + __func__); + } + + delete_list->del = (void (*)(void *)) rfapi_info_free; + list_delete (delete_list); + } + + RFAPI_RIB_CHECK_COUNTS (0, 0); + + /* + * Reset pending lists. The final route_unlock_node() will probably + * cause the pending node to be released. 
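+ * Each non-NULL container hanging off the pending node (the
+ * slPendPt skiplist in pn->aggregate and the lPendCost list in
+ * pn->info) holds one route_node lock taken when it was attached,
+ * so each free below is paired with its own route_unlock_node(pn).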
+ */ + if (slPendPt) + { + skiplist_free (slPendPt); + pn->aggregate = NULL; + route_unlock_node (pn); + } + if (lPendCost) + { + list_delete (lPendCost); + pn->info = NULL; + route_unlock_node (pn); + } + RFAPI_RIB_CHECK_COUNTS (0, 0); + + if (rib_node_started_nonempty) + { + if (!rn->info) + { + RFAPI_RIB_PREFIX_COUNT_DECR (rfd, bgp->rfapi); + } + } + else + { + if (rn->info) + { + RFAPI_RIB_PREFIX_COUNT_INCR (rfd, bgp->rfapi); + } + } + + if (sendingsomeroutes) + rfapiMonitorTimersRestart (rfd, &pn->p); + + route_unlock_node (rn); /* route_node_get() */ + + RFAPI_RIB_CHECK_COUNTS (1, 0); +} + +/* + * regardless of targets, construct a single callback by doing + * only one traversal of the pending RIB + * + * + * Do callback + * + */ +static void +rib_do_callback_onepass (struct rfapi_descriptor *rfd, afi_t afi) +{ + struct bgp *bgp = bgp_get_default (); + struct rfapi_next_hop_entry *head = NULL; + struct rfapi_next_hop_entry *tail = NULL; + struct route_node *rn; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: rfd=%p, afi=%d", __func__, rfd, afi); +#endif + + if (!rfd->rib_pending[afi]) + return; + + assert (bgp->rfapi); + + for (rn = route_top (rfd->rib_pending[afi]); rn; rn = route_next (rn)) + { + process_pending_node (bgp, rfd, afi, rn, &head, &tail); + } + + if (head) + { + rfapi_response_cb_t *f; + +#if DEBUG_NHL + zlog_debug ("%s: response callback NHL follows:", __func__); + rfapiPrintNhl (NULL, head); +#endif + + if (rfd->response_cb) + f = rfd->response_cb; + else + f = bgp->rfapi->rfp_methods.response_cb; + + bgp->rfapi->flags |= RFAPI_INCALLBACK; + zlog_debug ("%s: invoking updated response callback", __func__); + (*f) (head, rfd->cookie); + bgp->rfapi->flags &= ~RFAPI_INCALLBACK; + ++bgp->rfapi->response_updated_count; + } +} + +static wq_item_status +rfapiRibDoQueuedCallback (struct work_queue *wq, void *data) +{ + struct rfapi_descriptor *rfd; + afi_t afi; + uint32_t queued_flag; + + RFAPI_RIB_CHECK_COUNTS (1, 0); + + rfd = ((struct rfapi_updated_responses_queue *) data)->rfd; + afi = ((struct rfapi_updated_responses_queue *) data)->afi; + + /* Make sure the HD wasn't closed after the work item was scheduled */ + if (rfapi_check (rfd)) + return WQ_SUCCESS; + + rib_do_callback_onepass (rfd, afi); + + queued_flag = RFAPI_QUEUED_FLAG (afi); + + UNSET_FLAG (rfd->flags, queued_flag); + + RFAPI_RIB_CHECK_COUNTS (1, 0); + + return WQ_SUCCESS; +} + +static void +rfapiRibQueueItemDelete (struct work_queue *wq, void *data) +{ + XFREE (MTYPE_RFAPI_UPDATED_RESPONSE_QUEUE, data); +} + +static void +updated_responses_queue_init (struct rfapi_descriptor *rfd) +{ + if (rfd->updated_responses_queue) + return; + + rfd->updated_responses_queue = work_queue_new (bm->master, + "rfapi updated responses"); + assert (rfd->updated_responses_queue); + + rfd->updated_responses_queue->spec.workfunc = rfapiRibDoQueuedCallback; + rfd->updated_responses_queue->spec.del_item_data = rfapiRibQueueItemDelete; + rfd->updated_responses_queue->spec.max_retries = 0; + rfd->updated_responses_queue->spec.hold = 1; +} + +/* + * Called when an import table node is modified. Construct a + * new complete nexthop list, sorted by cost (lowest first), + * based on the import table node. + * + * Filter out duplicate nexthops (vn address). There should be + * only one UN address per VN address from the point of view of + * a given import table, so we can probably ignore UN addresses + * while filtering. 
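+ *
+ * The duplicate filtering below is done with the per-node skiplist
+ * kept in pn->aggregate: each candidate rfapi_info is keyed by its
+ * rfapi_rib_key {vn, rd, aux_prefix}, and a failed skiplist_insert()
+ * means that key was already seen, so the candidate is freed instead
+ * of being appended to the pn->info list.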
+ * + * Based on rfapiNhlAddNodeRoutes() + */ +void +rfapiRibUpdatePendingNode ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct rfapi_import_table *it, /* needed for L2 */ + struct route_node *it_node, + uint32_t lifetime) +{ + struct prefix *prefix; + struct bgp_info *bi; + struct route_node *pn; + afi_t afi; + uint32_t queued_flag; + int count = 0; + char buf[BUFSIZ]; + + zlog_debug ("%s: entry", __func__); + + if (CHECK_FLAG (bgp->rfapi_cfg->flags, BGP_VNC_CONFIG_CALLBACK_DISABLE)) + return; + + zlog_debug ("%s: callbacks are not disabled", __func__); + + RFAPI_RIB_CHECK_COUNTS (1, 0); + + prefix = &it_node->p; + afi = family2afi (prefix->family); + prefix2str (prefix, buf, BUFSIZ); + zlog_debug ("%s: prefix=%s", __func__, buf); + + pn = route_node_get (rfd->rib_pending[afi], prefix); + assert (pn); + + zlog_debug ("%s: pn->info=%p, pn->aggregate=%p", __func__, pn->info, + pn->aggregate); + + if (pn->aggregate) + { + /* + * free references into the rfapi_info structures before + * freeing the structures themselves + */ + skiplist_free ((struct skiplist *) (pn->aggregate)); + pn->aggregate = NULL; + route_unlock_node (pn); /* skiplist deleted */ + } + + + /* + * free the rfapi_info structures + */ + if (pn->info) + { + if (pn->info != (void *) 1) + { + list_delete ((struct list *) (pn->info)); + } + pn->info = NULL; + route_unlock_node (pn); /* linklist or 1 deleted */ + } + + /* + * The BIs in the import table are already sorted by cost + */ + for (bi = it_node->info; bi; bi = bi->next) + { + + struct rfapi_info *ri; + struct prefix pfx_nh; + + if (!bi->attr) + { + /* shouldn't happen */ + /* TBD increment error stats counter */ + continue; + } + if (!bi->extra) + { + /* shouldn't happen */ + /* TBD increment error stats counter */ + continue; + } + + rfapiNexthop2Prefix (bi->attr, &pfx_nh); + + /* + * Omit route if nexthop is self + */ + if (CHECK_FLAG + (bgp->rfapi_cfg->flags, BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP)) + { + + struct prefix pfx_vn; + + rfapiRaddr2Qprefix (&rfd->vn_addr, &pfx_vn); + if (prefix_same (&pfx_vn, &pfx_nh)) + continue; + } + + ri = rfapi_info_new (); + ri->rk.vn = pfx_nh; + ri->rk.rd = bi->extra->vnc.import.rd; + /* + * If there is an auxiliary IP address (L2 can have it), copy it + */ + if (bi->extra->vnc.import.aux_prefix.family) + { + ri->rk.aux_prefix = bi->extra->vnc.import.aux_prefix; + } + + if (rfapiGetUnAddrOfVpnBi (bi, &ri->un)) + { + rfapi_info_free (ri); + continue; + } + + if (!pn->aggregate) + { + pn->aggregate = skiplist_new (0, rfapi_rib_key_cmp, NULL); + route_lock_node (pn); + } + + /* + * If we have already added this nexthop, the insert will fail. + * Note that the skiplist key is a pointer INTO the rfapi_info + * structure which will be added to the "info" list. + * The skiplist entry VALUE is not used for anything but + * might be useful during debugging. 
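+ *
+ * Because the key points into the rfapi_info itself, the skiplist in
+ * pn->aggregate stays valid only while those rfapi_info structures
+ * are alive; it is therefore torn down before the pn->info list is
+ * freed (see the cleanup at the top of this function).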
+ */ + if (skiplist_insert ((struct skiplist *) pn->aggregate, &ri->rk, ri)) + { + + /* + * duplicate + */ + rfapi_info_free (ri); + continue; + } + + rfapiRibBi2Ri(bi, ri, lifetime); + + if (!pn->info) + { + pn->info = list_new (); + ((struct list *)(pn->info))->del = (void (*)(void *))rfapi_info_free; + route_lock_node (pn); + } + + listnode_add ((struct list *) (pn->info), ri); + } + + if (pn->info) + { + count = ((struct list *) (pn->info))->count; + } + + if (!count) + { + assert (!pn->info); + assert (!pn->aggregate); + pn->info = (void *) 1; /* magic value means this node has no routes */ + route_lock_node (pn); + } + + route_unlock_node (pn); /* route_node_get */ + + queued_flag = RFAPI_QUEUED_FLAG (afi); + + if (!CHECK_FLAG (rfd->flags, queued_flag)) + { + + struct rfapi_updated_responses_queue *urq; + + urq = XCALLOC (MTYPE_RFAPI_UPDATED_RESPONSE_QUEUE, + sizeof (struct rfapi_updated_responses_queue)); + assert (urq); + if (!rfd->updated_responses_queue) + updated_responses_queue_init (rfd); + + SET_FLAG (rfd->flags, queued_flag); + urq->rfd = rfd; + urq->afi = afi; + work_queue_add (rfd->updated_responses_queue, urq); + } + RFAPI_RIB_CHECK_COUNTS (1, 0); +} + +void +rfapiRibUpdatePendingNodeSubtree ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct rfapi_import_table *it, + struct route_node *it_node, + struct route_node *omit_subtree, /* may be NULL */ + uint32_t lifetime) +{ + if (it_node->l_left && (it_node->l_left != omit_subtree)) + { + if (it_node->l_left->info) + rfapiRibUpdatePendingNode (bgp, rfd, it, it_node->l_left, lifetime); + rfapiRibUpdatePendingNodeSubtree (bgp, rfd, it, it_node->l_left, + omit_subtree, lifetime); + } + + if (it_node->l_right && (it_node->l_right != omit_subtree)) + { + if (it_node->l_right->info) + rfapiRibUpdatePendingNode (bgp, rfd, it, it_node->l_right, lifetime); + rfapiRibUpdatePendingNodeSubtree (bgp, rfd, it, it_node->l_right, + omit_subtree, lifetime); + } +} + +/* + * RETURN VALUE + * + * 0 allow prefix to be included in response + * !0 don't allow prefix to be included in response + */ +int +rfapiRibFTDFilterRecentPrefix( + struct rfapi_descriptor *rfd, + struct route_node *it_rn, /* import table node */ + struct prefix *pfx_target_original) /* query target */ +{ + struct bgp *bgp = rfd->bgp; + afi_t afi = family2afi(it_rn->p.family); + time_t prefix_time; + struct route_node *trn; + + /* + * Not in FTD mode, so allow prefix + */ + if (bgp->rfapi_cfg->rfp_cfg.download_type != RFAPI_RFP_DOWNLOAD_FULL) + return 0; + + /* + * TBD + * This matches behavior of now-obsolete rfapiRibFTDFilterRecent(), + * but we need to decide if that is correct. 
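+ *
+ * Summary of this function's checks: a prefix is allowed (return 0)
+ * when FTD (full-download) mode is off, when the prefix is an
+ * ethernet prefix, when it covers the original query target, or when
+ * its last-sent timestamp is not newer than ftd_last_allowed_time;
+ * otherwise it is filtered (return !0).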
+ */ + if (it_rn->p.family == AF_ETHERNET) + return 0; + +#if DEBUG_FTD_FILTER_RECENT + { + char buf_pfx[BUFSIZ]; + + prefix2str(&it_rn->p, buf_pfx, BUFSIZ); + zlog_debug("%s: prefix %s", __func__, buf_pfx); + } +#endif + + /* + * prefix covers target address, so allow prefix + */ + if (prefix_match (&it_rn->p, pfx_target_original)) + { +#if DEBUG_FTD_FILTER_RECENT + zlog_debug("%s: prefix covers target, allowed", __func__); +#endif + return 0; + } + + /* + * check this NVE's timestamp for this prefix + */ + trn = route_node_get (rfd->rsp_times[afi], &it_rn->p); /* locks trn */ + prefix_time = (time_t) trn->info; + if (trn->lock > 1) + route_unlock_node (trn); + +#if DEBUG_FTD_FILTER_RECENT + zlog_debug("%s: last sent time %lu, last allowed time %lu", + __func__, prefix_time, rfd->ftd_last_allowed_time); +#endif + + /* + * haven't sent this prefix, which doesn't cover target address, + * to NVE since ftd_advertisement_interval, so OK to send now. + */ + if (prefix_time <= rfd->ftd_last_allowed_time) + return 0; + + return 1; +} + +/* + * Call when rfapi returns from rfapi_query() so the RIB reflects + * the routes sent to the NVE before the first updated response + * + * Also: remove duplicates from response. Caller should use returned + * value of nexthop chain. + */ +struct rfapi_next_hop_entry * +rfapiRibPreload ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct rfapi_next_hop_entry *response, + int use_eth_resolution) +{ + struct rfapi_next_hop_entry *nhp; + struct rfapi_next_hop_entry *nhp_next; + struct rfapi_next_hop_entry *head = NULL; + struct rfapi_next_hop_entry *tail = NULL; + time_t new_last_sent_time; + + zlog_debug ("%s: loading response=%p, use_eth_resolution=%d", + __func__, response, use_eth_resolution); + + new_last_sent_time = rfapi_time (NULL); + + for (nhp = response; nhp; nhp = nhp_next) + { + + struct prefix pfx; + struct rfapi_rib_key rk; + afi_t afi; + struct rfapi_info *ri; + int need_insert; + struct route_node *rn; + int rib_node_started_nonempty = 0; + struct route_node *trn; + int allowed = 0; + + /* save in case we delete nhp */ + nhp_next = nhp->next; + + if (nhp->lifetime == RFAPI_REMOVE_RESPONSE_LIFETIME) + { + /* + * weird, shouldn't happen + */ + zlog_debug + ("%s: got nhp->lifetime == RFAPI_REMOVE_RESPONSE_LIFETIME", + __func__); + continue; + } + + + if (use_eth_resolution) + { + /* get the prefix of the ethernet address in the L2 option */ + struct rfapi_l2address_option *pL2o; + struct rfapi_vn_option *vo; + + /* + * Look for VN option of type RFAPI_VN_OPTION_TYPE_L2ADDR + */ + for (pL2o = NULL, vo = nhp->vn_options; vo; vo = vo->next) + { + if (RFAPI_VN_OPTION_TYPE_L2ADDR == vo->type) + { + pL2o = &vo->v.l2addr; + break; + } + } + + if (!pL2o) + { + /* + * not supposed to happen + */ + zlog_debug ("%s: missing L2 info", __func__); + continue; + } + + afi = AFI_ETHER; + rfapiL2o2Qprefix (pL2o, &pfx); + } + else + { + rfapiRprefix2Qprefix (&nhp->prefix, &pfx); + afi = family2afi (pfx.family); + } + + /* + * TBD for ethernet, rib must know the right way to distinguish + * duplicate routes + * + * Current approach: prefix is key to radix tree; then + * each prefix has a set of routes with unique VN addrs + */ + + /* + * Look up prefix in RIB + */ + rn = route_node_get (rfd->rib[afi], &pfx); /* locks rn */ + + if (rn->info) + { + rib_node_started_nonempty = 1; + } + else + { + rn->info = skiplist_new (0, rfapi_rib_key_cmp, NULL); + route_lock_node (rn); + } + + /* + * Look up route at prefix + */ + need_insert = 0; + memset ((void *) &rk, 0, 
sizeof (rk)); + assert (!rfapiRaddr2Qprefix (&nhp->vn_address, &rk.vn)); + + if (use_eth_resolution) + { + /* copy what came from aux_prefix to rk.aux_prefix */ + rfapiRprefix2Qprefix (&nhp->prefix, &rk.aux_prefix); + if (RFAPI_0_PREFIX (&rk.aux_prefix) + && RFAPI_HOST_PREFIX (&rk.aux_prefix)) + { + /* mark as "none" if nhp->prefix is 0/32 or 0/128 */ + rk.aux_prefix.family = 0; + } + } + +#if DEBUG_NHL + { + char str_vn[BUFSIZ]; + char str_aux_prefix[BUFSIZ]; + + str_vn[0] = 0; + str_aux_prefix[0] = 0; + + prefix2str (&rk.vn, str_vn, BUFSIZ); + prefix2str (&rk.aux_prefix, str_aux_prefix, BUFSIZ); + + if (!rk.aux_prefix.family) + { + + } + zlog_debug ("%s: rk.vn=%s rk.aux_prefix=%s", + __func__, str_vn, + (rk.aux_prefix.family ? str_aux_prefix : "-")); + } + zlog_debug ("%s: RIB skiplist for this prefix follows", __func__); + rfapiRibShowRibSl (NULL, &rn->p, (struct skiplist *) rn->info); +#endif + + + if (!skiplist_search ((struct skiplist *) rn->info, &rk, (void **) &ri)) + { + /* + * Already have this route; make values match + */ + rfapiFreeRfapiUnOptionChain (ri->un_options); + ri->un_options = NULL; + rfapiFreeRfapiVnOptionChain (ri->vn_options); + ri->vn_options = NULL; + +#if DEBUG_NHL + zlog_debug ("%s: found in RIB", __func__); +#endif + + /* + * Filter duplicate routes from initial response. + * Check timestamps to avoid wraparound problems + */ + if ((ri->rsp_counter != rfd->rsp_counter) || + (ri->last_sent_time != new_last_sent_time)) + { + +#if DEBUG_NHL + zlog_debug ("%s: allowed due to counter/timestamp diff", + __func__); +#endif + allowed = 1; + } + + } + else + { + +#if DEBUG_NHL + zlog_debug ("%s: allowed due to not yet in RIB", __func__); +#endif + /* not found: add new route to RIB */ + ri = rfapi_info_new (); + need_insert = 1; + allowed = 1; + } + + ri->rk = rk; + assert (!rfapiRaddr2Qprefix (&nhp->un_address, &ri->un)); + ri->cost = nhp->prefix.cost; + ri->lifetime = nhp->lifetime; + ri->vn_options = rfapiVnOptionsDup (nhp->vn_options); + ri->rsp_counter = rfd->rsp_counter; + ri->last_sent_time = rfapi_time (NULL); + + if (need_insert) + { + int rc; + rc = skiplist_insert ((struct skiplist *) rn->info, &ri->rk, ri); + assert (!rc); + } + + if (!rib_node_started_nonempty) + { + RFAPI_RIB_PREFIX_COUNT_INCR (rfd, bgp->rfapi); + } + + RFAPI_RIB_CHECK_COUNTS (0, 0); + rfapiRibStartTimer (rfd, ri, rn, 0); + RFAPI_RIB_CHECK_COUNTS (0, 0); + + route_unlock_node (rn); + + /* + * update this NVE's timestamp for this prefix + */ + trn = route_node_get (rfd->rsp_times[afi], &pfx); /* locks trn */ + trn->info = (void *) (uintptr_t) bgp_clock (); + if (trn->lock > 1) + route_unlock_node (trn); + + { + char str_pfx[BUFSIZ]; + char str_pfx_vn[BUFSIZ]; + + prefix2str (&pfx, str_pfx, BUFSIZ); + prefix2str (&rk.vn, str_pfx_vn, BUFSIZ); + zlog_debug + ("%s: added pfx=%s nh[vn]=%s, cost=%u, lifetime=%u, allowed=%d", + __func__, str_pfx, str_pfx_vn, nhp->prefix.cost, nhp->lifetime, + allowed); + } + + if (allowed) + { + if (tail) + (tail)->next = nhp; + tail = nhp; + if (!head) + { + head = nhp; + } + } + else + { + rfapi_un_options_free (nhp->un_options); + nhp->un_options = NULL; + rfapi_vn_options_free (nhp->vn_options); + nhp->vn_options = NULL; + + XFREE (MTYPE_RFAPI_NEXTHOP, nhp); + nhp = NULL; + } + } + + if (tail) + tail->next = NULL; + return head; +} + +void +rfapiRibPendingDeleteRoute ( + struct bgp *bgp, + struct rfapi_import_table *it, + afi_t afi, + struct route_node *it_node) +{ + struct rfapi_descriptor *rfd; + struct listnode *node; + char buf[BUFSIZ]; + + prefix2str 
(&it_node->p, buf, BUFSIZ); + zlog_debug ("%s: entry, it=%p, afi=%d, it_node=%p, pfx=%s", + __func__, it, afi, it_node, buf); + + if (AFI_ETHER == afi) + { + /* + * ethernet import tables are per-LNI and each ethernet monitor + * identifies the rfd that owns it. + */ + struct rfapi_monitor_eth *m; + struct route_node *rn; + struct skiplist *sl; + void *cursor; + int rc; + + /* + * route-specific monitors + */ + if ((sl = RFAPI_MONITOR_ETH (it_node))) + { + + zlog_debug ("%s: route-specific skiplist: %p", __func__, sl); + + for (cursor = NULL, rc = + skiplist_next (sl, NULL, (void **) &m, (void **) &cursor); !rc; + rc = skiplist_next (sl, NULL, (void **) &m, (void **) &cursor)) + { + +#if DEBUG_PENDING_DELETE_ROUTE + zlog_debug ("%s: eth monitor rfd=%p", __func__, m->rfd); +#endif + /* + * If we have already sent a route with this prefix to this + * NVE, it's OK to send an update with the delete + */ + if ((rn = route_node_lookup (m->rfd->rib[afi], &it_node->p))) + { + rfapiRibUpdatePendingNode (bgp, m->rfd, it, it_node, + m->rfd->response_lifetime); + route_unlock_node (rn); + } + } + } + + /* + * all-routes/FTD monitors + */ + for (m = it->eth0_queries; m; m = m->next) + { +#if DEBUG_PENDING_DELETE_ROUTE + zlog_debug ("%s: eth0 monitor rfd=%p", __func__, m->rfd); +#endif + /* + * If we have already sent a route with this prefix to this + * NVE, it's OK to send an update with the delete + */ + if ((rn = route_node_lookup (m->rfd->rib[afi], &it_node->p))) + { + rfapiRibUpdatePendingNode (bgp, m->rfd, it, it_node, + m->rfd->response_lifetime); + } + } + + } + else + { + /* + * Find RFDs that reference this import table + */ + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, node, rfd)) + { + + struct route_node *rn; + + zlog_debug ("%s: comparing rfd(%p)->import_table=%p to it=%p", + __func__, rfd, rfd->import_table, it); + + if (rfd->import_table != it) + continue; + + zlog_debug ("%s: matched rfd %p", __func__, rfd); + + /* + * If we have sent a response to this NVE with this prefix + * previously, we should send an updated response. 
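+ *
+ * rfapiRibUpdatePendingNode() rebuilds the pending node for this
+ * prefix and queues an updated-response work item for the NVE; the
+ * route_node_lookup() here is only an "already sent" test, so the
+ * lock it takes is released immediately below.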
+ */ + if ((rn = route_node_lookup (rfd->rib[afi], &it_node->p))) + { + rfapiRibUpdatePendingNode (bgp, rfd, it, it_node, + rfd->response_lifetime); + route_unlock_node (rn); + } + } + } +} + +void +rfapiRibShowResponsesSummary (void *stream) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + struct bgp *bgp = bgp_get_default (); + + int nves = 0; + int nves_with_nonempty_ribs = 0; + struct rfapi_descriptor *rfd; + struct listnode *node; + + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + fp (out, "%-24s ", "Responses: (Prefixes)"); + fp (out, "%-8s %-8u ", "Active:", bgp->rfapi->rib_prefix_count_total); + fp (out, "%-8s %-8u", "Maximum:", bgp->rfapi->rib_prefix_count_total_max); + fp (out, "%s", VTY_NEWLINE); + + fp (out, "%-24s ", " (Updated)"); + fp (out, "%-8s %-8u ", "Update:", + bgp->rfapi->stat.count_updated_response_updates); + fp (out, "%-8s %-8u", "Remove:", + bgp->rfapi->stat.count_updated_response_deletes); + fp (out, "%-8s %-8u", "Total:", + bgp->rfapi->stat.count_updated_response_updates + + bgp->rfapi->stat.count_updated_response_deletes); + fp (out, "%s", VTY_NEWLINE); + + fp (out, "%-24s ", " (NVEs)"); + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, node, rfd)) + { + ++nves; + if (rfd->rib_prefix_count) + ++nves_with_nonempty_ribs; + } + fp (out, "%-8s %-8u ", "Active:", nves_with_nonempty_ribs); + fp (out, "%-8s %-8u", "Total:", nves); + fp (out, "%s", VTY_NEWLINE); + +} + +void +rfapiRibShowResponsesSummaryClear (void) +{ + struct bgp *bgp = bgp_get_default (); + + bgp->rfapi->rib_prefix_count_total_max = bgp->rfapi->rib_prefix_count_total; +} + +static int +print_rib_sl ( + int (*fp) (void *, const char *, ...), + struct vty *vty, + void *out, + struct skiplist *sl, + int deleted, + char *str_pfx, + int *printedprefix) +{ + struct rfapi_info *ri; + int rc; + void *cursor; + int routes_displayed = 0; + + cursor = NULL; + for (rc = skiplist_next (sl, NULL, (void **) &ri, &cursor); + !rc; rc = skiplist_next (sl, NULL, (void **) &ri, &cursor)) + { + + char str_vn[BUFSIZ]; + char str_un[BUFSIZ]; + char str_lifetime[BUFSIZ]; + char str_age[BUFSIZ]; + char *p; + char str_rd[BUFSIZ]; + + ++routes_displayed; + + prefix2str (&ri->rk.vn, str_vn, BUFSIZ); + p = index (str_vn, '/'); + if (p) + *p = 0; + + prefix2str (&ri->un, str_un, BUFSIZ); + p = index (str_un, '/'); + if (p) + *p = 0; + + rfapiFormatSeconds (ri->lifetime, str_lifetime, BUFSIZ); +#if RFAPI_REGISTRATIONS_REPORT_AGE + rfapiFormatAge (ri->last_sent_time, str_age, BUFSIZ); +#else + { + time_t now = rfapi_time (NULL); + time_t expire = ri->last_sent_time + (time_t) ri->lifetime; + /* allow for delayed/async removal */ + rfapiFormatSeconds ((expire > now ? expire - now : 1), + str_age, BUFSIZ); + } +#endif + + str_rd[0] = 0; /* start empty */ +#if DEBUG_RIB_SL_RD + str_rd[0] = ' '; + prefix_rd2str(&ri->rk.rd, str_rd+1, BUFSIZ-1); +#endif + + fp (out, " %c %-20s %-15s %-15s %-4u %-8s %-8s%s%s", + deleted ? 'r' : ' ', + *printedprefix ? 
"" : str_pfx, + str_vn, str_un, ri->cost, str_lifetime, str_age, str_rd, VTY_NEWLINE); + + if (!*printedprefix) + *printedprefix = 1; + } + return routes_displayed; +} + +#if DEBUG_NHL +/* + * This one is for debugging (set stream to NULL to send output to log) + */ +static void +rfapiRibShowRibSl (void *stream, struct prefix *pfx, struct skiplist *sl) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + int nhs_displayed = 0; + char str_pfx[BUFSIZ]; + int printedprefix = 0; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + prefix2str (pfx, str_pfx, BUFSIZ); + + nhs_displayed += print_rib_sl (fp, vty, out, sl, + 0, str_pfx, &printedprefix); +} +#endif + +void +rfapiRibShowResponses ( + void *stream, + struct prefix *pfx_match, + int show_removed) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + struct rfapi_descriptor *rfd; + struct listnode *node; + + struct bgp *bgp = bgp_get_default (); + int printedheader = 0; + int routes_total = 0; + int nhs_total = 0; + int prefixes_total = 0; + int prefixes_displayed = 0; + int nves_total = 0; + int nves_with_routes = 0; + int nves_displayed = 0; + int routes_displayed = 0; + int nhs_displayed = 0; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + /* + * loop over NVEs + */ + for (ALL_LIST_ELEMENTS_RO (&bgp->rfapi->descriptors, node, rfd)) + { + + int printednve = 0; + afi_t afi; + + ++nves_total; + if (rfd->rib_prefix_count) + ++nves_with_routes; + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + struct route_node *rn; + + if (!rfd->rib[afi]) + continue; + + for (rn = route_top (rfd->rib[afi]); rn; rn = route_next (rn)) + { + + struct skiplist *sl; + char str_pfx[BUFSIZ]; + int printedprefix = 0; + + if (!show_removed) + sl = rn->info; + else + sl = rn->aggregate; + + if (!sl) + continue; + + routes_total++; + nhs_total += skiplist_count (sl); + ++prefixes_total; + + if (pfx_match && !prefix_match (pfx_match, &rn->p) && + !prefix_match (&rn->p, pfx_match)) + continue; + + ++prefixes_displayed; + + if (!printedheader) + { + ++printedheader; + + fp (out, "%s[%s]%s", + VTY_NEWLINE, + show_removed ? "Removed" : "Active", VTY_NEWLINE); + fp (out, "%-15s %-15s%s", "Querying VN", "Querying UN", + VTY_NEWLINE); + fp (out, " %-20s %-15s %-15s %4s %-8s %-8s%s", + "Prefix", "Registered VN", "Registered UN", "Cost", + "Lifetime", +#if RFAPI_REGISTRATIONS_REPORT_AGE + "Age", +#else + "Remaining", +#endif + VTY_NEWLINE); + } + if (!printednve) + { + char str_vn[BUFSIZ]; + char str_un[BUFSIZ]; + + ++printednve; + ++nves_displayed; + + fp (out, "%-15s %-15s%s", + rfapiRfapiIpAddr2Str (&rfd->vn_addr, str_vn, BUFSIZ), + rfapiRfapiIpAddr2Str (&rfd->un_addr, str_un, BUFSIZ), + VTY_NEWLINE); + + } + prefix2str (&rn->p, str_pfx, BUFSIZ); + //fp(out, " %s%s", buf, VTY_NEWLINE); /* prefix */ + + routes_displayed++; + nhs_displayed += print_rib_sl (fp, vty, out, sl, + show_removed, str_pfx, + &printedprefix); + } + } + } + + if (routes_total) + { + fp (out, "%s", VTY_NEWLINE); + fp (out, "Displayed %u NVEs, and %u out of %u %s prefixes", + nves_displayed, routes_displayed, + routes_total, show_removed ? 
"removed" : "active"); + if (nhs_displayed != routes_displayed || nhs_total != routes_total) + fp (out, " with %u out of %u next hops", nhs_displayed, nhs_total); + fp (out, "%s", VTY_NEWLINE); + } +} diff --git a/bgpd/rfapi/rfapi_rib.h b/bgpd/rfapi/rfapi_rib.h new file mode 100644 index 0000000000..2a111946f7 --- /dev/null +++ b/bgpd/rfapi/rfapi_rib.h @@ -0,0 +1,154 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * File: rfapi_rib.h + * Purpose: per-nve rib + */ + +#ifndef QUAGGA_HGP_RFAPI_RIB_H +#define QUAGGA_HGP_RFAPI_RIB_H + +/* + * Key for indexing RIB and Pending RIB skiplists. For L3 RIBs, + * the VN address is sufficient because it represents the actual next hop. + * + * For L2 RIBs, it is possible to have multiple routes to a given L2 + * prefix via a given VN address, but each route having a unique aux_prefix. + */ +struct rfapi_rib_key +{ + struct prefix vn; + struct prefix_rd rd; + + /* + * for L2 routes: optional IP addr + * .family == 0 means "none" + */ + struct prefix aux_prefix; +}; + +struct rfapi_info +{ + struct rfapi_rib_key rk; /* NVE VN addr + aux addr */ + struct prefix un; + uint8_t cost; + uint32_t lifetime; + time_t last_sent_time; + uint32_t rsp_counter; /* dedup initial responses */ + struct bgp_tea_options *tea_options; + struct rfapi_un_option *un_options; + struct rfapi_vn_option *vn_options; + void *timer; +}; + +/* + * Work item for updated responses queue + */ +struct rfapi_updated_responses_queue +{ + struct rfapi_descriptor *rfd; + afi_t afi; +}; + + +extern void +rfapiRibClear (struct rfapi_descriptor *rfd); + +extern void +rfapiRibFree (struct rfapi_descriptor *rfd); + +extern void +rfapiRibUpdatePendingNode ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct rfapi_import_table *it, + struct route_node *it_node, + uint32_t lifetime); + +extern void +rfapiRibUpdatePendingNodeSubtree ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct rfapi_import_table *it, + struct route_node *it_node, + struct route_node *omit_subtree, + uint32_t lifetime); + +extern int +rfapiRibPreloadBi( + struct route_node *rfd_rib_node, + struct prefix *pfx_vn, + struct prefix *pfx_un, + uint32_t lifetime, + struct bgp_info *bi); + +extern struct rfapi_next_hop_entry * +rfapiRibPreload ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + struct rfapi_next_hop_entry *response, + int use_eth_resolution); + +extern void +rfapiRibPendingDeleteRoute ( + struct bgp *bgp, + struct rfapi_import_table *it, + afi_t afi, + struct route_node *it_node); + +extern void +rfapiRibShowResponsesSummary (void *stream); + +extern void +rfapiRibShowResponsesSummaryClear (void); + +extern void +rfapiRibShowResponses ( + void *stream, + struct prefix *pfx_match, + int show_removed); + +extern int +rfapiRibFTDFilterRecentPrefix( + 
struct rfapi_descriptor *rfd, + struct route_node *it_rn, /* import table node */ + struct prefix *pfx_target_original); /* query target */ + +extern void +rfapiFreeRfapiUnOptionChain (struct rfapi_un_option *p); + +extern void +rfapiFreeRfapiVnOptionChain (struct rfapi_vn_option *p); + +extern void +rfapiRibCheckCounts ( + int checkstats, /* validate rfd & global counts */ + unsigned int offset); /* number of ri's held separately */ + +/* enable for debugging; disable for performance */ +#if 0 +#define RFAPI_RIB_CHECK_COUNTS(checkstats, offset) rfapiRibCheckCounts(checkstats, offset) +#else +#define RFAPI_RIB_CHECK_COUNTS(checkstats, offset) +#endif + +#endif /* QUAGGA_HGP_RFAPI_RIB_H */ diff --git a/bgpd/rfapi/rfapi_vty.c b/bgpd/rfapi/rfapi_vty.c new file mode 100644 index 0000000000..e519ed4803 --- /dev/null +++ b/bgpd/rfapi/rfapi_vty.c @@ -0,0 +1,5016 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + + +#include + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/memory.h" +#include "lib/routemap.h" +#include "lib/log.h" +#include "lib/linklist.h" +#include "lib/command.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_attr.h" +#include "bgpd/bgp_mplsvpn.h" + +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_backend.h" + +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_aspath.h" +#include "bgpd/bgp_community.h" +#include "bgpd/bgp_vnc_types.h" + +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_rib.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/rfapi_ap.h" +#include "bgpd/rfapi/rfapi_encap_tlv.h" +#include "bgpd/rfapi/vnc_debug.h" + +#define DEBUG_L2_EXTRA 0 + +#define VNC_SHOW_STR "VNC information\n" + +/* format related utilies */ + + +#define FMT_MIN 60 /* seconds */ +#define FMT_HOUR (60 * FMT_MIN) +#define FMT_DAY (24 * FMT_HOUR) +#define FMT_YEAR (365 * FMT_DAY) + +char * +rfapiFormatSeconds (uint32_t seconds, char *buf, size_t len) +{ + int year, day, hour, min; + + if (seconds >= FMT_YEAR) + { + year = seconds / FMT_YEAR; + seconds -= year * FMT_YEAR; + } + else + year = 0; + + if (seconds >= FMT_DAY) + { + day = seconds / FMT_DAY; + seconds -= day * FMT_DAY; + } + else + day = 0; + + if (seconds >= FMT_HOUR) + { + hour = seconds / FMT_HOUR; + seconds -= hour * FMT_HOUR; + } + else + hour = 0; + + if (seconds >= FMT_MIN) + { + min = seconds / FMT_MIN; + seconds -= min * FMT_MIN; + } + else + min = 0; + + if (year > 0) + { + snprintf (buf, len, "%dy%dd%dh", year, day, hour); + } + else if (day > 0) + { + snprintf (buf, len, "%dd%dh%dm", day, hour, min); + } + else + { + snprintf (buf, len, 
"%02d:%02d:%02d", hour, min, seconds); + } + + return buf; +} + +char * +rfapiFormatAge (time_t age, char *buf, size_t len) +{ + time_t now, age_adjusted; + + now = rfapi_time (NULL); + age_adjusted = now - age; + + return rfapiFormatSeconds (age_adjusted, buf, len); +} + + +/* + * Reimplementation of quagga/lib/prefix.c function, but + * for RFAPI-style prefixes + */ +void +rfapiRprefixApplyMask (struct rfapi_ip_prefix *rprefix) +{ + uint8_t *pnt; + int index; + int offset; + + static uint8_t maskbit[] = + { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff }; + + switch (rprefix->prefix.addr_family) + { + case AF_INET: + index = rprefix->length / 8; + if (index < 4) + { + pnt = (uint8_t *) & rprefix->prefix.addr.v4; + offset = rprefix->length % 8; + pnt[index] &= maskbit[offset]; + index++; + while (index < 4) + pnt[index++] = 0; + } + break; + + case AF_INET6: + index = rprefix->length / 8; + if (index < 16) + { + pnt = (uint8_t *) & rprefix->prefix.addr.v6; + offset = rprefix->length % 8; + pnt[index] &= maskbit[offset]; + index++; + while (index < 16) + pnt[index++] = 0; + } + break; + + default: + assert (0); + } +} + +/* + * translate a quagga prefix into a rfapi IP address. The + * prefix is REQUIRED to be 32 bits for IPv4 and 128 bits for IPv6 + * + * RETURNS: + * + * 0 Success + * <0 Error + */ +int +rfapiQprefix2Raddr (struct prefix *qprefix, struct rfapi_ip_addr *raddr) +{ + memset (raddr, 0, sizeof (struct rfapi_ip_addr)); + raddr->addr_family = qprefix->family; + switch (qprefix->family) + { + case AF_INET: + if (qprefix->prefixlen != 32) + return -1; + raddr->addr.v4 = qprefix->u.prefix4; + break; + case AF_INET6: + if (qprefix->prefixlen != 128) + return -1; + raddr->addr.v6 = qprefix->u.prefix6; + break; + default: + return -1; + } + return 0; +} + +/* + * Translate Quagga prefix to RFAPI prefix + */ +/* rprefix->cost set to 0 */ +void +rfapiQprefix2Rprefix (struct prefix *qprefix, struct rfapi_ip_prefix *rprefix) +{ + memset (rprefix, 0, sizeof (struct rfapi_ip_prefix)); + rprefix->length = qprefix->prefixlen; + rprefix->prefix.addr_family = qprefix->family; + switch (qprefix->family) + { + case AF_INET: + rprefix->prefix.addr.v4 = qprefix->u.prefix4; + break; + case AF_INET6: + rprefix->prefix.addr.v6 = qprefix->u.prefix6; + break; + default: + assert (0); + } +} + +int +rfapiRprefix2Qprefix (struct rfapi_ip_prefix *rprefix, struct prefix *qprefix) +{ + memset (qprefix, 0, sizeof (struct prefix)); + qprefix->prefixlen = rprefix->length; + qprefix->family = rprefix->prefix.addr_family; + + switch (rprefix->prefix.addr_family) + { + case AF_INET: + qprefix->u.prefix4 = rprefix->prefix.addr.v4; + break; + case AF_INET6: + qprefix->u.prefix6 = rprefix->prefix.addr.v6; + break; + default: + return EAFNOSUPPORT; + } + return 0; +} + +/* + * returns 1 if prefixes have same addr family, prefix len, and address + * Note that host bits matter in this comparison! + * + * For paralellism with quagga/lib/prefix.c. if we need a comparison + * where host bits are ignored, call that function rfapiRprefixCmp. 
+ */ +int +rfapiRprefixSame (struct rfapi_ip_prefix *hp1, struct rfapi_ip_prefix *hp2) +{ + if (hp1->prefix.addr_family != hp2->prefix.addr_family) + return 0; + if (hp1->length != hp2->length) + return 0; + if (hp1->prefix.addr_family == AF_INET) + if (IPV4_ADDR_SAME (&hp1->prefix.addr.v4, &hp2->prefix.addr.v4)) + return 1; + if (hp1->prefix.addr_family == AF_INET6) + if (IPV6_ADDR_SAME (&hp1->prefix.addr.v6, &hp2->prefix.addr.v6)) + return 1; + return 0; +} + +int +rfapiRaddr2Qprefix (struct rfapi_ip_addr *hia, struct prefix *pfx) +{ + memset (pfx, 0, sizeof (struct prefix)); + pfx->family = hia->addr_family; + + switch (hia->addr_family) + { + case AF_INET: + pfx->prefixlen = 32; + pfx->u.prefix4 = hia->addr.v4; + break; + case AF_INET6: + pfx->prefixlen = 128; + pfx->u.prefix6 = hia->addr.v6; + break; + default: + return EAFNOSUPPORT; + } + return 0; +} + +void +rfapiL2o2Qprefix (struct rfapi_l2address_option *l2o, struct prefix *pfx) +{ + memset (pfx, 0, sizeof (struct prefix)); + pfx->family = AF_ETHERNET; + pfx->prefixlen = 48; + pfx->u.prefix_eth = l2o->macaddr; +} + +char * +rfapiEthAddr2Str (const struct ethaddr *ea, char *buf, int bufsize) +{ + int i; + char *p = buf; + + assert (bufsize > (3 * ETHER_ADDR_LEN)); + + for (i = 0; i <= ETHER_ADDR_LEN; ++i) + { + sprintf (p, "%02x", ea->octet[i]); + if (i < (ETHER_ADDR_LEN - 1)) + *(p + 2) = ':'; + p += 3; + } + return buf; +} + +int +rfapiStr2EthAddr (const char *str, struct ethaddr *ea) +{ + unsigned int a[6]; + int i; + + if (sscanf (str, "%2x:%2x:%2x:%2x:%2x:%2x", + a + 0, a + 1, a + 2, a + 3, a + 4, a + 5) != 6) + { + + return EINVAL; + } + + for (i = 0; i < 6; ++i) + ea->octet[i] = a[i] & 0xff; + + return 0; +} + +const char * +rfapi_ntop (int af, const void *src, char *buf, socklen_t size) +{ + if (af == AF_ETHERNET) + { + return rfapiEthAddr2Str ((const struct ethaddr *) src, buf, size); + } + + return inet_ntop (af, src, buf, size); +} + +int +rfapiDebugPrintf (void *dummy, const char *format, ...) +{ + va_list args; + va_start (args, format); + vzlog (NULL, LOG_DEBUG, format, args); + va_end (args); + return 0; +} + +static int +rfapiStdioPrintf (void *stream, const char *format, ...) +{ + FILE *file = NULL; + + va_list args; + va_start (args, format); + + switch ((uintptr_t) stream) + { + case 1: + file = stdout; + break; + case 2: + file = stderr; + break; + default: + assert (0); + } + + vfprintf (file, format, args); + va_end (args); + return 0; +} + +/* Fake out for debug logging */ +static struct vty vty_dummy_zlog; +static struct vty vty_dummy_stdio; +#define HVTY_NEWLINE ((vty == &vty_dummy_zlog)? 
"": VTY_NEWLINE) + +static const char * +str_vty_newline (struct vty *vty) +{ + if (vty == &vty_dummy_zlog) + return ""; + return VTY_NEWLINE; +} + +int +rfapiStream2Vty ( + void *stream, /* input */ + int (**fp) (void *, const char *, ...), /* output */ + struct vty **vty, /* output */ + void **outstream, /* output */ + const char **vty_newline) /* output */ +{ + + if (!stream) + { + vty_dummy_zlog.type = VTY_SHELL; /* for VTY_NEWLINE */ + *vty = &vty_dummy_zlog; + *fp = (int (*)(void *, const char *,...)) rfapiDebugPrintf; + *outstream = NULL; + *vty_newline = str_vty_newline (*vty); + return (vzlog_test (NULL, LOG_DEBUG)); + } + + if (((uintptr_t) stream == (uintptr_t) 1) || + ((uintptr_t) stream == (uintptr_t) 2)) + { + + vty_dummy_stdio.type = VTY_SHELL; /* for VTY_NEWLINE */ + *vty = &vty_dummy_stdio; + *fp = (int (*)(void *, const char *,...)) rfapiStdioPrintf; + *outstream = stream; + *vty_newline = str_vty_newline (*vty); + return 1; + } + + if (stream) + { + *vty = stream; /* VTY_NEWLINE requires vty to be legit */ + *fp = (int (*)(void *, const char *,...)) vty_out; + *outstream = stream; + *vty_newline = str_vty_newline (*vty); + return 1; + } + + return 0; +} + +/* called from bgpd/bgp_vty.c'route_vty_out() */ +void +rfapi_vty_out_vncinfo ( + struct vty *vty, + struct prefix *p, + struct bgp_info *bi, + safi_t safi) +{ + char *s; + uint32_t lifetime; + + /* + * Print, on an indented line: + * UN address [if VPN route and VNC UN addr subtlv] + * EC list + * VNC lifetime + */ + vty_out (vty, " "); + + if (safi == SAFI_MPLS_VPN) + { + struct prefix pfx_un; + + if (!rfapiGetVncTunnelUnAddr (bi->attr, &pfx_un)) + { + char buf[BUFSIZ]; + vty_out (vty, "UN=%s", inet_ntop (pfx_un.family, + pfx_un.u.val, buf, BUFSIZ)); + } + } + + if (bi->attr && bi->attr->extra && bi->attr->extra->ecommunity) + { + s = ecommunity_ecom2str (bi->attr->extra->ecommunity, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " EC{%s}", s); + XFREE (MTYPE_ECOMMUNITY_STR, s); + } + + if (bi->extra != NULL) + vty_out (vty, " label=%u", decode_label (bi->extra->tag)); + + if (rfapiGetVncLifetime (bi->attr, &lifetime)) + { + if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP) + { + vty_out (vty, " life=none"); + } + } + else + { + vty_out (vty, " life=%d", lifetime); + } + + vty_out (vty, " type=%s, subtype=%d", + zebra_route_string (bi->type), bi->sub_type); + + vty_out (vty, "%s", HVTY_NEWLINE); +} + +void +rfapiPrintAttrPtrs (void *stream, struct attr *attr) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + struct attr_extra *ae; + char buf[BUFSIZ]; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + fp (out, "Attr[%p]:%s", attr, HVTY_NEWLINE); + if (!attr) + return; + + /* IPv4 Nexthop */ + inet_ntop (AF_INET, &attr->nexthop, buf, BUFSIZ); + fp (out, " nexthop=%s%s", buf, HVTY_NEWLINE); + + fp (out, " aspath=%p, refcnt=%d%s", attr->aspath, + (attr->aspath ? attr->aspath->refcnt : 0), HVTY_NEWLINE); + fp (out, " community=%p, refcnt=%d%s", attr->community, + (attr->community ? attr->community->refcnt : 0), HVTY_NEWLINE); + + if ((ae = attr->extra)) + { + fp (out, " ecommunity=%p, refcnt=%d%s", ae->ecommunity, + (ae->ecommunity ? ae->ecommunity->refcnt : 0), HVTY_NEWLINE); + fp (out, " cluster=%p, refcnt=%d%s", ae->cluster, + (ae->cluster ? ae->cluster->refcnt : 0), HVTY_NEWLINE); + fp (out, " transit=%p, refcnt=%d%s", ae->transit, + (ae->transit ? 
ae->transit->refcnt : 0), HVTY_NEWLINE); + } +} + +/* + * Print BI in an Import Table + */ +void +rfapiPrintBi (void *stream, struct bgp_info *bi) +{ + char buf[BUFSIZ]; + char *s; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + char line[BUFSIZ]; + char *p = line; + int r; + int has_macaddr = 0; + struct ethaddr macaddr; + struct rfapi_l2address_option l2o_buf; + uint8_t l2hid=0; /* valid if has_macaddr */ + +#define REMAIN (BUFSIZ - (p-line)) +#define INCP {p += (r > REMAIN)? REMAIN: r;} + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + if (!bi) + return; + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED) && bi->extra + && bi->extra->vnc.import.timer) + { + struct thread *t = (struct thread *) bi->extra->vnc.import.timer; + r = snprintf (p, REMAIN, " [%4lu] ", thread_timer_remain_second (t)); + INCP; + + } + else + { + r = snprintf (p, REMAIN, " "); + INCP; + } + + if (bi->extra) + { + /* TBD This valid only for SAFI_MPLS_VPN, but not for encap */ + if (decode_rd_type(bi->extra->vnc.import.rd.val) == RD_TYPE_VNC_ETH) + { + has_macaddr = 1; + memcpy (macaddr.octet, bi->extra->vnc.import.rd.val + 2, 6); + l2hid = bi->extra->vnc.import.rd.val[1]; + } + } + + /* + * Print these items: + * type/subtype + * nexthop address + * lifetime + * RFP option sizes (they are opaque values) + * extended communities (RTs) + */ + if (bi->attr && bi->attr->extra) + { + uint32_t lifetime; + int printed_1st_gol = 0; + struct bgp_attr_encap_subtlv *pEncap; + struct prefix pfx_un; + int af = BGP_MP_NEXTHOP_FAMILY (bi->attr->extra->mp_nexthop_len); + + /* Nexthop */ + if (af == AF_INET) + { + r = snprintf (p, REMAIN, "%s", inet_ntop (AF_INET, + &bi->attr->extra->mp_nexthop_global_in, + buf, BUFSIZ)); + INCP; + } + else if (af == AF_INET6) + { + r = snprintf (p, REMAIN, "%s", inet_ntop (AF_INET6, + &bi->attr->extra->mp_nexthop_global, + buf, BUFSIZ)); + INCP; + } + else + { + r = snprintf (p, REMAIN, "?"); + INCP; + } + + /* + * VNC tunnel subtlv, if present, contains UN address + */ + if (!rfapiGetVncTunnelUnAddr (bi->attr, &pfx_un)) + { + r = snprintf (p, REMAIN, " un=%s", inet_ntop (pfx_un.family, + pfx_un.u.val, buf, + BUFSIZ)); + INCP; + + } + + /* Lifetime */ + if (rfapiGetVncLifetime (bi->attr, &lifetime)) + { + r = snprintf (p, REMAIN, " nolife"); + INCP; + } + else + { + if (lifetime == 0xffffffff) + r = snprintf (p, REMAIN, " %6s", "infini"); + else + r = snprintf (p, REMAIN, " %6u", lifetime); + INCP; + } + + /* RFP option lengths */ + for (pEncap = bi->attr->extra->vnc_subtlvs; pEncap; + pEncap = pEncap->next) + { + + if (pEncap->type == BGP_VNC_SUBTLV_TYPE_RFPOPTION) + { + if (printed_1st_gol) + { + r = snprintf (p, REMAIN, ","); + INCP; + } + else + { + r = snprintf (p, REMAIN, " "); /* leading space */ + INCP; + } + r = snprintf (p, REMAIN, "%d", pEncap->length); + INCP; + printed_1st_gol = 1; + } + } + + /* RT list */ + if (bi->attr->extra->ecommunity) + { + s = ecommunity_ecom2str (bi->attr->extra->ecommunity, + ECOMMUNITY_FORMAT_ROUTE_MAP); + r = snprintf (p, REMAIN, " %s", s); + INCP; + XFREE (MTYPE_ECOMMUNITY_STR, s); + } + + } + + r = snprintf (p, REMAIN, " bi@%p", bi); + INCP; + + r = snprintf (p, REMAIN, " p@%p", bi->peer); + INCP; + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + { + r = snprintf (p, REMAIN, " HD=yes"); + INCP; + } + else + { + r = snprintf (p, REMAIN, " HD=no"); + INCP; + } + + if (bi->attr) + { + + if (bi->attr->extra) + { + r = snprintf (p, REMAIN, " W=%d", 
bi->attr->extra->weight); + INCP; + } + + if (bi->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)) + { + r = snprintf (p, REMAIN, " LP=%d", bi->attr->local_pref); + INCP; + } + else + { + r = snprintf (p, REMAIN, " LP=unset"); + INCP; + } + } + + r = + snprintf (p, REMAIN, " %c:%u", zebra_route_char (bi->type), bi->sub_type); + INCP; + + fp (out, "%s%s", line, HVTY_NEWLINE); + + if (has_macaddr) + { + fp (out, " RD HID=%d ETH=%02x:%02x:%02x:%02x:%02x:%02x%s", + l2hid, + macaddr.octet[0], + macaddr.octet[1], + macaddr.octet[2], + macaddr.octet[3], macaddr.octet[4], macaddr.octet[5], HVTY_NEWLINE); + } + + if (!rfapiGetL2o (bi->attr, &l2o_buf)) + { + fp (out, + " L2O ETH=%02x:%02x:%02x:%02x:%02x:%02x LBL=%d LNI=%d LHI=%hhu%s", + l2o_buf.macaddr.octet[0], l2o_buf.macaddr.octet[1], + l2o_buf.macaddr.octet[2], l2o_buf.macaddr.octet[3], + l2o_buf.macaddr.octet[4], l2o_buf.macaddr.octet[5], l2o_buf.label, + l2o_buf.logical_net_id, l2o_buf.local_nve_id, HVTY_NEWLINE); + } + if (bi->extra && bi->extra->vnc.import.aux_prefix.family) + { + char buf[BUFSIZ]; + const char *sp; + + sp = rfapi_ntop (bi->extra->vnc.import.aux_prefix.family, + &bi->extra->vnc.import.aux_prefix.u.prefix, + buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + if (sp) + { + fp (out, " IP: %s%s", sp, HVTY_NEWLINE); + } + } + { + struct rfapi_un_option *uo = rfapi_encap_tlv_to_un_option (bi->attr); + if (uo) + { + rfapi_print_tunneltype_option (stream, 8, &uo->v.tunnel); + rfapi_un_options_free (uo); + } + } +} + +char * +rfapiMonitorVpn2Str (struct rfapi_monitor_vpn *m, char *buf, int size) +{ + char buf_pfx[BUFSIZ]; + char buf_vn[BUFSIZ]; + char buf_un[BUFSIZ]; + int rc; + + rfapiRfapiIpAddr2Str (&m->rfd->un_addr, buf_vn, BUFSIZ); + rfapiRfapiIpAddr2Str (&m->rfd->vn_addr, buf_un, BUFSIZ); + + rc = snprintf (buf, size, + "m=%p, next=%p, rfd=%p(vn=%s un=%s), p=%s/%d, node=%p", + m, m->next, m->rfd, buf_vn, buf_un, + inet_ntop (m->p.family, &m->p.u.prefix, buf_pfx, BUFSIZ), + m->p.prefixlen, m->node); + buf[size - 1] = 0; + if (rc >= size) + return NULL; + return buf; +} + +static void +rfapiDebugPrintMonitorVpn (void *stream, struct rfapi_monitor_vpn *m) +{ + char buf[BUFSIZ]; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + rfapiMonitorVpn2Str (m, buf, BUFSIZ); + fp (out, " Mon %s%s", buf, HVTY_NEWLINE); +} + +static void +rfapiDebugPrintMonitorEncap (void *stream, struct rfapi_monitor_encap *m) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out = NULL; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + fp (out, " Mon m=%p, next=%p, node=%p, bi=%p%s", + m, m->next, m->node, m->bi, HVTY_NEWLINE); +} + +void +rfapiShowItNode (void *stream, struct route_node *rn) +{ + struct bgp_info *bi; + char buf[BUFSIZ]; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + fp (out, "%s/%d @%p #%d%s", + rfapi_ntop (rn->p.family, &rn->p.u.prefix, buf, BUFSIZ), + rn->p.prefixlen, rn, rn->lock, HVTY_NEWLINE); + + for (bi = rn->info; bi; bi = bi->next) + { + rfapiPrintBi (stream, bi); + } + + /* doesn't show montors */ +} + +void +rfapiShowImportTable ( + void *stream, + const char *label, + struct route_table *rt, + int isvpn) +{ + struct route_node *rn; + char buf[BUFSIZ]; + + int (*fp) (void *, const char *, 
...); + struct vty *vty; + void *out; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + fp (out, "Import Table [%s]%s", label, HVTY_NEWLINE); + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + struct bgp_info *bi; + + if (rn->p.family == AF_ETHERNET) + { + rfapiEthAddr2Str (&rn->p.u.prefix_eth, buf, BUFSIZ); + } + else + { + inet_ntop (rn->p.family, &rn->p.u.prefix, buf, BUFSIZ); + } + + fp (out, "%s/%d @%p #%d%s", buf, rn->p.prefixlen, rn, rn->lock - 1, /* account for loop iterator locking */ + HVTY_NEWLINE); + + for (bi = rn->info; bi; bi = bi->next) + { + rfapiPrintBi (stream, bi); + } + + if (isvpn) + { + struct rfapi_monitor_vpn *m; + for (m = RFAPI_MONITOR_VPN (rn); m; m = m->next) + { + rfapiDebugPrintMonitorVpn (stream, m); + } + } + else + { + struct rfapi_monitor_encap *m; + for (m = RFAPI_MONITOR_ENCAP (rn); m; m = m->next) + { + rfapiDebugPrintMonitorEncap (stream, m); + } + } + } +} + +int +rfapiShowVncQueries (void *stream, struct prefix *pfx_match) +{ + struct bgp *bgp; + struct rfapi *h; + struct listnode *node; + struct rfapi_descriptor *rfd; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + + int printedheader = 0; + + int nves_total = 0; + int nves_with_queries = 0; + int nves_displayed = 0; + + int queries_total = 0; + int queries_displayed = 0; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return CMD_WARNING; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + if (!bgp) + { + vty_out (vty, "No BGP instance%s", VTY_NEWLINE); + return CMD_WARNING; + } + + h = bgp->rfapi; + if (!h) + { + vty_out (vty, "No RFAPI instance%s", VTY_NEWLINE); + return CMD_WARNING; + } + + for (ALL_LIST_ELEMENTS_RO (&h->descriptors, node, rfd)) + { + + struct route_node *rn; + int printedquerier = 0; + + + ++nves_total; + + if (rfd->mon || (rfd->mon_eth && skiplist_count (rfd->mon_eth))) + { + ++nves_with_queries; + } + else + { + continue; + } + + /* + * IP Queries + */ + if (rfd->mon) + { + for (rn = route_top (rfd->mon); rn; rn = route_next (rn)) + { + struct rfapi_monitor_vpn *m; + char buf_remain[BUFSIZ]; + char buf_pfx[BUFSIZ]; + + if (!rn->info) + continue; + + m = rn->info; + + ++queries_total; + + if (pfx_match && !prefix_match (pfx_match, &rn->p) && + !prefix_match (&rn->p, pfx_match)) + continue; + + ++queries_displayed; + + if (!printedheader) + { + ++printedheader; + fp (out, "%s", VTY_NEWLINE); + fp (out, "%-15s %-15s %-15s %-10s%s", + "VN Address", "UN Address", + "Target", "Remaining", VTY_NEWLINE); + } + + if (!printedquerier) + { + char buf_vn[BUFSIZ]; + char buf_un[BUFSIZ]; + + rfapiRfapiIpAddr2Str (&rfd->un_addr, buf_un, BUFSIZ); + rfapiRfapiIpAddr2Str (&rfd->vn_addr, buf_vn, BUFSIZ); + + fp (out, "%-15s %-15s", buf_vn, buf_un); + printedquerier = 1; + + ++nves_displayed; + } + else + fp (out, "%-15s %-15s", "", ""); + buf_remain[0] = 0; + if (m->timer) + { + rfapiFormatSeconds (thread_timer_remain_second (m->timer), + buf_remain, BUFSIZ); + } + fp (out, " %-15s %-10s%s", + inet_ntop (m->p.family, &m->p.u.prefix, buf_pfx, BUFSIZ), + buf_remain, VTY_NEWLINE); + } + } + + /* + * Ethernet Queries + */ + if (rfd->mon_eth && skiplist_count (rfd->mon_eth)) + { + + int rc; + void *cursor; + struct rfapi_monitor_eth *mon_eth; + + for (cursor = NULL, + rc = + skiplist_next (rfd->mon_eth, NULL, (void **) &mon_eth, + &cursor); rc == 0; + rc = + skiplist_next (rfd->mon_eth, NULL, (void **) &mon_eth, + &cursor)) + { + + char 
buf_remain[BUFSIZ]; + char buf_pfx[BUFSIZ]; + struct prefix pfx_mac; + + ++queries_total; + + zlog_debug ("%s: checking rfd=%p mon_eth=%p", __func__, rfd, + mon_eth); + + memset ((void *) &pfx_mac, 0, sizeof (struct prefix)); + pfx_mac.family = AF_ETHERNET; + pfx_mac.prefixlen = 48; + pfx_mac.u.prefix_eth = mon_eth->macaddr; + + if (pfx_match && !prefix_match (pfx_match, &pfx_mac) && + !prefix_match (&pfx_mac, pfx_match)) + continue; + + ++queries_displayed; + + if (!printedheader) + { + ++printedheader; + fp (out, "%s", VTY_NEWLINE); + fp (out, "%-15s %-15s %-17s %10s %-10s%s", + "VN Address", "UN Address", + "Target", "LNI", "Remaining", VTY_NEWLINE); + } + + if (!printedquerier) + { + char buf_vn[BUFSIZ]; + char buf_un[BUFSIZ]; + + rfapiRfapiIpAddr2Str (&rfd->un_addr, buf_un, BUFSIZ); + rfapiRfapiIpAddr2Str (&rfd->vn_addr, buf_vn, BUFSIZ); + + fp (out, "%-15s %-15s", buf_vn, buf_un); + printedquerier = 1; + + ++nves_displayed; + } + else + fp (out, "%-15s %-15s", "", ""); + buf_remain[0] = 0; + if (mon_eth->timer) + { + rfapiFormatSeconds (thread_timer_remain_second + (mon_eth->timer), buf_remain, BUFSIZ); + } + fp (out, " %-17s %10d %-10s%s", + rfapi_ntop (pfx_mac.family, &pfx_mac.u.prefix, buf_pfx, + BUFSIZ), mon_eth->logical_net_id, buf_remain, + VTY_NEWLINE); + } + } + } + + if (queries_total) + { + fp (out, "%s", VTY_NEWLINE); + fp (out, "Displayed %d out of %d total queries%s", + queries_displayed, queries_total, VTY_NEWLINE); + } + return CMD_SUCCESS; +} + +static int +rfapiPrintRemoteRegBi ( + struct bgp *bgp, + void *stream, + struct route_node *rn, + struct bgp_info *bi) +{ + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + struct prefix pfx_un; + struct prefix pfx_vn; + uint8_t cost; + uint32_t lifetime; + bgp_encap_types tun_type; + + char buf_pfx[BUFSIZ]; + char buf_ntop[BUFSIZ]; + char buf_un[BUFSIZ]; + char buf_vn[BUFSIZ]; + char buf_lifetime[BUFSIZ]; + int nlines = 0; + + if (!stream) + return 0; /* for debug log, print into buf & call output once */ + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return 0; + + /* + * Prefix + */ + buf_pfx[0] = 0; + snprintf (buf_pfx, BUFSIZ, "%s/%d", + rfapi_ntop (rn->p.family, &rn->p.u.prefix, buf_ntop, BUFSIZ), + rn->p.prefixlen); + buf_pfx[BUFSIZ - 1] = 0; + nlines++; + + /* + * UN addr + */ + buf_un[0] = 0; + if (!rfapiGetUnAddrOfVpnBi (bi, &pfx_un)) + { + snprintf (buf_un, BUFSIZ, "%s", + inet_ntop (pfx_un.family, &pfx_un.u.prefix, buf_ntop, + BUFSIZ)); + } + buf_un[BUFSIZ - 1] = 0; + + rfapiGetTunnelType(bi->attr,&tun_type); + /* + * VN addr + */ + buf_vn[0] = 0; + if (tun_type == BGP_ENCAP_TYPE_MPLS) + { + /* MPLS carries un in nrli next hop (same as vn for IP tunnels) */ + if (bi->extra) + { + u_int32_t l = decode_label (bi->extra->tag); + snprintf (buf_vn, BUFSIZ, "Label: %d", l); + } + else /* should never happen */ + { + snprintf (buf_vn, BUFSIZ, "Label: N/A"); + } + } + else + { + rfapiNexthop2Prefix (bi->attr, &pfx_vn); + snprintf (buf_vn, BUFSIZ, "%s", + inet_ntop (pfx_vn.family, &pfx_vn.u.prefix, buf_ntop, BUFSIZ)); + } + buf_vn[BUFSIZ - 1] = 0; + + + /* + * Cost is encoded in local_pref as (255-cost) + * See rfapi_import.c'rfapiRouteInfo2NextHopEntry() for conversion + * back to cost. + */ + if (bi->attr) + { + uint32_t local_pref; + if (bi->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)) + local_pref = bi->attr->local_pref; + else + local_pref = 0; + cost = (local_pref > 255) ? 
0 : 255 - local_pref; + } + else + { + cost = 0; + } + + fp (out, "%-20s ", buf_pfx); + fp (out, "%-15s ", buf_vn); + fp (out, "%-15s ", buf_un); + fp (out, "%-4d ", cost); + + /* Lifetime */ + /* NB rfapiGetVncLifetime sets infinite value when returning !0 */ + if (rfapiGetVncLifetime (bi->attr, &lifetime) || + (lifetime == RFAPI_INFINITE_LIFETIME)) + { + + fp (out, "%-10s ", "infinite"); + } + else + { + time_t t_lifetime = lifetime; + rfapiFormatSeconds (t_lifetime, buf_lifetime, BUFSIZ); + fp (out, "%-10s ", buf_lifetime); + } + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED) && + bi->extra && bi->extra->vnc.import.timer) + { + + uint32_t remaining; + time_t age; + char buf_age[BUFSIZ]; + + struct thread *t = (struct thread *) bi->extra->vnc.import.timer; + remaining = thread_timer_remain_second (t); + +#if RFAPI_REGISTRATIONS_REPORT_AGE + /* + * Calculate when the timer started. Doing so here saves + * us a timestamp field in "struct bgp_info". + * + * See rfapi_import.c'rfapiBiStartWithdrawTimer() for the + * original calculation. + */ + age = rfapiGetHolddownFromLifetime (lifetime, factor) - remaining; +#else /* report remaining time */ + age = remaining; +#endif + rfapiFormatSeconds (age, buf_age, BUFSIZ); + + fp (out, "%-10s ", buf_age); + + } + else if (RFAPI_LOCAL_BI (bi)) + { + + char buf_age[BUFSIZ]; + + if (bi && bi->extra && bi->extra->vnc.import.create_time) + { + rfapiFormatAge (bi->extra->vnc.import.create_time, buf_age, BUFSIZ); + } + else + { + buf_age[0] = '?'; + buf_age[1] = 0; + } + fp (out, "%-10s ", buf_age); + } + fp (out, "%s", HVTY_NEWLINE); + + if (rn->p.family == AF_ETHERNET) + { + /* + * If there is a corresponding IP address && != VN address, + * print that on the next line + */ + + if (bi && bi->extra && bi->extra->vnc.import.aux_prefix.family) + { + const char *sp; + + sp = rfapi_ntop (bi->extra->vnc.import.aux_prefix.family, + &bi->extra->vnc.import.aux_prefix.u.prefix, + buf_ntop, BUFSIZ); + buf_ntop[BUFSIZ - 1] = 0; + + if (sp && strcmp (buf_vn, sp) != 0) + { + fp (out, " IP: %s", sp); + if (nlines == 1) + nlines++; + } + } + } + if (tun_type != BGP_ENCAP_TYPE_MPLS && bi->extra) + { + u_int32_t l = decode_label (bi->extra->tag); + if (!MPLS_LABEL_IS_NULL (l)) + { + fp (out, " Label: %d", l); + if (nlines == 1) + nlines++; + } + } + if (nlines > 1) + fp (out, "%s", HVTY_NEWLINE); + + return 1; +} + +static int +rfapiShowRemoteRegistrationsIt ( + struct bgp *bgp, + void *stream, + struct rfapi_import_table *it, + struct prefix *prefix_only, + int show_expiring, /* either/or */ + int show_local, + int show_remote, + int show_imported, /* either/or */ + uint32_t *pLni) /* AFI_ETHER only */ +{ + afi_t afi; + int printed_rtlist_hdr = 0; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + int total = 0; + int printed = 0; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return printed; + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + + struct route_node *rn; + + if (!it->imported_vpn[afi]) + continue; + + for (rn = route_top (it->imported_vpn[afi]); rn; rn = route_next (rn)) + { + + struct bgp_info *bi; + int count_only; + + /* allow for wider or more narrow mask from user */ + if (prefix_only && + !prefix_match (prefix_only, &rn->p) && + !prefix_match (&rn->p, prefix_only)) + count_only = 1; + else + count_only = 0; + + for (bi = rn->info; bi; bi = bi->next) + { + + if (!show_local && RFAPI_LOCAL_BI (bi)) + { + + /* local route from RFP */ + continue; + } + + if (!show_remote && 
!RFAPI_LOCAL_BI (bi)) + { + + /* remote route */ + continue; + } + + if (show_expiring && !CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + if (!show_expiring && CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + if (bi->type == ZEBRA_ROUTE_BGP_DIRECT || + bi->type == ZEBRA_ROUTE_BGP_DIRECT_EXT) + { + if (!show_imported) + continue; + } + else + { + if (show_imported) + continue; + } + + total++; + if (count_only == 1) + continue; + if (!printed_rtlist_hdr) + { + const char *agetype = ""; + char *s; + const char *type = ""; + if (show_imported) + { + type = "Imported"; + } + else + { + if (show_expiring) + { + type = "Holddown"; + } + else + { + if (RFAPI_LOCAL_BI (bi)) + { + type = "Local"; + } + else + { + type = "Remote"; + } + } + } + + s = ecommunity_ecom2str (it->rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + + if (pLni) + { + fp (out, "%s[%s] L2VPN Network 0x%x (%u) RT={%s}%s", + HVTY_NEWLINE, type, *pLni, (*pLni & 0xfff), s, + HVTY_NEWLINE); + } + else + { + fp (out, "%s[%s] Prefix RT={%s}%s", + HVTY_NEWLINE, type, s, HVTY_NEWLINE); + } + XFREE (MTYPE_ECOMMUNITY_STR, s); + + if (show_expiring) + { +#if RFAPI_REGISTRATIONS_REPORT_AGE + agetype = "Age"; +#else + agetype = "Remaining"; +#endif + } + else if (show_local) + { + agetype = "Age"; + } + + printed_rtlist_hdr = 1; + + fp (out, "%-20s %-15s %-15s %4s %-10s %-10s%s", + (pLni ? "L2 Address/IP" : "Prefix"), + "VN Address", "UN Address", "Cost", + "Lifetime", agetype, HVTY_NEWLINE); + } + printed += rfapiPrintRemoteRegBi (bgp, stream, rn, bi); + } + } + } + + if (printed > 0) + { + + const char *type = "prefixes"; + + if (show_imported) + { + type = "imported prefixes"; + } + else + { + if (show_expiring) + { + type = "prefixes in holddown"; + } + else + { + if (show_local && !show_remote) + { + type = "locally registered prefixes"; + } + else if (!show_local && show_remote) + { + type = "remotely registered prefixes"; + } + } + } + + fp (out, "Displayed %d out of %d %s%s", + printed, total, type, HVTY_NEWLINE); + } + return printed; +} + + + +/* + * rfapiShowRemoteRegistrations + * + * Similar to rfapiShowImportTable() above. This function + * is mean to produce the "remote" portion of the output + * of "show vnc registrations". 
+ */ +int +rfapiShowRemoteRegistrations ( + void *stream, + struct prefix *prefix_only, + int show_expiring, + int show_local, + int show_remote, + int show_imported) +{ + struct bgp *bgp; + struct rfapi *h; + struct rfapi_import_table *it; + int printed = 0; + + bgp = bgp_get_default (); + if (!bgp) + { + return printed; + } + + h = bgp->rfapi; + if (!h) + { + return printed; + } + + for (it = h->imports; it; it = it->next) + { + printed += + rfapiShowRemoteRegistrationsIt (bgp, stream, it, prefix_only, + show_expiring, show_local, + show_remote, show_imported, NULL); + } + + if (h->import_mac) + { + void *cursor = NULL; + int rc; + uintptr_t lni_as_ptr; + uint32_t lni; + uint32_t *pLni; + + for (rc = + skiplist_next (h->import_mac, (void **) &lni_as_ptr, (void **) &it, + &cursor); !rc; + rc = + skiplist_next (h->import_mac, (void **) &lni_as_ptr, (void **) &it, + &cursor)) + { + pLni = NULL; + if ((lni_as_ptr & 0xffffffff) == lni_as_ptr) + { + lni = (uint32_t) (lni_as_ptr & 0xffffffff); + pLni = &lni; + } + + printed += + rfapiShowRemoteRegistrationsIt (bgp, stream, it, prefix_only, + show_expiring, show_local, + show_remote, show_imported, pLni); + } + } + + return printed; +} + +/*------------------------------------------ + * rfapiRfapiIpAddr2Str + * + * UI helper: generate string from rfapi_ip_addr + * + * input: + * a IP v4/v6 address + * + * output + * buf put string here + * bufsize max space to write + * + * return value: + * NULL conversion failed + * non-NULL pointer to buf + --------------------------------------------*/ +const char * +rfapiRfapiIpAddr2Str (struct rfapi_ip_addr *a, char *buf, int bufsize) +{ + const char *rc = NULL; + + switch (a->addr_family) + { + case AF_INET: + rc = inet_ntop (a->addr_family, &a->addr.v4, buf, bufsize); + break; + case AF_INET6: + rc = inet_ntop (a->addr_family, &a->addr.v6, buf, bufsize); + break; + } + return rc; +} + +void +rfapiPrintRfapiIpAddr (void *stream, struct rfapi_ip_addr *a) +{ + char buf[BUFSIZ]; + const char *rc = NULL; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out = NULL; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + rc = rfapiRfapiIpAddr2Str (a, buf, BUFSIZ); + + if (rc) + fp (out, "%s", buf); +} + +const char * +rfapiRfapiIpPrefix2Str (struct rfapi_ip_prefix *p, char *buf, int bufsize) +{ + struct rfapi_ip_addr *a = &p->prefix; + const char *rc = NULL; + + switch (a->addr_family) + { + case AF_INET: + rc = inet_ntop (a->addr_family, &a->addr.v4, buf, bufsize); + break; + case AF_INET6: + rc = inet_ntop (a->addr_family, &a->addr.v6, buf, bufsize); + break; + } + + if (rc) + { + int alen = strlen (buf); + int remaining = bufsize - alen - 1; + int slen; + + if (remaining > 0) + { + slen = snprintf (buf + alen, remaining, "/%u", p->length); + if (slen < remaining) /* see man page for snprintf(3) */ + return rc; + } + } + + return NULL; +} + +void +rfapiPrintRfapiIpPrefix (void *stream, struct rfapi_ip_prefix *p) +{ + char buf[BUFSIZ]; + const char *rc; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out = NULL; + const char *vty_newline; + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + rc = rfapiRfapiIpPrefix2Str (p, buf, BUFSIZ); + + if (rc) + fp (out, "%s:%u", buf, p->cost); + else + fp (out, "?/?:?"); +} + +void +rfapiPrintRd (struct vty *vty, struct prefix_rd *prd) +{ + char buf[BUFSIZ]; + + buf[0] = 0; + prefix_rd2str (prd, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + vty_out 
(vty, "%s", buf); +} + +void +rfapiPrintAdvertisedInfo ( + struct vty *vty, + struct rfapi_descriptor *rfd, + safi_t safi, + struct prefix *p) +{ + afi_t afi; /* of the VN address */ + struct bgp_node *bn; + struct bgp_info *bi; + uint8_t type = ZEBRA_ROUTE_BGP; + struct bgp *bgp; + int printed = 0; + struct prefix_rd prd0; + struct prefix_rd *prd; + + /* + * Find the bgp_info in the RIB corresponding to this + * prefix and rfd + */ + + afi = family2afi (p->family); + assert (afi == AFI_IP || afi == AFI_IP6); + + bgp = bgp_get_default (); /* assume 1 instance for now */ + assert (bgp); + + if (safi == SAFI_ENCAP) + { + memset (&prd0, 0, sizeof (prd0)); + prd0.family = AF_UNSPEC; + prd0.prefixlen = 64; + prd = &prd0; + } + else + { + prd = &rfd->rd; + } + bn = bgp_afi_node_get (bgp->rib[afi][safi], afi, safi, p, prd); + + vty_out (vty, " bn=%p%s", bn, HVTY_NEWLINE); + + for (bi = bn->info; bi; bi = bi->next) + { + if (bi->peer == rfd->peer && + bi->type == type && + bi->sub_type == BGP_ROUTE_RFP && + bi->extra && bi->extra->vnc.export.rfapi_handle == (void *) rfd) + { + + rfapiPrintBi (vty, bi); + printed = 1; + } + } + + if (!printed) + { + vty_out (vty, " --?--%s", HVTY_NEWLINE); + return; + } + +} + +void +rfapiPrintDescriptor (struct vty *vty, struct rfapi_descriptor *rfd) +{ + /* pHD un-addr vn-addr pCB cookie rd lifetime */ + /* RT export list */ + /* RT import list */ + /* list of advertised prefixes */ + /* dump import table */ + + char *s; + void *cursor; + int rc; + afi_t afi; + struct rfapi_adb *adb; + char buf[BUFSIZ]; + + vty_out (vty, "%-10p ", rfd); + rfapiPrintRfapiIpAddr (vty, &rfd->un_addr); + vty_out (vty, " "); + rfapiPrintRfapiIpAddr (vty, &rfd->vn_addr); + vty_out (vty, " %p %p ", rfd->response_cb, rfd->cookie); + rfapiPrintRd (vty, &rfd->rd); + vty_out (vty, " %d", rfd->response_lifetime); + vty_out (vty, " %s", (rfd->rfg ? 
rfd->rfg->name : "")); + vty_out (vty, "%s", HVTY_NEWLINE); + + vty_out (vty, " Peer %p #%d%s", rfd->peer, rfd->peer->lock, HVTY_NEWLINE); + + /* export RT list */ + if (rfd->rt_export_list) + { + s = + ecommunity_ecom2str (rfd->rt_export_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " Export %s%s", s, HVTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, s); + } + else + { + vty_out (vty, " Export (nil)%s", HVTY_NEWLINE); + } + + /* import RT list */ + if (rfd->import_table) + { + s = ecommunity_ecom2str (rfd->import_table->rt_import_list, + ECOMMUNITY_FORMAT_ROUTE_MAP); + vty_out (vty, " Import %s%s", s, HVTY_NEWLINE); + XFREE (MTYPE_ECOMMUNITY_STR, s); + } + else + { + vty_out (vty, " Import (nil)%s", HVTY_NEWLINE); + } + + for (afi = AFI_IP; afi < AFI_MAX; ++afi) + { + u_char family; + + family = afi2family (afi); + if (!family) + continue; + + cursor = NULL; + for (rc = + skiplist_next (rfd->advertised.ipN_by_prefix, NULL, (void **) &adb, + &cursor); rc == 0; + rc = + skiplist_next (rfd->advertised.ipN_by_prefix, NULL, (void **) &adb, + &cursor)) + { + + /* group like family prefixes together in output */ + if (family != adb->prefix_ip.family) + continue; + + prefix2str (&adb->prefix_ip, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + + vty_out (vty, " Adv Pfx: %s%s", buf, HVTY_NEWLINE); + rfapiPrintAdvertisedInfo (vty, rfd, SAFI_MPLS_VPN, &adb->prefix_ip); + } + } + for (rc = + skiplist_next (rfd->advertised.ip0_by_ether, NULL, (void **) &adb, + &cursor); rc == 0; + rc = + skiplist_next (rfd->advertised.ip0_by_ether, NULL, (void **) &adb, + &cursor)) + { + + prefix2str (&adb->prefix_eth, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; /* guarantee NUL-terminated */ + + vty_out (vty, " Adv Pfx: %s%s", buf, HVTY_NEWLINE); + + /* TBD update the following function to print ethernet info */ + /* Also need to pass/use rd */ + rfapiPrintAdvertisedInfo (vty, rfd, SAFI_MPLS_VPN, &adb->prefix_ip); + } + vty_out (vty, "%s", HVTY_NEWLINE); +} + +/* + * test scripts rely on first line for each nve starting in 1st column, + * leading whitespace for additional detail of that nve + */ +void +rfapiPrintMatchingDescriptors (struct vty *vty, + struct prefix *vn_prefix, + struct prefix *un_prefix) +{ + struct bgp *bgp; + struct rfapi *h; + struct listnode *ln; + struct rfapi_descriptor *rfd; + int printed = 0; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + if (!bgp) + return; + + h = bgp->rfapi; + assert (h); + + for (ln = listhead (&h->descriptors); ln; ln = listnextnode (ln)) + { + rfd = listgetdata (ln); + + struct prefix pfx; + + if (vn_prefix) + { + assert (!rfapiRaddr2Qprefix (&rfd->vn_addr, &pfx)); + if (!prefix_match (vn_prefix, &pfx)) + continue; + } + + if (un_prefix) + { + assert (!rfapiRaddr2Qprefix (&rfd->un_addr, &pfx)); + if (!prefix_match (un_prefix, &pfx)) + continue; + } + + if (!printed) + { + /* print column header */ + vty_out (vty, + "%s %s %s %s %s %s %s %s%s", + "descriptor", "un-addr", "vn-addr", "callback", "cookie", + "RD", "lifetime", "group", HVTY_NEWLINE); + } + rfapiPrintDescriptor (vty, rfd); + printed = 1; + } +} + + +/* + * Parse an address and put into a struct prefix + */ +int +rfapiCliGetPrefixAddr (struct vty *vty, const char *str, struct prefix *p) +{ + if (!str2prefix (str, p)) + { + vty_out (vty, "Malformed address \"%s\"%s", str, HVTY_NEWLINE); + return CMD_WARNING; + } + switch (p->family) + { + case AF_INET: + if (p->prefixlen != 32) + { + vty_out (vty, "Not a host address: \"%s\"%s", str, HVTY_NEWLINE); + return CMD_WARNING; + } + 
break; + case AF_INET6: + if (p->prefixlen != 128) + { + vty_out (vty, "Not a host address: \"%s\"%s", str, HVTY_NEWLINE); + return CMD_WARNING; + } + break; + default: + vty_out (vty, "Invalid address \"%s\"%s", str, HVTY_NEWLINE); + return CMD_WARNING; + } + return 0; +} + +int +rfapiCliGetRfapiIpAddr ( + struct vty *vty, + const char *str, + struct rfapi_ip_addr *hai) +{ + struct prefix pfx; + int rc; + + rc = rfapiCliGetPrefixAddr (vty, str, &pfx); + if (rc) + return rc; + + hai->addr_family = pfx.family; + if (pfx.family == AF_INET) + hai->addr.v4 = pfx.u.prefix4; + else + hai->addr.v6 = pfx.u.prefix6; + + return 0; +} + +/* + * Note: this function does not flush vty output, so if it is called + * with a stream pointing to a vty, the user will have to type something + * before the callback output shows up + */ +void +rfapiPrintNhl (void *stream, struct rfapi_next_hop_entry *next_hops) +{ + struct rfapi_next_hop_entry *nh; + int count; + + int (*fp) (void *, const char *, ...); + struct vty *vty; + void *out; + const char *vty_newline; + +#define REMAIN (BUFSIZ - (p-line)) +#define INCP {p += (r > REMAIN)? REMAIN: r;} + + + if (rfapiStream2Vty (stream, &fp, &vty, &out, &vty_newline) == 0) + return; + + for (nh = next_hops, count = 1; nh; nh = nh->next, ++count) + { + + char line[BUFSIZ]; + char *p = line; + int r; + + r = snprintf (p, REMAIN, "%3d pfx=", count); + INCP; + + if (rfapiRfapiIpPrefix2Str (&nh->prefix, p, REMAIN)) + { + /* it fit, so count length */ + r = strlen (p); + } + else + { + /* didn't fit */ + goto truncate; + } + INCP; + + r = snprintf (p, REMAIN, ", un="); + INCP; + + if (rfapiRfapiIpAddr2Str (&nh->un_address, p, REMAIN)) + { + /* it fit, so count length */ + r = strlen (p); + } + else + { + /* didn't fit */ + goto truncate; + } + INCP; + + r = snprintf (p, REMAIN, ", vn="); + INCP; + + if (rfapiRfapiIpAddr2Str (&nh->vn_address, p, REMAIN)) + { + /* it fit, so count length */ + r = strlen (p); + } + else + { + /* didn't fit */ + goto truncate; + } + INCP; + + truncate: + line[BUFSIZ - 1] = 0; + fp (out, "%s%s", line, HVTY_NEWLINE); + + /* + * options + */ + if (nh->vn_options) + { + struct rfapi_vn_option *vo; + char offset[] = " "; + + for (vo = nh->vn_options; vo; vo = vo->next) + { + char pbuf[100]; + + switch (vo->type) + { + case RFAPI_VN_OPTION_TYPE_L2ADDR: + rfapiEthAddr2Str (&vo->v.l2addr.macaddr, pbuf, + sizeof (pbuf)); + fp (out, "%sL2 %s LBL=0x%06x NETID=0x%06x NVEID=%d%s", + offset, pbuf, (vo->v.l2addr.label & 0x00ffffff), + (vo->v.l2addr.logical_net_id & 0x00ffffff), + vo->v.l2addr.local_nve_id, HVTY_NEWLINE); + break; + + case RFAPI_VN_OPTION_TYPE_LOCAL_NEXTHOP: + prefix2str (&vo->v.local_nexthop.addr, pbuf, sizeof (pbuf)); + fp (out, "%sLNH %s cost=%d%s", + offset, pbuf, vo->v.local_nexthop.cost, HVTY_NEWLINE); + break; + + default: + fp (out, "%svn option type %d (unknown)%s", + offset, vo->type, HVTY_NEWLINE); + break; + } + } + } + if (nh->un_options) + { + struct rfapi_un_option *uo; + char offset[] = " "; + + for (uo = nh->un_options; uo; uo = uo->next) + { + switch (uo->type) + { + case RFAPI_UN_OPTION_TYPE_TUNNELTYPE: + rfapi_print_tunneltype_option (stream, 8, &uo->v.tunnel); + break; + default: + fp (out, "%sUN Option type %d%s", + offset, uo->type, vty_newline); + break; + } + + } + } + } +} + +/*********************************************************************** + * STATIC ROUTES + ***********************************************************************/ + +/* + * Add another nexthop to the NHL + */ +static void 
+rfapiAddDeleteLocalRfpPrefix ( + struct rfapi_ip_addr *un_addr, + struct rfapi_ip_addr *vn_addr, + struct rfapi_ip_prefix *rprefix, + int is_add, + uint32_t lifetime, /* add only */ + struct rfapi_vn_option *vn_options, + struct rfapi_next_hop_entry **head, + struct rfapi_next_hop_entry **tail) +{ + struct rfapi_next_hop_entry *new; + + /* + * construct NHL + */ + + new = XCALLOC (MTYPE_RFAPI_NEXTHOP, sizeof (struct rfapi_next_hop_entry)); + new->prefix = *rprefix; + new->un_address = *un_addr; + new->vn_address = *vn_addr; + + new->vn_options = vn_options; + if (is_add) + { + new->lifetime = lifetime; + } + else + { + new->lifetime = RFAPI_REMOVE_RESPONSE_LIFETIME; + } + + if (*tail) + (*tail)->next = new; + *tail = new; + if (!*head) + { + *head = new; + } +} + + +static int +register_add ( + struct vty *vty, + const char *arg_prefix, + const char *arg_vn, + const char *arg_un, + const char *arg_cost, /* optional */ + const char *arg_lifetime, /* optional */ + const char *arg_macaddr, /* optional */ + const char *arg_vni, /* mac present=>mandatory Virtual Network ID */ + int argc, + const char **argv) +{ + struct rfapi_ip_addr vn_address; + struct rfapi_ip_addr un_address; + struct prefix pfx; + struct rfapi_ip_prefix rpfx; + uint32_t cost; + uint32_t lnh_cost; + uint32_t lifetime; + rfapi_handle rfd; + struct rfapi_vn_option optary[10]; /* XXX must be big enough */ + struct rfapi_vn_option *opt = NULL; + int opt_next = 0; + + int rc = CMD_WARNING; + char *endptr; + struct bgp *bgp; + struct rfapi *h; + struct rfapi_cfg *rfapi_cfg; + + const char *arg_lnh = NULL; + const char *arg_lnh_cost = NULL; + + bgp = bgp_get_default (); /* assume 1 instance for now */ + if (!bgp) + { + if (vty) + vty_out (vty, "BGP not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + h = bgp->rfapi; + rfapi_cfg = bgp->rfapi_cfg; + if (!h || !rfapi_cfg) + { + if (vty) + vty_out (vty, "RFAPI not configured%s", VTY_NEWLINE); + return CMD_WARNING; + } + + for (; argc; --argc, ++argv) + { + if (!strcmp (*argv, "local-next-hop")) + { + if (arg_lnh) + { + vty_out (vty, "local-next-hop specified more than once%s", + VTY_NEWLINE); + return CMD_WARNING; + } + if (argc <= 1) + { + vty_out (vty, "Missing parameter for local-next-hop%s", + VTY_NEWLINE); + return CMD_WARNING; + } + ++argv, --argc; + arg_lnh = *argv; + } + if (!strcmp (*argv, "local-cost")) + { + if (arg_lnh_cost) + { + vty_out (vty, "local-cost specified more than once%s", + VTY_NEWLINE); + return CMD_WARNING; + } + if (argc <= 1) + { + vty_out (vty, "Missing parameter for local-cost%s", + VTY_NEWLINE); + return CMD_WARNING; + } + ++argv, --argc; + arg_lnh_cost = *argv; + } + } + + if ((rc = rfapiCliGetRfapiIpAddr (vty, arg_vn, &vn_address))) + goto fail; + if ((rc = rfapiCliGetRfapiIpAddr (vty, arg_un, &un_address))) + goto fail; + + /* arg_prefix is optional if mac address is given */ + if (arg_macaddr && !arg_prefix) + { + /* + * fake up a 0/32 or 0/128 prefix + */ + switch (vn_address.addr_family) + { + case AF_INET: + arg_prefix = "0.0.0.0/32"; + break; + case AF_INET6: + arg_prefix = "0::0/128"; + break; + default: + vty_out (vty, "Internal error, unknown VN address family%s", + VTY_NEWLINE); + return CMD_WARNING; + } + + } + + if (!str2prefix (arg_prefix, &pfx)) + { + vty_out (vty, "Malformed prefix \"%s\"%s", arg_prefix, + VTY_NEWLINE); + goto fail; + } + if (pfx.family != AF_INET + && pfx.family != AF_INET6) + { + vty_out (vty, "prefix \"%s\" has invalid address family%s", + arg_prefix, VTY_NEWLINE); + goto fail; + } + + + memset (optary, 0, 
sizeof (optary)); + + if (arg_cost) + { + endptr = NULL; + cost = strtoul (arg_cost, &endptr, 10); + if (*endptr != '\0' || cost > 255) + { + vty_out (vty, "%% Invalid %s value%s", "cost", VTY_NEWLINE); + goto fail; + } + } + else + { + cost = 255; + } + + if (arg_lifetime) + { + if (!strcmp (arg_lifetime, "infinite")) + { + lifetime = RFAPI_INFINITE_LIFETIME; + } + else + { + endptr = NULL; + lifetime = strtoul (arg_lifetime, &endptr, 10); + if (*endptr != '\0') + { + vty_out (vty, "%% Invalid %s value%s", "lifetime", + VTY_NEWLINE); + goto fail; + } + } + } + else + { + lifetime = RFAPI_INFINITE_LIFETIME; /* default infinite */ + } + + if (arg_lnh_cost) + { + if (!arg_lnh) + { + vty_out (vty, + "%% %s may only be specified with local-next-hop%s", + "local-cost", VTY_NEWLINE); + goto fail; + } + endptr = NULL; + lnh_cost = strtoul (arg_lnh_cost, &endptr, 10); + if (*endptr != '\0' || lnh_cost > 255) + { + vty_out (vty, "%% Invalid %s value%s", "local-cost", + VTY_NEWLINE); + goto fail; + } + } + else + { + lnh_cost = 255; + } + + if (arg_lnh) + { + if (!arg_prefix) + { + vty_out (vty, "%% %s may only be specified with prefix%s", + "local-next-hop", VTY_NEWLINE); + goto fail; + } + if ((rc = rfapiCliGetPrefixAddr (vty, arg_lnh, + &optary[opt_next].v. + local_nexthop.addr))) + { + + goto fail; + } + + optary[opt_next].v.local_nexthop.cost = lnh_cost; + optary[opt_next].type = RFAPI_VN_OPTION_TYPE_LOCAL_NEXTHOP; + + if (opt_next) + { + optary[opt_next - 1].next = optary + opt_next; + } + else + { + opt = optary; + } + ++opt_next; + } + + if (arg_vni && !arg_macaddr) + { + vty_out (vty, "%% %s may only be specified with mac address%s", + "virtual-network-identifier", VTY_NEWLINE); + goto fail; + } + + if (arg_macaddr) + { + if (!arg_vni) + { + vty_out (vty, + "Missing \"vni\" parameter (mandatory with mac)%s", + VTY_NEWLINE); + return CMD_WARNING; + } + VTY_GET_INTEGER ("Logical Network ID", + optary[opt_next].v.l2addr.logical_net_id, + arg_vni); + + if ((rc = rfapiStr2EthAddr (arg_macaddr, + &optary[opt_next].v.l2addr.macaddr))) + { + vty_out (vty, "Invalid %s value%s", "mac address", + VTY_NEWLINE); + goto fail; + } + /* TBD label, NVE ID */ + + optary[opt_next].type = RFAPI_VN_OPTION_TYPE_L2ADDR; + + if (opt_next) + { + optary[opt_next - 1].next = optary + opt_next; + } + else + { + opt = optary; + } + ++opt_next; + } + + zlog_debug + ("%s: vn=%s, un=%s, prefix=%s, cost=%s, lifetime=%s, lnh=%s", + __func__, arg_vn, arg_un, arg_prefix, + (arg_cost ? arg_cost : "NULL"), + (arg_lifetime ? arg_lifetime : "NULL"), + (arg_lnh ? 
arg_lnh : "NULL")); + + rfapiQprefix2Rprefix (&pfx, &rpfx); + + rpfx.cost = cost & 255; + + /* look up rf descriptor, call open if it doesn't exist */ + rc = + rfapi_find_rfd (bgp, &vn_address, &un_address, + (struct rfapi_descriptor **) &rfd); + if (rc) + { + if (ENOENT == rc) + { + struct rfapi_un_option uo; + + /* + * flag descriptor as provisionally opened for static route + * registration so that we can fix up the other parameters + * when the real open comes along + */ + memset (&uo, 0, sizeof (uo)); + uo.type = RFAPI_UN_OPTION_TYPE_PROVISIONAL; + + rc = rfapi_open (rfapi_get_rfp_start_val_by_bgp (bgp), &vn_address, &un_address, &uo, /* flags */ + NULL, NULL, /* no userdata */ + &rfd); + if (rc) + { + vty_out (vty, "Can't open session for this NVE: %s%s", + rfapi_error_str (rc), VTY_NEWLINE); + rc = CMD_WARNING; + goto fail; + } + } + else + { + vty_out (vty, "Can't find session for this NVE: %s%s", + rfapi_error_str (rc), VTY_NEWLINE); + goto fail; + } + } + + rc = + rfapi_register (rfd, &rpfx, lifetime, NULL, opt, RFAPI_REGISTER_ADD); + if (!rc) + { + struct rfapi_next_hop_entry *head = NULL; + struct rfapi_next_hop_entry *tail = NULL; + struct rfapi_vn_option *vn_opt_new; + + zlog_debug ("%s: rfapi_register succeeded, returning 0", __func__); + + if (h->rfp_methods.local_cb) + { + struct rfapi_descriptor *r = (struct rfapi_descriptor *) rfd; + vn_opt_new = rfapi_vn_options_dup (opt); + + rfapiAddDeleteLocalRfpPrefix (&r->un_addr, &r->vn_addr, &rpfx, + 1, lifetime, vn_opt_new, &head, + &tail); + if (head) + { + h->flags |= RFAPI_INCALLBACK; + (*h->rfp_methods.local_cb) (head, r->cookie); + h->flags &= ~RFAPI_INCALLBACK; + } + head = tail = NULL; + } + return 0; + } + + zlog_debug ("%s: rfapi_register failed", __func__); + vty_out (vty, "%s", VTY_NEWLINE); + vty_out (vty, "Registration failed.%s", VTY_NEWLINE); + vty_out (vty, + "Confirm that either the VN or UN address matches a configured NVE group.%s", + VTY_NEWLINE); + return CMD_WARNING; + + fail: + zlog_debug ("%s: fail, rc=%d", __func__, rc); + return rc; +} + +/************************************************************************ + * Add prefix With .LNH_OPTIONS + ************************************************************************/ +DEFUN (add_vnc_prefix_cost_life_lnh, + add_vnc_prefix_cost_life_lnh_cmd, + "add vnc prefix vn un cost (0-255) lifetime (1-4294967295) .LNH_OPTIONS", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], argv[3], argv[4], + /* mac vni */ + NULL, NULL, argc, argv); +} + +DEFUN (add_vnc_prefix_life_cost_lnh, + add_vnc_prefix_life_cost_lnh_cmd, + "add vnc prefix vn un lifetime (1-4294967295) cost (0-255) .LNH_OPTIONS", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Registration lifetime [default: infinite]\n" 
+ "Lifetime value in seconds\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], argv[4], argv[3], + /* mac vni */ + NULL, NULL, argc, argv); +} + +DEFUN (add_vnc_prefix_cost_lnh, + add_vnc_prefix_cost_lnh_cmd, + "add vnc prefix vn un cost (0-255) .LNH_OPTIONS", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], argv[3], NULL, + /* mac vni */ + NULL, NULL, argc, argv); +} + +DEFUN (add_vnc_prefix_life_lnh, + add_vnc_prefix_life_lnh_cmd, + "add vnc prefix vn un lifetime (1-4294967295) .LNH_OPTIONS", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], NULL, argv[3], + /* mac vni */ + NULL, NULL, argc, argv); +} + +DEFUN (add_vnc_prefix_lnh, + add_vnc_prefix_lnh_cmd, + "add vnc prefix vn un .LNH_OPTIONS", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], NULL, NULL, + /* mac vni */ + NULL, NULL, argc, argv); +} + +/************************************************************************ + * Add prefix Without .LNH_OPTIONS + ************************************************************************/ +DEFUN (add_vnc_prefix_cost_life, + add_vnc_prefix_cost_life_cmd, + "add vnc prefix vn un cost (0-255) lifetime (1-4294967295)", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], argv[3], argv[4], + /* mac vni */ + NULL, NULL, 0, NULL); +} + +DEFUN (add_vnc_prefix_life_cost, + add_vnc_prefix_life_cost_cmd, + "add vnc prefix vn un lifetime (1-4294967295) cost (0-255)", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 
prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], argv[4], argv[3], + /* mac vni */ + NULL, NULL, 0, NULL); +} + +DEFUN (add_vnc_prefix_cost, + add_vnc_prefix_cost_cmd, + "add vnc prefix vn un cost (0-255)", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], argv[3], NULL, + /* mac vni */ + NULL, NULL, 0, NULL); +} + +DEFUN (add_vnc_prefix_life, + add_vnc_prefix_life_cmd, + "add vnc prefix vn un lifetime (1-4294967295)", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], NULL, argv[3], + /* mac vni */ + NULL, NULL, 0, NULL); +} + +DEFUN (add_vnc_prefix, + add_vnc_prefix_cmd, + "add vnc prefix vn un ", + "Add registration\n" + "VNC Information\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "[local-next-hop (A.B.C.D|X:X::X:X)] [local-cost <0-255>]\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[0], argv[1], argv[2], NULL, NULL, + /* mac vni */ + NULL, NULL, 0, NULL); +} + +/************************************************************************ + * Mac address registrations + ************************************************************************/ +DEFUN (add_vnc_mac_vni_prefix_cost_life, + add_vnc_mac_vni_prefix_cost_life_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un prefix cost (0-255) lifetime (1-4294967295)", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[4], argv[2], argv[3], 
argv[5], argv[6], + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + + +DEFUN (add_vnc_mac_vni_prefix_life, + add_vnc_mac_vni_prefix_life_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un prefix lifetime (1-4294967295)", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[4], argv[2], argv[3], NULL, argv[5], + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + +DEFUN (add_vnc_mac_vni_prefix_cost, + add_vnc_mac_vni_prefix_cost_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un prefix cost (0-255)", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "Administrative cost [default: 255]\n" "Administrative cost\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[4], argv[2], argv[3], argv[5], NULL, + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + +DEFUN (add_vnc_mac_vni_prefix, + add_vnc_mac_vni_prefix_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un prefix ", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Add/modify prefix related infomation\n" + "IPv4 prefix\n" "IPv6 prefix\n") +{ + /* pfx vn un cost life */ + return register_add (vty, argv[4], argv[2], argv[3], NULL, NULL, + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + +DEFUN (add_vnc_mac_vni_cost_life, + add_vnc_mac_vni_cost_life_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un cost (0-255) lifetime (1-4294967295)", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Administrative cost [default: 255]\n" + "Administrative cost\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n") +{ + /* pfx vn un cost life */ + return register_add (vty, NULL, argv[2], argv[3], argv[4], argv[5], + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + + +DEFUN (add_vnc_mac_vni_cost, + add_vnc_mac_vni_cost_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un cost (0-255)", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address 
infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Administrative cost [default: 255]\n" "Administrative cost\n") +{ + /* pfx vn un cost life */ + return register_add (vty, NULL, argv[2], argv[3], argv[4], NULL, + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + + +DEFUN (add_vnc_mac_vni_life, + add_vnc_mac_vni_life_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un lifetime (1-4294967295)", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Registration lifetime [default: infinite]\n" + "Lifetime value in seconds\n") +{ + /* pfx vn un cost life */ + return register_add (vty, NULL, argv[2], argv[3], NULL, argv[4], + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + + +DEFUN (add_vnc_mac_vni, + add_vnc_mac_vni_cmd, + "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn un ", + "Add registration\n" + "VNC Information\n" + "Add/modify mac address infomation\n" + "MAC address\n" + "Virtual Network Identifier follows\n" + "Virtual Network Identifier\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" "UN IPv6 interface address\n") +{ + /* pfx vn un cost life */ + return register_add (vty, NULL, argv[2], argv[3], NULL, NULL, + /* mac vni */ + argv[0], argv[1], 0, NULL); +} + +/************************************************************************ + * Delete prefix + ************************************************************************/ + +struct rfapi_local_reg_delete_arg +{ + /* + * match parameters + */ + struct rfapi_ip_addr un_address; /* AF==0: wildcard */ + struct rfapi_ip_addr vn_address; /* AF==0: wildcard */ + struct prefix prefix; /* AF==0: wildcard */ + + struct rfapi_l2address_option_match l2o; + + /* + * result parameters + */ + struct vty *vty; + uint32_t reg_count; + uint32_t pfx_count; + uint32_t query_count; + + uint32_t failed_pfx_count; + + uint32_t nve_count; + struct skiplist *nves; + + uint32_t remote_active_nve_count; + uint32_t remote_active_pfx_count; + uint32_t remote_holddown_nve_count; + uint32_t remote_holddown_pfx_count; +}; + +struct nve_addr +{ + struct rfapi_ip_addr vn; + struct rfapi_ip_addr un; + struct rfapi_descriptor *rfd; + struct rfapi_local_reg_delete_arg *cda; +}; + +static void +nve_addr_free (void *hap) +{ + ((struct nve_addr *) hap)->cda->nve_count += 1; + XFREE (MTYPE_RFAPI_NVE_ADDR, hap); +} + +static int +nve_addr_cmp (void *k1, void *k2) +{ + struct nve_addr *a = (struct nve_addr *) k1; + struct nve_addr *b = (struct nve_addr *) k2; + int ret = 0; + + if (!a || !b) + { + return (a - b); + } + if (a->un.addr_family != b->un.addr_family) + { + return (a->un.addr_family - b->un.addr_family); + } + if (a->vn.addr_family != b->vn.addr_family) + { + return (a->vn.addr_family - b->vn.addr_family); + } + if (a->un.addr_family == AF_INET) + { + ret = IPV4_ADDR_CMP (&a->un.addr.v4, &b->un.addr.v4); + if (ret != 0) + { + return ret; + } + } + else if (a->un.addr_family == 
AF_INET6) + { + ret = IPV6_ADDR_CMP (&a->un.addr.v6, &b->un.addr.v6); + if (ret != 0) + { + return ret; + } + } + else + { + assert (0); + } + if (a->vn.addr_family == AF_INET) + { + ret = IPV4_ADDR_CMP (&a->vn.addr.v4, &b->vn.addr.v4); + if (ret != 0) + return ret; + } + else if (a->vn.addr_family == AF_INET6) + { + ret = IPV6_ADDR_CMP (&a->vn.addr.v6, &b->vn.addr.v6); + if (ret == 0) + { + return ret; + } + } + else + { + assert (0); + } + return 0; +} + +static int +parse_deleter_args ( + struct vty *vty, + const char *arg_prefix, + const char *arg_vn, + const char *arg_un, + const char *arg_l2addr, + const char *arg_vni, + struct rfapi_local_reg_delete_arg *rcdarg) +{ + int rc = CMD_WARNING; + + memset (rcdarg, 0, sizeof (struct rfapi_local_reg_delete_arg)); + + if (arg_vn && strcmp (arg_vn, "*")) + { + if ((rc = rfapiCliGetRfapiIpAddr (vty, arg_vn, &rcdarg->vn_address))) + return rc; + } + if (arg_un && strcmp (arg_un, "*")) + { + if ((rc = rfapiCliGetRfapiIpAddr (vty, arg_un, &rcdarg->un_address))) + return rc; + } + if (arg_prefix && strcmp (arg_prefix, "*")) + { + + if (!str2prefix (arg_prefix, &rcdarg->prefix)) + { + vty_out (vty, "Malformed prefix \"%s\"%s", arg_prefix, VTY_NEWLINE); + return rc; + } + } + + if (arg_l2addr) + { + if (!arg_vni) + { + vty_out (vty, "Missing VNI%s", VTY_NEWLINE); + return rc; + } + if (strcmp (arg_l2addr, "*")) + { + if ((rc = rfapiStr2EthAddr (arg_l2addr, &rcdarg->l2o.o.macaddr))) + { + vty_out (vty, "Malformed L2 Address \"%s\"%s", + arg_l2addr, VTY_NEWLINE); + return rc; + } + rcdarg->l2o.flags |= RFAPI_L2O_MACADDR; + } + if (strcmp (arg_vni, "*")) + { + VTY_GET_INTEGER ("Logical Network ID", + rcdarg->l2o.o.logical_net_id, arg_vni); + rcdarg->l2o.flags |= RFAPI_L2O_LNI; + } + } + return 0; +} + +static void +record_nve_in_cda_list ( + struct rfapi_local_reg_delete_arg *cda, + struct rfapi_ip_addr *un_address, + struct rfapi_ip_addr *vn_address, + struct rfapi_descriptor *rfd) +{ + struct nve_addr ha; + struct nve_addr *hap; + + memset (&ha, 0, sizeof (ha)); + ha.un = *un_address; + ha.vn = *vn_address; + ha.rfd = rfd; + + if (!cda->nves) + cda->nves = skiplist_new (0, nve_addr_cmp, nve_addr_free); + + if (skiplist_search (cda->nves, &ha, (void *) &hap)) + { + hap = XCALLOC (MTYPE_RFAPI_NVE_ADDR, sizeof (struct nve_addr)); + assert (hap); + ha.cda = cda; + * hap = ha; + skiplist_insert (cda->nves, hap, hap); + } +} + +static void +clear_vnc_responses (struct rfapi_local_reg_delete_arg *cda) +{ + struct rfapi *h; + struct rfapi_descriptor *rfd; + int query_count = 0; + struct listnode *node; + struct bgp *bgp_default = bgp_get_default (); + + if (cda->vn_address.addr_family && cda->un_address.addr_family) + { + /* + * Single nve case + */ + if (rfapi_find_rfd + (bgp_default, &cda->vn_address, &cda->un_address, &rfd)) + return; + + rfapiRibClear (rfd); + rfapi_query_done_all (rfd, &query_count); + cda->query_count += query_count; + + /* + * Track unique nves seen + */ + record_nve_in_cda_list (cda, &rfd->un_addr, &rfd->vn_addr, rfd); + return; + } + + /* + * wildcard case + */ + + if (!bgp_default) + return; /* ENXIO */ + + h = bgp_default->rfapi; + + if (!h) + return; /* ENXIO */ + + for (ALL_LIST_ELEMENTS_RO (&h->descriptors, node, rfd)) + { + /* + * match un, vn addresses of NVEs + */ + if (cda->un_address.addr_family && + rfapi_ip_addr_cmp (&cda->un_address, &rfd->un_addr)) + { + continue; + } + if (cda->vn_address.addr_family && + rfapi_ip_addr_cmp (&cda->vn_address, &rfd->vn_addr)) + { + continue; + } + + rfapiRibClear (rfd); + + 
rfapi_query_done_all (rfd, &query_count); + cda->query_count += query_count; + + /* + * Track unique nves seen + */ + record_nve_in_cda_list (cda, &rfd->un_addr, &rfd->vn_addr, rfd); + } +} + +/* + * TBD need to count deleted prefixes and nves? + * + * ENXIO BGP or VNC not configured + */ +static int +rfapiDeleteLocalPrefixes (struct rfapi_local_reg_delete_arg *cda) +{ + struct rfapi_ip_addr *pUn; /* NULL = wildcard */ + struct rfapi_ip_addr *pVn; /* NULL = wildcard */ + struct prefix *pPrefix; /* NULL = wildcard */ + + struct rfapi *h; + struct listnode *node; + struct rfapi_descriptor *rfd; + struct rfapi_ip_prefix rprefix; + struct bgp *bgp_default = bgp_get_default (); + struct rfapi_next_hop_entry *head = NULL; + struct rfapi_next_hop_entry *tail = NULL; + struct rfapi_cfg *rfapi_cfg; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: entry", __func__); +#endif + + if (!bgp_default) + return ENXIO; + + pUn = (cda->un_address.addr_family ? &cda->un_address : NULL); + pVn = (cda->vn_address.addr_family ? &cda->vn_address : NULL); + pPrefix = (cda->prefix.family ? &cda->prefix : NULL); + + h = bgp_default->rfapi; + rfapi_cfg = bgp_default->rfapi_cfg; + + if (!h || !rfapi_cfg) + return ENXIO; + + if (pPrefix) + { + rfapiQprefix2Rprefix (pPrefix, &rprefix); + } + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: starting descriptor loop", __func__); +#endif + + for (ALL_LIST_ELEMENTS_RO (&h->descriptors, node, rfd)) + { + struct rfapi_adb *adb; + int rc; + int deleted_from_this_nve; + struct nve_addr ha; + struct nve_addr *hap; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: rfd=%p", __func__, rfd); +#endif + + /* + * match un, vn addresses of NVEs + */ + if (pUn && (rfapi_ip_addr_cmp (pUn, &rfd->un_addr))) + continue; + if (pVn && (rfapi_ip_addr_cmp (pVn, &rfd->vn_addr))) + continue; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: un, vn match", __func__); +#endif + + /* + * match prefix + */ + + deleted_from_this_nve = 0; + + { + struct skiplist *sl; + struct rfapi_ip_prefix rp; + void *cursor; + struct list *adb_delete_list; + + /* + * The advertisements are stored in a skiplist. Withdrawing + * the registration deletes the advertisement from the + * skiplist, which we can't do while iterating over that + * same skiplist using the current skiplist API. + * + * Strategy: iterate over the skiplist and build another + * list containing only the matching ADBs. Then delete + * _everything_ in that second list (which can be done + * using either skiplists or quagga linklists). 
+ */ + adb_delete_list = list_new (); + + /* + * Advertised IP prefixes (not 0/32 or 0/128) + */ + sl = rfd->advertised.ipN_by_prefix; + + for (cursor = NULL, + rc = skiplist_next (sl, NULL, (void **) &adb, &cursor); + !rc; rc = skiplist_next (sl, NULL, (void **) &adb, &cursor)) + { + + if (pPrefix) + { + if (!prefix_same (pPrefix, &adb->prefix_ip)) + { +#if DEBUG_L2_EXTRA + zlog_debug ("%s: adb=%p, prefix doesn't match, skipping", + __func__, adb); +#endif + continue; + } + } + if (CHECK_FLAG (cda->l2o.flags, RFAPI_L2O_MACADDR)) + { + if (memcmp + (cda->l2o.o.macaddr.octet, + adb->prefix_eth.u.prefix_eth.octet, ETHER_ADDR_LEN)) + { +#if DEBUG_L2_EXTRA + zlog_debug ("%s: adb=%p, macaddr doesn't match, skipping", + __func__, adb); +#endif + continue; + } + } + + if (CHECK_FLAG (cda->l2o.flags, RFAPI_L2O_LNI)) + { + if (cda->l2o.o.logical_net_id != adb->l2o.logical_net_id) + { +#if DEBUG_L2_EXTRA + zlog_debug ("%s: adb=%p, LNI doesn't match, skipping", + __func__, adb); +#endif + continue; + } + } + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: ipN adding adb %p to delete list", __func__, + adb); +#endif + + listnode_add (adb_delete_list, adb); + } + + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO (adb_delete_list, node, adb)) + { + + struct rfapi_vn_option vn1; + struct rfapi_vn_option vn2; + struct rfapi_vn_option *pVn; + int this_advertisement_prefix_count; + + this_advertisement_prefix_count = 1; + + rfapiQprefix2Rprefix (&adb->prefix_ip, &rp); + + /* if mac addr present in advert, make l2o vn option */ + if (adb->prefix_eth.family == AF_ETHERNET) + { + + memset (&vn1, 0, sizeof (vn1)); + memset (&vn2, 0, sizeof (vn2)); + + vn1.type = RFAPI_VN_OPTION_TYPE_L2ADDR; + vn1.v.l2addr.macaddr = adb->prefix_eth.u.prefix_eth; + + /* + * use saved RD value instead of trying to invert + * complex L2-style RD computation in rfapi_register() + */ + vn2.type = RFAPI_VN_OPTION_TYPE_INTERNAL_RD; + vn2.v.internal_rd = adb->prd; + + vn1.next = &vn2; + + pVn = &vn1; + ++this_advertisement_prefix_count; + } + else + { + pVn = NULL; + } + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: ipN killing reg from adb %p ", __func__, adb); +#endif + + rc = rfapi_register (rfd, &rp, 0, NULL, pVn, RFAPI_REGISTER_KILL); + if (!rc) + { + cda->pfx_count += this_advertisement_prefix_count; + cda->reg_count += 1; + deleted_from_this_nve = 1; + } + if (h->rfp_methods.local_cb) + { + rfapiAddDeleteLocalRfpPrefix (&rfd->un_addr, &rfd->vn_addr, + &rp, 0, 0, NULL, &head, &tail); + } + } + list_delete_all_node (adb_delete_list); + + if (!(pPrefix && !RFAPI_0_PREFIX (pPrefix))) + { + void *cursor; + + /* + * Caller didn't specify a prefix, or specified (0/32 or 0/128) + */ + + /* + * Advertised 0/32 and 0/128 (indexed by ethernet address) + */ + sl = rfd->advertised.ip0_by_ether; + + for (cursor = NULL, + rc = skiplist_next (sl, NULL, (void **) &adb, &cursor); + !rc; rc = skiplist_next (sl, NULL, (void **) &adb, &cursor)) + { + + if (CHECK_FLAG (cda->l2o.flags, RFAPI_L2O_MACADDR)) + { + if (memcmp (cda->l2o.o.macaddr.octet, + adb->prefix_eth.u.prefix_eth.octet, + ETHER_ADDR_LEN)) + { + + continue; + } + } + if (CHECK_FLAG (cda->l2o.flags, RFAPI_L2O_LNI)) + { + if (cda->l2o.o.logical_net_id != adb->l2o.logical_net_id) + { + continue; + } + } +#if DEBUG_L2_EXTRA + zlog_debug ("%s: ip0 adding adb %p to delete list", + __func__, adb); +#endif + listnode_add (adb_delete_list, adb); + } + + + for (ALL_LIST_ELEMENTS_RO (adb_delete_list, node, adb)) + { + + struct rfapi_vn_option vn; + + rfapiQprefix2Rprefix (&adb->prefix_ip, &rp); + + memset 
(&vn, 0, sizeof (vn)); + vn.type = RFAPI_VN_OPTION_TYPE_L2ADDR; + vn.v.l2addr = adb->l2o; + +#if DEBUG_L2_EXTRA + zlog_debug ("%s: ip0 killing reg from adb %p ", + __func__, adb); +#endif + + rc = rfapi_register (rfd, &rp, 0, NULL, &vn, + RFAPI_REGISTER_KILL); + if (!rc) + { + cda->pfx_count += 1; + cda->reg_count += 1; + deleted_from_this_nve = 1; + } + if (h->rfp_methods.local_cb) + { + struct rfapi_vn_option *vn_opt_new; + + vn_opt_new = rfapi_vn_options_dup (&vn); + rfapiAddDeleteLocalRfpPrefix (&rfd->un_addr, + &rfd->vn_addr, &rp, 0, 0, + vn_opt_new, &head, &tail); + } + } + list_delete_all_node (adb_delete_list); + } + list_delete (adb_delete_list); + } + + + if (head) + { /* should not be set if (NULL == rfapi_cfg->local_cb) */ + h->flags |= RFAPI_INCALLBACK; + (*h->rfp_methods.local_cb) (head, rfd->cookie); + h->flags &= ~RFAPI_INCALLBACK; + head = tail = NULL; + } + + if (deleted_from_this_nve) + { + /* + * track unique NVEs seen + */ + memset (&ha, 0, sizeof (ha)); + ha.un = rfd->un_addr; + ha.vn = rfd->vn_addr; + + if (!cda->nves) + cda->nves = skiplist_new (0, nve_addr_cmp, nve_addr_free); + if (skiplist_search (cda->nves, &ha, (void **) &hap)) + { + hap = XCALLOC (MTYPE_RFAPI_NVE_ADDR, sizeof (struct nve_addr)); + assert (hap); + ha.cda = cda; + *hap = ha; + skiplist_insert (cda->nves, hap, hap); + } + } + } + + return 0; +} + +/* + * clear_vnc_prefix + * + * Deletes local and remote prefixes that match + */ +static void +clear_vnc_prefix (struct rfapi_local_reg_delete_arg *cda) +{ + struct prefix pfx_un; + struct prefix pfx_vn; + + struct prefix *pUN = NULL; + struct prefix *pVN = NULL; + struct prefix *pPrefix = NULL; + + /* + * Delete matching remote prefixes in holddown + */ + if (cda->vn_address.addr_family) + { + if (!rfapiRaddr2Qprefix (&cda->vn_address, &pfx_vn)) + pVN = &pfx_vn; + } + if (cda->un_address.addr_family) + { + if (!rfapiRaddr2Qprefix (&cda->un_address, &pfx_un)) + pUN = &pfx_un; + } + if (cda->prefix.family) + { + pPrefix = &cda->prefix; + } + rfapiDeleteRemotePrefixes (pUN, pVN, pPrefix, + 0, 1, &cda->remote_active_pfx_count, + &cda->remote_active_nve_count, + &cda->remote_holddown_pfx_count, + &cda->remote_holddown_nve_count); + + /* + * Now do local prefixes + */ + rfapiDeleteLocalPrefixes (cda); +} + +static void +print_cleared_stats (struct rfapi_local_reg_delete_arg *cda) +{ + struct vty *vty = cda->vty; /* for benefit of VTY_NEWLINE */ + + /* Our special element-deleting function counts nves */ + if (cda->nves) + { + skiplist_free (cda->nves); + cda->nves = NULL; + } + if (cda->failed_pfx_count) + vty_out (vty, "Failed to delete %d prefixes%s", + cda->failed_pfx_count, VTY_NEWLINE); + + /* left as "prefixes" even in single case for ease of machine parsing */ + vty_out (vty, + "[Local] Cleared %u registrations, %u prefixes, %u responses from %d NVEs%s", + cda->reg_count, cda->pfx_count, cda->query_count, cda->nve_count, + VTY_NEWLINE); + +/* + * We don't currently allow deletion of active remote prefixes from + * the command line + */ + + vty_out (vty, "[Holddown] Cleared %u prefixes from %u NVEs%s", + cda->remote_holddown_pfx_count, cda->remote_holddown_nve_count, + VTY_NEWLINE); +} + +/* + * Caller has already deleted registrations and queries for this/these + * NVEs. Now we just have to close their descriptors. 
+ */ +static void +clear_vnc_nve_closer (struct rfapi_local_reg_delete_arg *cda) +{ + struct skiplist *sl = cda->nves; /* contains affected NVEs */ + struct nve_addr *pKey; + struct nve_addr *pValue; + void *cursor = NULL; + int rc; + + if (!sl) + return; + + for (rc = skiplist_next (sl, (void **) &pKey, (void **) &pValue, &cursor); + !rc; + rc = skiplist_next (sl, (void **) &pKey, (void **) &pValue, &cursor)) + { + + if (pValue->rfd) + { + ((struct rfapi_descriptor *) pValue->rfd)->flags |= + RFAPI_HD_FLAG_CLOSING_ADMINISTRATIVELY; + rfapi_close (pValue->rfd); + } + } +} + +DEFUN (clear_vnc_nve_all, + clear_vnc_nve_all_cmd, + "clear vnc nve *", + "clear\n" + "VNC Information\n" "Clear per NVE information\n" "For all NVEs\n") +{ + + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = parse_deleter_args (vty, NULL, NULL, NULL, NULL, NULL, &cda))) + return rc; + + cda.vty = vty; + + clear_vnc_responses (&cda); + clear_vnc_prefix (&cda); + clear_vnc_nve_closer (&cda); + + print_cleared_stats (&cda); + + return 0; +} + +DEFUN (clear_vnc_nve_vn_un, + clear_vnc_nve_vn_un_cmd, + "clear vnc nve vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" "UN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = + parse_deleter_args (vty, NULL, argv[0], argv[1], NULL, NULL, &cda))) + return rc; + + cda.vty = vty; + + clear_vnc_responses (&cda); + clear_vnc_prefix (&cda); + clear_vnc_nve_closer (&cda); + + print_cleared_stats (&cda); + + return 0; +} + +DEFUN (clear_vnc_nve_un_vn, + clear_vnc_nve_un_vn_cmd, + "clear vnc nve un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = + parse_deleter_args (vty, NULL, argv[1], argv[0], NULL, NULL, &cda))) + return rc; + + cda.vty = vty; + + clear_vnc_responses (&cda); + clear_vnc_prefix (&cda); + clear_vnc_nve_closer (&cda); + + print_cleared_stats (&cda); + + return 0; +} + +DEFUN (clear_vnc_nve_vn, + clear_vnc_nve_vn_cmd, + "clear vnc nve vn <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "VN address of NVE\n" + "VN IPv4 interface address\n" "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = parse_deleter_args (vty, NULL, argv[0], NULL, NULL, NULL, &cda))) + return rc; + + cda.vty = vty; + + clear_vnc_responses (&cda); + clear_vnc_prefix (&cda); + clear_vnc_nve_closer (&cda); + + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_nve_un, + clear_vnc_nve_un_cmd, + "clear vnc nve un <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "UN address of NVE\n" + "UN IPv4 interface address\n" "UN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = parse_deleter_args (vty, NULL, NULL, argv[0], NULL, NULL, &cda))) + return rc; + + cda.vty = vty; + + clear_vnc_responses (&cda); + clear_vnc_prefix (&cda); + clear_vnc_nve_closer (&cda); + + print_cleared_stats (&cda); + return 0; +} + 
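Editor's note (illustration, not part of the patch): the "clear vnc nve" handlers above all follow one sequence. A minimal sketch of that flow driven directly, assuming hypothetical addresses; NULL or "*" arguments to parse_deleter_args() act as wildcards, as in the DEFUN bodies above.

/* Sketch only: mirrors the clear_vnc_nve_* handlers above. */
static int
example_clear_one_nve (struct vty *vty)
{
  struct rfapi_local_reg_delete_arg cda;
  int rc;

  /* match vn=10.1.1.1, un=192.168.0.1; prefix, MAC and VNI wildcarded */
  if ((rc = parse_deleter_args (vty, NULL, "10.1.1.1", "192.168.0.1",
                                NULL, NULL, &cda)))
    return rc;
  cda.vty = vty;

  clear_vnc_responses (&cda);    /* clear cached responses, count queries */
  clear_vnc_prefix (&cda);       /* delete local and holddown registrations */
  clear_vnc_nve_closer (&cda);   /* close descriptors of the affected NVEs */
  print_cleared_stats (&cda);
  return 0;
}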
+/*------------------------------------------------- + * Clear VNC Prefix + *-------------------------------------------------*/ + +/* + * This function is defined in this file (rather than in rfp_registration.c) + * because here we have access to all the task handles. + */ +DEFUN (clear_vnc_prefix_vn_un, + clear_vnc_prefix_vn_un_cmd, + "clear vnc prefix <*|A.B.C.D/M|X:X::X:X/M> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "All prefixes\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "VN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = + parse_deleter_args (vty, argv[0], argv[1], argv[2], NULL, NULL, &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_prefix_un_vn, + clear_vnc_prefix_un_vn_cmd, + "clear vnc prefix <*|A.B.C.D/M|X:X::X:X/M> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "All prefixes\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "VN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = + parse_deleter_args (vty, argv[0], argv[2], argv[1], NULL, NULL, &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_prefix_un, + clear_vnc_prefix_un_cmd, + "clear vnc prefix <*|A.B.C.D/M|X:X::X:X/M> un <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "All prefixes\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = + parse_deleter_args (vty, argv[0], NULL, argv[1], NULL, NULL, &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_prefix_vn, + clear_vnc_prefix_vn_cmd, + "clear vnc prefix <*|A.B.C.D/M|X:X::X:X/M> vn <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "All prefixes\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "UN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = + parse_deleter_args (vty, argv[0], argv[1], NULL, NULL, NULL, &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_prefix_all, + clear_vnc_prefix_all_cmd, + "clear vnc prefix <*|A.B.C.D/M|X:X::X:X/M> *", + "clear\n" + "VNC Information\n" + "Clear prefix registration infomation\n" + "All prefixes\n" + "IPv4 prefix\n" + "IPv6 prefix\n" + "From any NVE\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + if ((rc = parse_deleter_args (vty, argv[0], NULL, NULL, NULL, NULL, &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + 
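The "clear vnc prefix" variants reuse the same argument parser but skip the response flush and descriptor close: only clear_vnc_prefix and print_cleared_stats run. Given the clear_vnc_prefix definition earlier, a zeroed rfapi_local_reg_delete_arg acts as a wildcard for the vn/un filters, so deleting everything covered by one prefix reduces to the sketch below (illustrative only: the prefix literal is made up, and whether any cda fields beyond those shown need initialization is an assumption):

/* Illustrative sketch, roughly equivalent to "clear vnc prefix 10.1.0.0/16 *". */
struct rfapi_local_reg_delete_arg cda;

memset (&cda, 0, sizeof (cda));          /* zeroed vn/un filters == match any NVE */
cda.vty = vty;                           /* vty assumed to be in scope */
str2prefix ("10.1.0.0/16", &cda.prefix); /* only the prefix filter is set */
clear_vnc_prefix (&cda);                 /* remote holddown prefixes, then local */
print_cleared_stats (&cda);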
+/*------------------------------------------------- + * Clear VNC MAC + *-------------------------------------------------*/ + +/* + * This function is defined in this file (rather than in rfp_registration.c) + * because here we have access to all the task handles. + */ +DEFUN (clear_vnc_mac_vn_un, + clear_vnc_mac_vn_un_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "Virtual network identifier\n" + "VN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, NULL, argv[2], argv[3], argv[0], argv[1], + &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_mac_un_vn, + clear_vnc_mac_un_vn_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "VN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, NULL, argv[3], argv[2], argv[0], argv[1], + &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_mac_un, + clear_vnc_mac_un_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, NULL, NULL, argv[2], argv[0], argv[1], &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_mac_vn, + clear_vnc_mac_vn_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "UN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, NULL, argv[2], NULL, argv[0], argv[1], &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + 
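The MAC-clearing forms above add two leading tokens (MAC address and VNI) ahead of the vn/un filters, and the inline "pfx vn un L2 VNI" comments record how the argv slots are re-ordered into parse_deleter_args. For the vn/un form the mapping works out as follows (the concrete address and VNI values are only examples):

/*
 * clear vnc mac 52:54:00:12:34:56 virtual-network-identifier 4096 vn 192.0.2.1 un 203.0.113.7
 *
 *   argv[0] = "52:54:00:12:34:56"  -> 5th parse_deleter_args argument (L2 address)
 *   argv[1] = "4096"               -> 6th argument (VNI / logical network id)
 *   argv[2] = "192.0.2.1"          -> 3rd argument (VN address)
 *   argv[3] = "203.0.113.7"        -> 4th argument (UN address)
 *
 * i.e. parse_deleter_args (vty, NULL, argv[2], argv[3], argv[0], argv[1], &cda);
 */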
+DEFUN (clear_vnc_mac_all, + clear_vnc_mac_all_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> *", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "From any NVE\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, NULL, NULL, NULL, argv[0], argv[1], &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +/*------------------------------------------------- + * Clear VNC MAC PREFIX + *-------------------------------------------------*/ + +DEFUN (clear_vnc_mac_vn_un_prefix, + clear_vnc_mac_vn_un_prefix_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "Virtual network identifier\n" + "VN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "Clear prefix registration infomation\n" + "All prefixes\n" + "IPv4 prefix\n" + "IPv6 prefix\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, argv[4], argv[2], argv[3], argv[0], argv[1], + &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_mac_un_vn_prefix, + clear_vnc_mac_un_vn_prefix_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M> prefix <*|A.B.C.D/M|X:X::X:X/M>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n" + "VN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, argv[4], argv[3], argv[2], argv[0], argv[1], + &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_mac_un_prefix, + clear_vnc_mac_un_prefix_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "UN address of NVE\n" + "All UN addresses\n" + "UN IPv4 interface address\n" + "UN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, argv[3], NULL, argv[2], argv[0], argv[1], + &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats 
(&cda); + return 0; +} + +DEFUN (clear_vnc_mac_vn_prefix, + clear_vnc_mac_vn_prefix_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "UN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, argv[3], argv[2], NULL, argv[0], argv[1], + &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +DEFUN (clear_vnc_mac_all_prefix, + clear_vnc_mac_all_prefix_cmd, + "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> prefix <*|A.B.C.D/M|X:X::X:X/M>", + "clear\n" + "VNC Information\n" + "Clear mac registration infomation\n" + "All macs\n" + "MAC address\n" + "VNI keyword\n" + "Any virtual network identifier\n" + "Virtual network identifier\n" + "UN address of NVE\n" + "All VN addresses\n" + "VN IPv4 interface address\n" + "VN IPv6 interface address\n") +{ + struct rfapi_local_reg_delete_arg cda; + int rc; + + /* pfx vn un L2 VNI */ + if ((rc = + parse_deleter_args (vty, argv[2], NULL, NULL, argv[0], argv[1], &cda))) + return rc; + cda.vty = vty; + clear_vnc_prefix (&cda); + print_cleared_stats (&cda); + return 0; +} + +/************************************************************************ + * Show commands + ************************************************************************/ + + +/* copied from rfp_vty.c */ +static int +check_and_display_is_vnc_running (struct vty *vty) +{ + if (!bgp_rfapi_is_vnc_configured (NULL)) + return 1; /* is running */ + + if (vty) + { + vty_out (vty, + "VNC is not configured. (There are no configured BGP VPN SAFI peers.)%s", + VTY_NEWLINE); + } + return 0; /* not running */ +} + +static int +rfapi_vty_show_nve_summary (struct vty *vty, show_nve_summary_t show_type) +{ + struct bgp *bgp_default = bgp_get_default (); + struct rfapi *h; + int is_vnc_running = !bgp_rfapi_is_vnc_configured (bgp_default); + + int active_local_routes; + int active_remote_routes; + int holddown_remote_routes; + int imported_remote_routes; + + if (!bgp_default) + goto notcfg; + + h = bgp_default->rfapi; + + if (!h) + goto notcfg; + + /* don't show local info if not running RFP */ + if (is_vnc_running || show_type == SHOW_NVE_SUMMARY_REGISTERED) + { + + switch (show_type) + { + + case SHOW_NVE_SUMMARY_ACTIVE_NVES: + vty_out (vty, "%-24s ", "NVEs:"); + vty_out (vty, "%-8s %-8u ", "Active:", h->descriptors.count); + vty_out (vty, "%-8s %-8u ", "Maximum:", h->stat.max_descriptors); + vty_out (vty, "%-8s %-8u", "Unknown:", h->stat.count_unknown_nves); + break; + + case SHOW_NVE_SUMMARY_REGISTERED: + /* + * NB: With the introduction of L2 route support, we no + * longer have a one-to-one correspondence between + * locally-originated route advertisements and routes in + * the import tables that have local origin. This + * discrepancy arises because a single advertisement + * may contain both an IP prefix and a MAC address. + * Such an advertisement results in two import table + * entries: one indexed by IP prefix, the other indexed + * by MAC address. 
+ * + * TBD: update computation and display of registration + * statistics to reflect the underlying semantics. + */ + if (is_vnc_running) + { + vty_out (vty, "%-24s ", "Registrations:"); + vty_out (vty, "%-8s %-8u ", "Active:", + rfapiApCountAll (bgp_default)); + vty_out (vty, "%-8s %-8u ", "Failed:", + h->stat.count_registrations_failed); + vty_out (vty, "%-8s %-8u", "Total:", + h->stat.count_registrations); + vty_out (vty, "%s", VTY_NEWLINE); + } + vty_out (vty, "%-24s ", "Prefixes registered:"); + vty_out (vty, "%s", VTY_NEWLINE); + + rfapiCountAllItRoutes (&active_local_routes, + &active_remote_routes, + &holddown_remote_routes, + &imported_remote_routes); + + /* local */ + if (is_vnc_running) + { + vty_out (vty, " %-20s ", "Locally:"); + vty_out (vty, "%-8s %-8u ", "Active:", active_local_routes); + vty_out (vty, "%s", VTY_NEWLINE); + } + + + vty_out (vty, " %-20s ", "Remotely:"); + vty_out (vty, "%-8s %-8u", "Active:", active_remote_routes); + vty_out (vty, "%s", VTY_NEWLINE); + vty_out (vty, " %-20s ", "In Holddown:"); + vty_out (vty, "%-8s %-8u", "Active:", holddown_remote_routes); + vty_out (vty, "%s", VTY_NEWLINE); + vty_out (vty, " %-20s ", "Imported:"); + vty_out (vty, "%-8s %-8u", "Active:", imported_remote_routes); + break; + + case SHOW_NVE_SUMMARY_QUERIES: + vty_out (vty, "%-24s ", "Queries:"); + vty_out (vty, "%-8s %-8u ", "Active:", rfapi_monitor_count (NULL)); + vty_out (vty, "%-8s %-8u ", "Failed:", + h->stat.count_queries_failed); + vty_out (vty, "%-8s %-8u", "Total:", h->stat.count_queries); + break; + + case SHOW_NVE_SUMMARY_RESPONSES: + rfapiRibShowResponsesSummary (vty); + + default: + break; + } + vty_out (vty, "%s", VTY_NEWLINE); + } + return 0; + +notcfg: + vty_out (vty, "VNC is not configured.%s", VTY_NEWLINE); + return CMD_WARNING; +} + +static int +rfapi_show_nves ( + struct vty *vty, + struct prefix *vn_prefix, + struct prefix *un_prefix) +{ + //struct hash *rfds; + //struct rfp_rfapi_descriptor_param param; + + struct bgp *bgp_default = bgp_get_default (); + struct rfapi *h; + struct listnode *node; + struct rfapi_descriptor *rfd; + + int total = 0; + int printed = 0; + int rc; + + if (!bgp_default) + goto notcfg; + + h = bgp_default->rfapi; + + if (!h) + goto notcfg; + + rc = rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_ACTIVE_NVES); + if (rc) + return rc; + + for (ALL_LIST_ELEMENTS_RO (&h->descriptors, node, rfd)) + { + struct prefix pfx; + char vn_addr_buf[INET6_ADDRSTRLEN] = + { + 0,}; + char un_addr_buf[INET6_ADDRSTRLEN] = + { + 0,}; + char age[10]; + + ++total; + + if (vn_prefix) + { + assert (!rfapiRaddr2Qprefix (&rfd->vn_addr, &pfx)); + if (!prefix_match (vn_prefix, &pfx)) + continue; + } + + if (un_prefix) + { + assert (!rfapiRaddr2Qprefix (&rfd->un_addr, &pfx)); + if (!prefix_match (un_prefix, &pfx)) + continue; + } + + rfapiRfapiIpAddr2Str (&rfd->vn_addr, vn_addr_buf, INET6_ADDRSTRLEN); + rfapiRfapiIpAddr2Str (&rfd->un_addr, un_addr_buf, INET6_ADDRSTRLEN); + + if (!printed) + { + /* print out a header */ + vty_out (vty, " " + "Active Next Hops%s", VTY_NEWLINE); + vty_out (vty, "%-15s %-15s %-5s %-5s %-6s %-6s %s%s", + "VN Address", + "UN Address", + "Regis", "Resps", "Reach", "Remove", "Age", VTY_NEWLINE); + } + + ++printed; + + vty_out (vty, "%-15s %-15s %-5u %-5u %-6u %-6u %s%s", + vn_addr_buf, + un_addr_buf, + rfapiApCount (rfd), + rfapi_monitor_count (rfd), + rfd->stat_count_nh_reachable, + rfd->stat_count_nh_removal, + rfapiFormatAge (rfd->open_time, age, 10), VTY_NEWLINE); + } + + if (printed > 0 || vn_prefix || un_prefix) + vty_out 
(vty, "Displayed %d out of %d active NVEs%s", + printed, total, VTY_NEWLINE); + + return 0; + +notcfg: + vty_out (vty, "VNC is not configured.%s", VTY_NEWLINE); + return CMD_WARNING; +} + + +DEFUN (vnc_show_summary, + vnc_show_summary_cmd, + "show vnc summary", + SHOW_STR + VNC_SHOW_STR + "Display VNC status summary\n") +{ + if (!check_and_display_is_vnc_running (vty)) + return CMD_SUCCESS; + bgp_rfapi_show_summary (bgp_get_default (), vty); + vty_out (vty, "%s", VTY_NEWLINE); + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_ACTIVE_NVES); + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_QUERIES); + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_RESPONSES); + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_REGISTERED); + return CMD_SUCCESS; +} + +DEFUN (vnc_show_nves, + vnc_show_nves_cmd, + "show vnc nves", + SHOW_STR + VNC_SHOW_STR + "List known NVEs\n") +{ + rfapi_show_nves (vty, NULL, NULL); + return CMD_SUCCESS; +} + +DEFUN (vnc_show_nves_ptct, + vnc_show_nves_ptct_cmd, + "show vnc nves ", + SHOW_STR + VNC_SHOW_STR + "List known NVEs\n" + "VN address of NVE\n" + "UN address of NVE\n" + "IPv4 interface address\n" + "IPv6 interface address\n") +{ + struct prefix pfx; + + if (!check_and_display_is_vnc_running (vty)) + return CMD_SUCCESS; + + if (!str2prefix (argv[1], &pfx)) + { + vty_out (vty, "Malformed address \"%s\"%s", argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + if (pfx.family != AF_INET && pfx.family != AF_INET6) + { + vty_out (vty, "Invalid address \"%s\"%s", argv[1], VTY_NEWLINE); + return CMD_WARNING; + } + + if (*(argv[0]) == 'u') + { + rfapi_show_nves (vty, NULL, &pfx); + } + else + { + rfapi_show_nves (vty, &pfx, NULL); + } + + return CMD_SUCCESS; +} + +/* adapted from rfp_registration_cache_log() */ +static void +rfapi_show_registrations ( + struct vty *vty, + struct prefix *restrict_to, + int show_local, + int show_remote, + int show_holddown, + int show_imported) +{ + int printed = 0; + + if (!vty) + return; + + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_REGISTERED); + + if (show_local) + { + /* non-expiring, local */ + printed += rfapiShowRemoteRegistrations (vty, restrict_to, 0, 1, 0, 0); + } + if (show_remote) + { + /* non-expiring, non-local */ + printed += rfapiShowRemoteRegistrations (vty, restrict_to, 0, 0, 1, 0); + } + if (show_holddown) + { + /* expiring, including local */ + printed += rfapiShowRemoteRegistrations (vty, restrict_to, 1, 1, 1, 0); + } + if (show_imported) + { + /* non-expiring, non-local */ + printed += rfapiShowRemoteRegistrations (vty, restrict_to, 0, 0, 1, 1); + } + if (!printed) + { + vty_out (vty, "%s", VTY_NEWLINE); + } +} + +DEFUN (vnc_show_registrations_pfx, + vnc_show_registrations_pfx_cmd, + "show vnc registrations <[A.B.C.D/M]|[X:X::X:X/M]|[YY:YY:YY:YY:YY:YY]>", + SHOW_STR + VNC_SHOW_STR + "List active prefix registrations\n" + "Limit output to a particular prefix or address\n" + "Limit output to a particular prefix or address\n") +{ + struct prefix p; + struct prefix *p_addr = NULL; + + if (argc == 1) + { + if (!str2prefix (argv[0], &p)) + { + vty_out (vty, "Invalid prefix: %s%s", argv[0], VTY_NEWLINE); + return CMD_SUCCESS; + } + else + { + p_addr = &p; + } + } + + rfapi_show_registrations (vty, p_addr, 1, 1, 1, 1); + return CMD_SUCCESS; +} + +ALIAS (vnc_show_registrations_pfx, + vnc_show_registrations_cmd, + "show vnc registrations", + SHOW_STR + VNC_SHOW_STR + "List active prefix registrations\n") + DEFUN (vnc_show_registrations_some_pfx, + vnc_show_registrations_some_pfx_cmd, + "show vnc registrations 
(all|holddown|imported|local|remote) ([A.B.C.D/M]|[X:X::X:X/M]|[YY:YY:YY:YY:YY:YY])", + SHOW_STR + VNC_SHOW_STR + "List active prefix registrations\n" + "show all registrations\n" + "show only registrations in holddown\n" + "show only imported prefixes\n" + "show only local registrations\n" + "show only remote registrations\n" + "Limit output to a particular prefix or address\n" + "Limit output to a particular prefix or address\n") +{ + struct prefix p; + struct prefix *p_addr = NULL; + + int show_local = 0; + int show_remote = 0; + int show_holddown = 0; + int show_imported = 0; + + if (argc == 2) + { + if (!str2prefix (argv[1], &p)) + { + vty_out (vty, "Invalid prefix: %s%s", argv[1], VTY_NEWLINE); + return CMD_SUCCESS; + } + else + { + p_addr = &p; + } + } + switch (*argv[0]) + { + case 'a': + show_local = 1; + show_remote = 1; + show_holddown = 1; + show_imported = 1; + break; + + case 'h': + show_holddown = 1; + break; + + case 'i': + show_imported = 1; + break; + + case 'l': + show_local = 1; + break; + + case 'r': + show_remote = 1; + break; + } + + rfapi_show_registrations (vty, p_addr, + show_local, show_remote, show_holddown, + show_imported); + return CMD_SUCCESS; +} + +ALIAS (vnc_show_registrations_some_pfx, + vnc_show_registrations_some_cmd, + "show vnc registrations (all|holddown|imported|local|remote)", + SHOW_STR + VNC_SHOW_STR + "List active prefix registrations\n" + "show all registrations\n" + "show only registrations in holddown\n" + "show only imported prefixes\n" + "show only local registrations\n" + "show only remote registrations\n") + +DEFUN (vnc_show_responses_pfx, + vnc_show_responses_pfx_cmd, + "show vnc responses <[A.B.C.D/M]|[X:X::X:X/M]|[YY:YY:YY:YY:YY:YY]>", + SHOW_STR + VNC_SHOW_STR + "List recent query responses\n" + "Limit output to a particular prefix or address\n" + "Limit output to a particular prefix or address\n") +{ + struct prefix p; + struct prefix *p_addr = NULL; + + if (argc == 1) + { + if (!str2prefix (argv[0], &p)) + { + vty_out (vty, "Invalid prefix: %s%s", argv[0], VTY_NEWLINE); + return CMD_SUCCESS; + } + else + { + p_addr = &p; + } + } + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_QUERIES); + + rfapiRibShowResponsesSummary (vty); + + rfapiRibShowResponses (vty, p_addr, 0); + rfapiRibShowResponses (vty, p_addr, 1); + + return CMD_SUCCESS; +} + +ALIAS (vnc_show_responses_pfx, + vnc_show_responses_cmd, + "show vnc responses", + SHOW_STR + VNC_SHOW_STR + "List recent query responses\n") + +DEFUN (vnc_show_responses_some_pfx, + vnc_show_responses_some_pfx_cmd, + "show vnc responses <[A.B.C.D/M]|[X:X::X:X/M]|[YY:YY:YY:YY:YY:YY]>", + SHOW_STR + VNC_SHOW_STR + "List recent query responses\n" + "show only active query responses\n" + "show only removed query responses\n" + "Limit output to a particular prefix or address\n" + "Limit output to a particular prefix or address\n") +{ + struct prefix p; + struct prefix *p_addr = NULL; + + int show_active = 0; + int show_removed = 0; + + if (!check_and_display_is_vnc_running (vty)) + return CMD_SUCCESS; + + if (argc == 2) + { + if (!str2prefix (argv[1], &p)) + { + vty_out (vty, "Invalid prefix: %s%s", argv[1], VTY_NEWLINE); + return CMD_SUCCESS; + } + else + { + p_addr = &p; + } + } + + switch (*argv[0]) + { + case 'a': + show_active = 1; + break; + + case 'r': + show_removed = 1; + break; + } + + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_QUERIES); + + rfapiRibShowResponsesSummary (vty); + + if (show_active) + rfapiRibShowResponses (vty, p_addr, 0); + if (show_removed) + rfapiRibShowResponses 
(vty, p_addr, 1); + + return CMD_SUCCESS; +} + +ALIAS (vnc_show_responses_some_pfx, + vnc_show_responses_some_cmd, + "show vnc responses (active|removed)", + SHOW_STR + VNC_SHOW_STR + "List recent query responses\n" + "show only active query responses\n" + "show only removed query responses\n") + +DEFUN (show_vnc_queries_pfx, + show_vnc_queries_pfx_cmd, + "show vnc queries <[A.B.C.D/M]|[X:X::X:X/M]|[YY:YY:YY:YY:YY:YY]>", + SHOW_STR + VNC_SHOW_STR + "List active queries\n" + "Limit output to a particular IPv4 prefix or address\n" + "Limit output to a particular IPv6 prefix or address\n") +{ + struct prefix pfx; + struct prefix *p = NULL; + + if (argc == 1) + { + if (!str2prefix (argv[0], &pfx)) + { + vty_out (vty, "Invalid prefix: %s%s", argv[0], VTY_NEWLINE); + return CMD_WARNING; + } + p = &pfx; + } + + rfapi_vty_show_nve_summary (vty, SHOW_NVE_SUMMARY_QUERIES); + + return rfapiShowVncQueries (vty, p); +} + +ALIAS (show_vnc_queries_pfx, + show_vnc_queries_cmd, + "show vnc queries", + SHOW_STR + VNC_SHOW_STR + "List active queries\n") + +DEFUN (vnc_clear_counters, + vnc_clear_counters_cmd, + "clear vnc counters", + CLEAR_STR + VNC_SHOW_STR + "Reset VNC counters\n") +{ + struct bgp *bgp_default = bgp_get_default (); + struct rfapi *h; + struct listnode *node; + struct rfapi_descriptor *rfd; + + if (!bgp_default) + goto notcfg; + + h = bgp_default->rfapi; + + if (!h) + goto notcfg; + + /* per-rfd */ + for (ALL_LIST_ELEMENTS_RO (&h->descriptors, node, rfd)) + { + rfd->stat_count_nh_reachable = 0; + rfd->stat_count_nh_removal = 0; + } + + /* global */ + memset (&h->stat, 0, sizeof (h->stat)); + + /* + * 151122 per bug 103, set count_registrations = number active. + * Do same for queries + */ + h->stat.count_registrations = rfapiApCountAll (bgp_default); + h->stat.count_queries = rfapi_monitor_count (NULL); + + rfapiRibShowResponsesSummaryClear (); + + return CMD_SUCCESS; + +notcfg: + vty_out (vty, "VNC is not configured.%s", VTY_NEWLINE); + return CMD_WARNING; +} + +void rfapi_vty_init () +{ + install_element (ENABLE_NODE, &add_vnc_prefix_cost_life_lnh_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_life_cost_lnh_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_cost_lnh_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_life_lnh_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_lnh_cmd); + + install_element (ENABLE_NODE, &add_vnc_prefix_cost_life_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_life_cost_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_cost_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_life_cmd); + install_element (ENABLE_NODE, &add_vnc_prefix_cmd); + + install_element (ENABLE_NODE, &add_vnc_mac_vni_prefix_cost_life_cmd); + install_element (ENABLE_NODE, &add_vnc_mac_vni_prefix_life_cmd); + install_element (ENABLE_NODE, &add_vnc_mac_vni_prefix_cost_cmd); + install_element (ENABLE_NODE, &add_vnc_mac_vni_prefix_cmd); + install_element (ENABLE_NODE, &add_vnc_mac_vni_cost_life_cmd); + install_element (ENABLE_NODE, &add_vnc_mac_vni_cost_cmd); + install_element (ENABLE_NODE, &add_vnc_mac_vni_life_cmd); + install_element (ENABLE_NODE, &add_vnc_mac_vni_cmd); + + install_element (ENABLE_NODE, &clear_vnc_nve_all_cmd); + install_element (ENABLE_NODE, &clear_vnc_nve_vn_un_cmd); + install_element (ENABLE_NODE, &clear_vnc_nve_un_vn_cmd); + install_element (ENABLE_NODE, &clear_vnc_nve_vn_cmd); + install_element (ENABLE_NODE, &clear_vnc_nve_un_cmd); + + install_element (ENABLE_NODE, &clear_vnc_prefix_vn_un_cmd); + install_element (ENABLE_NODE, 
&clear_vnc_prefix_un_vn_cmd); + install_element (ENABLE_NODE, &clear_vnc_prefix_un_cmd); + install_element (ENABLE_NODE, &clear_vnc_prefix_vn_cmd); + install_element (ENABLE_NODE, &clear_vnc_prefix_all_cmd); + + install_element (ENABLE_NODE, &clear_vnc_mac_vn_un_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_un_vn_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_un_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_vn_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_all_cmd); + + install_element (ENABLE_NODE, &clear_vnc_mac_vn_un_prefix_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_un_vn_prefix_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_un_prefix_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_vn_prefix_cmd); + install_element (ENABLE_NODE, &clear_vnc_mac_all_prefix_cmd); + + install_element (ENABLE_NODE, &vnc_clear_counters_cmd); + + install_element (VIEW_NODE, &vnc_show_summary_cmd); + install_element (ENABLE_NODE, &vnc_show_summary_cmd); + install_element (VIEW_NODE, &vnc_show_nves_cmd); + install_element (ENABLE_NODE, &vnc_show_nves_cmd); + install_element (VIEW_NODE, &vnc_show_nves_ptct_cmd); + install_element (ENABLE_NODE, &vnc_show_nves_ptct_cmd); + + install_element (VIEW_NODE, &vnc_show_registrations_cmd); + install_element (ENABLE_NODE, &vnc_show_registrations_cmd); + install_element (VIEW_NODE, &vnc_show_registrations_pfx_cmd); + install_element (ENABLE_NODE, &vnc_show_registrations_pfx_cmd); + + install_element (VIEW_NODE, &vnc_show_registrations_some_cmd); + install_element (ENABLE_NODE, &vnc_show_registrations_some_cmd); + install_element (VIEW_NODE, &vnc_show_registrations_some_pfx_cmd); + install_element (ENABLE_NODE, &vnc_show_registrations_some_pfx_cmd); + + install_element (VIEW_NODE, &vnc_show_responses_cmd); + install_element (ENABLE_NODE, &vnc_show_responses_cmd); + install_element (VIEW_NODE, &vnc_show_responses_pfx_cmd); + install_element (ENABLE_NODE, &vnc_show_responses_pfx_cmd); + + install_element (VIEW_NODE, &vnc_show_responses_some_cmd); + install_element (ENABLE_NODE, &vnc_show_responses_some_cmd); + install_element (VIEW_NODE, &vnc_show_responses_some_pfx_cmd); + install_element (ENABLE_NODE, &vnc_show_responses_some_pfx_cmd); + + install_element (ENABLE_NODE, &show_vnc_queries_cmd); + install_element (VIEW_NODE, &show_vnc_queries_cmd); + install_element (ENABLE_NODE, &show_vnc_queries_pfx_cmd); + install_element (VIEW_NODE, &show_vnc_queries_pfx_cmd); +} diff --git a/bgpd/rfapi/rfapi_vty.h b/bgpd/rfapi/rfapi_vty.h new file mode 100644 index 0000000000..c1aeda953c --- /dev/null +++ b/bgpd/rfapi/rfapi_vty.h @@ -0,0 +1,223 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#ifndef RFAPI_VTY_H +#define RFAPI_VTY_H + +#include "lib/vty.h" + +typedef enum +{ + SHOW_NVE_SUMMARY_ACTIVE_NVES, + SHOW_NVE_SUMMARY_UNKNOWN_NVES, /* legacy */ + SHOW_NVE_SUMMARY_REGISTERED, + SHOW_NVE_SUMMARY_QUERIES, + SHOW_NVE_SUMMARY_RESPONSES, + SHOW_NVE_SUMMARY_MAX +} show_nve_summary_t; + +#define VNC_SHOW_STR "VNC information\n" + +extern char * +rfapiFormatSeconds (uint32_t seconds, char *buf, size_t len); + +extern char * +rfapiFormatAge (time_t age, char *buf, size_t len); + +extern void +rfapiRprefixApplyMask (struct rfapi_ip_prefix *rprefix); + +extern int +rfapiQprefix2Raddr (struct prefix *qprefix, struct rfapi_ip_addr *raddr); + +extern void +rfapiQprefix2Rprefix (struct prefix *qprefix, + struct rfapi_ip_prefix *rprefix); + +extern int +rfapiRprefix2Qprefix (struct rfapi_ip_prefix *rprefix, + struct prefix *qprefix); + +extern int +rfapiRaddr2Qprefix (struct rfapi_ip_addr *hia, struct prefix *pfx); + +extern int +rfapiRprefixSame (struct rfapi_ip_prefix *hp1, struct rfapi_ip_prefix *hp2); + +extern void +rfapiL2o2Qprefix (struct rfapi_l2address_option *l2o, struct prefix *pfx); + +extern int +rfapiStr2EthAddr (const char *str, struct ethaddr *ea); + +extern const char * +rfapi_ntop ( + int af, + const void *src, + char *buf, + socklen_t size); + +extern int +rfapiDebugPrintf (void *dummy, const char *format, ...); + +extern int +rfapiStream2Vty ( + void *stream, /* input */ + int (**fp) (void *, const char *, ...), /* output */ + struct vty **vty, /* output */ + void **outstream, /* output */ + const char **vty_newline); /* output */ + +/*------------------------------------------ + * rfapiRfapiIpAddr2Str + * + * UI helper: generate string from rfapi_ip_addr + * + * input: + * a IP v4/v6 address + * + * output + * buf put string here + * bufsize max space to write + * + * return value: + * NULL conversion failed + * non-NULL pointer to buf + --------------------------------------------*/ +extern const char * +rfapiRfapiIpAddr2Str (struct rfapi_ip_addr *a, char *buf, int bufsize); + +extern void +rfapiPrintRfapiIpAddr (void *stream, struct rfapi_ip_addr *a); + +extern void +rfapiPrintRfapiIpPrefix (void *stream, struct rfapi_ip_prefix *p); + +void +rfapiPrintRd (struct vty *vty, struct prefix_rd *prd); + +extern void +rfapiPrintAdvertisedInfo ( + struct vty *vty, + struct rfapi_descriptor *rfd, + safi_t safi, + struct prefix *p); + +extern void +rfapiPrintDescriptor (struct vty *vty, struct rfapi_descriptor *rfd); + +extern void +rfapiPrintMatchingDescriptors (struct vty *vty, + struct prefix *vn_prefix, + struct prefix *un_prefix); + +extern void +rfapiPrintAttrPtrs (void *stream, struct attr *attr); + +/* + * Parse an address and put into a struct prefix + */ +extern int +rfapiCliGetPrefixAddr (struct vty *vty, const char *str, struct prefix *p); + +extern int +rfapiCliGetRfapiIpAddr ( + struct vty *vty, + const char *str, + struct rfapi_ip_addr *hai); + +extern void +rfapiPrintNhl (void *stream, struct rfapi_next_hop_entry *next_hops); + +extern char * +rfapiMonitorVpn2Str ( + struct rfapi_monitor_vpn *m, + char *buf, + int size); + +extern const char * +rfapiRfapiIpPrefix2Str ( + struct rfapi_ip_prefix *p, + char *buf, + int bufsize); + +extern void +rfapiShowItNode (void *stream, struct route_node *rn); + +extern char * +rfapiEthAddr2Str ( + const struct ethaddr *ea, + char *buf, + int bufsize); + +/* install vty commands */ +extern void +rfapi_vty_init (void); + +/*------------------------------------------ + * rfapiShowRemoteRegistrations + * + * UI 
helper: produces the "remote" portion of the output + * of "show vnc registrations". + * + * input: + * stream pointer to output stream + * prefix_only pointer to prefix. If non-NULL, print only registrations + * matching the specified prefix + * show_expiring if non-zero, show expiring registrations + * show_local if non-zero, show local registrations + * show_imported if non-zero, show imported registrations + * + * return value: + * 0 nothing printed + * >0 something printed + --------------------------------------------*/ +extern int +rfapiShowRemoteRegistrations ( + void *stream, + struct prefix *prefix_only, + int show_expiring, + int show_local, + int show_remote, + int show_imported); + +/*------------------------------------------ + * rfapi_monitor_count + * + * UI helper: count number of active monitors + * + * input: + * handle rfapi handle (NULL to count across + * all open handles) + * + * output + * + * return value: + * count of monitors + --------------------------------------------*/ +extern uint32_t +rfapi_monitor_count (rfapi_handle); + +extern int +rfapiShowVncQueries (void *stream, struct prefix *pfx_match); + + +#endif diff --git a/bgpd/rfapi/vnc_debug.c b/bgpd/rfapi/vnc_debug.c new file mode 100644 index 0000000000..5db6f558b8 --- /dev/null +++ b/bgpd/rfapi/vnc_debug.c @@ -0,0 +1,230 @@ +/* + * + * Copyright 2016, LabN Consulting, L.L.C. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ *
+ */
+
+#include "lib/zebra.h"
+
+#include
+#include "lib/prefix.h"
+#include "lib/linklist.h"
+#include "lib/stream.h"
+#include "lib/command.h"
+#include "lib/str.h"
+#include "lib/log.h"
+#include "bgpd/rfapi/vnc_debug.h"
+
+/*
+ * debug state storage
+ */
+unsigned long conf_vnc_debug;
+unsigned long term_vnc_debug;
+
+struct vnc_debug {
+  unsigned long bit;
+  const char *name;
+};
+
+struct vnc_debug vncdebug[] =
+{
+  {VNC_DEBUG_RFAPI_QUERY, "rfapi-query"},
+  {VNC_DEBUG_IMPORT_BI_ATTACH, "import-bi-attach"},
+  {VNC_DEBUG_IMPORT_DEL_REMOTE, "import-del-remote"},
+  {VNC_DEBUG_EXPORT_BGP_GETCE, "export-bgp-getce"},
+  {VNC_DEBUG_EXPORT_BGP_DIRECT_ADD, "export-bgp-direct-add"},
+  {VNC_DEBUG_IMPORT_BGP_ADD_ROUTE, "import-bgp-add-route"},
+};
+
+#define VNC_STR "VNC information\n"
+
+/***********************************************************************
+ * debug bgp vnc
+ ***********************************************************************/
+DEFUN (debug_bgp_vnc,
+       debug_bgp_vnc_cmd,
+       "debug bgp vnc <rfapi-query|import-bi-attach|import-del-remote>",
+       DEBUG_STR
+       BGP_STR
+       VNC_STR
+       "rfapi query handling\n"
+       "import BI attachment\n"
+       "import delete remote routes\n")
+{
+  size_t i;
+
+  for (i = 0; i < (sizeof(vncdebug) / sizeof(struct vnc_debug)); ++i)
+    {
+      if (!strcmp(argv[0], vncdebug[i].name))
+        {
+          if (vty->node == CONFIG_NODE)
+            {
+              conf_vnc_debug |= vncdebug[i].bit;
+              term_vnc_debug |= vncdebug[i].bit;
+            }
+          else
+            {
+              term_vnc_debug |= vncdebug[i].bit;
+              vty_out (vty, "BGP vnc %s debugging is on%s",
+                       vncdebug[i].name, VTY_NEWLINE);
+            }
+          return CMD_SUCCESS;
+        }
+    }
+  vty_out (vty, "Unknown debug flag: %s%s", argv[0], VTY_NEWLINE);
+  return CMD_WARNING;
+}
+
+DEFUN (no_debug_bgp_vnc,
+       no_debug_bgp_vnc_cmd,
+       "no debug bgp vnc <rfapi-query|import-bi-attach|import-del-remote>",
+       NO_STR
+       DEBUG_STR
+       BGP_STR
+       VNC_STR
+       "rfapi query handling\n"
+       "import BI attachment\n"
+       "import delete remote routes\n")
+{
+  size_t i;
+
+  for (i = 0; i < (sizeof(vncdebug) / sizeof(struct vnc_debug)); ++i)
+    {
+      if (!strcmp(argv[0], vncdebug[i].name))
+        {
+          if (vty->node == CONFIG_NODE)
+            {
+              conf_vnc_debug &= ~vncdebug[i].bit;
+              term_vnc_debug &= ~vncdebug[i].bit;
+            }
+          else
+            {
+              term_vnc_debug &= ~vncdebug[i].bit;
+              vty_out (vty, "BGP vnc %s debugging is off%s",
+                       vncdebug[i].name, VTY_NEWLINE);
+            }
+          return CMD_SUCCESS;
+        }
+    }
+  vty_out (vty, "Unknown debug flag: %s%s", argv[0], VTY_NEWLINE);
+  return CMD_WARNING;
+}
+
+ALIAS (no_debug_bgp_vnc,
+       undebug_bgp_vnc_cmd,
+       "undebug bgp vnc (rfapi-query|import-bi-attach|import-del-remote)",
+       UNDEBUG_STR
+       BGP_STR
+       VNC_STR
+       "rfapi query handling\n"
+       "import BI attachment\n"
+       "import delete remote routes\n")
+
+
+/***********************************************************************
+ * no debug bgp vnc all
+ ***********************************************************************/
+
+DEFUN (no_debug_bgp_vnc_all,
+       no_debug_bgp_vnc_all_cmd,
+       "no debug all bgp vnc",
+       NO_STR
+       DEBUG_STR
+       "Disable all VNC debugging\n"
+       BGP_STR
+       VNC_STR)
+{
+  term_vnc_debug = 0;
+  vty_out (vty, "All possible VNC debugging has been turned off%s", VTY_NEWLINE);
+
+  return CMD_SUCCESS;
+}
+
+ALIAS (no_debug_bgp_vnc_all,
+       undebug_bgp_vnc_all_cmd,
+       "undebug all bgp vnc",
+       UNDEBUG_STR
+       "Disable all VNC debugging\n"
+       BGP_STR
+       VNC_STR)
+
+/***********************************************************************
+ * show/save
+ ***********************************************************************/
+
+DEFUN (show_debugging_bgp_vnc,
+       show_debugging_bgp_vnc_cmd,
+       "show debugging bgp vnc",
+       SHOW_STR
+       DEBUG_STR
+       BGP_STR
+       VNC_STR)
+{
+  size_t i;
+
+ 
vty_out (vty, "BGP VNC debugging status:%s", VTY_NEWLINE); + + for (i = 0; i < (sizeof(vncdebug) / sizeof(struct vnc_debug)); ++i) + { + if (term_vnc_debug & vncdebug[i].bit) + { + vty_out (vty, " BGP VNC %s debugging is on%s", + vncdebug[i].name, VTY_NEWLINE); + } + } + vty_out (vty, "%s", VTY_NEWLINE); + return CMD_SUCCESS; +} + +static int +bgp_vnc_config_write_debug (struct vty *vty) +{ + int write = 0; + size_t i; + + for (i = 0; i < (sizeof(vncdebug) / sizeof(struct vnc_debug)); ++i) + { + if (conf_vnc_debug & vncdebug[i].bit) + { + vty_out (vty, "debug bgp vnc %s%s", vncdebug[i].name, VTY_NEWLINE); + write++; + } + } + return write; +} + +static struct cmd_node debug_node = +{ + DEBUG_VNC_NODE, + "", + 1 +}; + +void +vnc_debug_init (void) +{ + install_node (&debug_node, bgp_vnc_config_write_debug); + install_element (ENABLE_NODE, &show_debugging_bgp_vnc_cmd); + + install_element (ENABLE_NODE, &debug_bgp_vnc_cmd); + install_element (CONFIG_NODE, &debug_bgp_vnc_cmd); + install_element (ENABLE_NODE, &no_debug_bgp_vnc_cmd); + install_element (ENABLE_NODE, &undebug_bgp_vnc_cmd); + + install_element (ENABLE_NODE, &no_debug_bgp_vnc_all_cmd); + install_element (ENABLE_NODE, &undebug_bgp_vnc_all_cmd); +} diff --git a/bgpd/rfapi/vnc_debug.h b/bgpd/rfapi/vnc_debug.h new file mode 100644 index 0000000000..9d4270651e --- /dev/null +++ b/bgpd/rfapi/vnc_debug.h @@ -0,0 +1,49 @@ +/* + * + * Copyright 2016, LabN Consulting, L.L.C. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _QUAGGA_BGP_VNC_DEBUG_H +#define _QUAGGA_BGP_VNC_DEBUG_H + +#if ENABLE_BGP_VNC + +/* + * debug state storage + */ +extern unsigned long conf_vnc_debug; +extern unsigned long term_vnc_debug; + +/* + * debug flag bits + */ +#define VNC_DEBUG_RFAPI_QUERY 0x00000001 +#define VNC_DEBUG_IMPORT_BI_ATTACH 0x00000002 +#define VNC_DEBUG_IMPORT_DEL_REMOTE 0x00000004 +#define VNC_DEBUG_EXPORT_BGP_GETCE 0x00000008 +#define VNC_DEBUG_EXPORT_BGP_DIRECT_ADD 0x00000010 +#define VNC_DEBUG_IMPORT_BGP_ADD_ROUTE 0x00000020 + +#define VNC_DEBUG(bit) (term_vnc_debug & (VNC_DEBUG_ ## bit)) + +extern void +vnc_debug_init (void); + +#endif /* ENABLE_BGP_VNC */ + +#endif /* _QUAGGA_BGP_VNC_DEBUG_H */ diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c new file mode 100644 index 0000000000..6434c3744d --- /dev/null +++ b/bgpd/rfapi/vnc_export_bgp.c @@ -0,0 +1,2177 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * File: vnc_export_bgp.c + * Purpose: Export routes to BGP directly (not via zebra) + */ + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/log.h" +#include "lib/stream.h" +#include "lib/memory.h" +#include "lib/linklist.h" +#include "lib/plist.h" +#include "lib/routemap.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_attr.h" +#include "bgpd/bgp_aspath.h" + +#include "bgpd/rfapi/vnc_export_bgp.h" +#include "bgpd/rfapi/vnc_export_bgp_p.h" +#include "bgpd/rfapi/vnc_export_table.h" +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_backend.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/vnc_debug.h" + +/*********************************************************************** + * Export methods that set nexthop to CE (from 5226 roo EC) BEGIN + ***********************************************************************/ + +/* + * Memory allocation approach: make a ghost attr that + * has non-interned parts for the modifications. ghost attr + * memory is allocated by caller. + * + * - extract ce (=5226) EC and use as new nexthop + * - strip Tunnel Encap attr + * - copy all ECs + */ +static void +encap_attr_export_ce ( + struct attr *new, + struct attr *orig, + struct prefix *use_nexthop) +{ + /* + * Make "new" a ghost attr copy of "orig" + */ + memset (new, 0, sizeof (struct attr)); + bgp_attr_dup (new, orig); + bgp_attr_extra_get (new); + bgp_attr_flush_encap (new); + + /* + * Set nexthop + */ + switch (use_nexthop->family) + { + case AF_INET: + new->nexthop = use_nexthop->u.prefix4; + new->extra->mp_nexthop_len = 4; /* bytes */ + new->flag |= ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP); + break; + + case AF_INET6: + if (!new->extra) + { + new->extra = XCALLOC (MTYPE_ATTR_EXTRA, sizeof (struct attr_extra)); + } + new->extra->mp_nexthop_global = use_nexthop->u.prefix6; + new->extra->mp_nexthop_len = 16; /* bytes */ + break; + + default: + assert (0); + break; + } + + /* + * Set MED + * + * Note that it will be deleted when BGP sends to any eBGP + * peer unless PEER_FLAG_MED_UNCHANGED is set: + * + * neighbor NEIGHBOR attribute-unchanged med + */ + if (!CHECK_FLAG (new->flag, BGP_ATTR_MULTI_EXIT_DISC)) + { + if (CHECK_FLAG (new->flag, BGP_ATTR_LOCAL_PREF)) + { + if (new->local_pref > 255) + new->med = 0; + else + new->med = 255 - new->local_pref; + } + else + { + new->med = 255; /* shouldn't happen */ + } + new->flag |= ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC); + } + + /* + * "new" is now a ghost attr: + * - it owns an "extra" struct + * - it owns any non-interned parts + * - any references to interned parts are not counted + * + * Caller should, after using the attr, call: + * - bgp_attr_flush() to free non-interned parts + * - call bgp_attr_extra_free() to free extra + */ +} + +static int +getce (struct bgp *bgp, struct attr *attr, struct prefix *pfx_ce) +{ + uint8_t *ecp; + int i; + uint16_t localadmin 
= bgp->rfapi_cfg->resolve_nve_roo_local_admin; + + for (ecp = attr->extra->ecommunity->val, i = 0; + i < attr->extra->ecommunity->size; ++i, ecp += ECOMMUNITY_SIZE) + { + + if (VNC_DEBUG(EXPORT_BGP_GETCE)) + { + zlog_debug ("%s: %02x %02x %02x %02x %02x %02x %02x %02x", + __func__, + ecp[0], ecp[1], ecp[2], ecp[3], ecp[4], ecp[5], ecp[6], + ecp[7]); + } + + /* + * is it ROO? + */ + if (ecp[0] != 1 || ecp[1] != 3) + { + continue; + } + + /* + * Match local admin value? + */ + if (ecp[6] != ((localadmin & 0xff00) >> 8) || + ecp[7] != (localadmin & 0xff)) + continue; + + memset ((uint8_t *) pfx_ce, 0, sizeof (*pfx_ce)); + memcpy (&pfx_ce->u.prefix4, ecp + 2, 4); + pfx_ce->family = AF_INET; + pfx_ce->prefixlen = 32; + + return 0; + } + return -1; +} + + +void +vnc_direct_bgp_add_route_ce ( + struct bgp *bgp, + struct route_node *rn, + struct bgp_info *bi) +{ + struct attr *attr = bi->attr; + struct peer *peer = bi->peer; + struct prefix *prefix = &rn->p; + afi_t afi = family2afi (prefix->family); + struct bgp_node *urn; + struct bgp_info *ubi; + struct attr hattr; + struct attr *iattr; + struct prefix ce_nexthop; + struct prefix post_routemap_nexthop; + + + if (!afi) + { + zlog_err ("%s: can't get afi of route node", __func__); + return; + } + + if ((bi->type != ZEBRA_ROUTE_BGP) || + (bi->sub_type != BGP_ROUTE_NORMAL && + bi->sub_type != BGP_ROUTE_RFP && bi->sub_type != BGP_ROUTE_STATIC)) + { + + zlog_debug ("%s: wrong route type/sub_type for export, skipping", + __func__); + return; + } + + /* check bgp redist flag for vnc direct ("vpn") routes */ + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + if (!VNC_EXPORT_BGP_CE_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp ce mode not enabled, skipping", + __func__); + return; + } + + /* + * prefix list check + */ + if (bgp->rfapi_cfg->plist_export_bgp[afi]) + { + if (prefix_list_apply (bgp->rfapi_cfg->plist_export_bgp[afi], prefix) == + PREFIX_DENY) + { + zlog_debug ("%s: prefix list denied, skipping", __func__); + return; + } + } + + + /* + * Extract CE + * This works only for IPv4 because IPv6 addresses are too big + * to fit in an extended community + */ + if (getce (bgp, attr, &ce_nexthop)) + { + zlog_debug ("%s: EC has no encoded CE, skipping", __func__); + return; + } + + /* + * Is this route already represented in the unicast RIB? 
+ * (look up prefix; compare route type, sub_type, peer, nexthop) + */ + urn = + bgp_afi_node_get (bgp->rib[afi][SAFI_UNICAST], afi, SAFI_UNICAST, prefix, + NULL); + for (ubi = urn->info; ubi; ubi = ubi->next) + { + struct prefix unicast_nexthop; + + if (CHECK_FLAG (ubi->flags, BGP_INFO_REMOVED)) + continue; + + rfapiUnicastNexthop2Prefix (afi, ubi->attr, &unicast_nexthop); + + if (ubi->type == ZEBRA_ROUTE_VNC_DIRECT && + ubi->sub_type == BGP_ROUTE_REDISTRIBUTE && + ubi->peer == peer && prefix_same (&unicast_nexthop, &ce_nexthop)) + { + + zlog_debug + ("%s: already have matching exported unicast route, skipping", + __func__); + return; + } + } + + /* + * Construct new attribute set with CE addr as + * nexthop and without Tunnel Encap attr + */ + encap_attr_export_ce (&hattr, attr, &ce_nexthop); + if (bgp->rfapi_cfg->routemap_export_bgp) + { + struct bgp_info info; + route_map_result_t ret; + + memset (&info, 0, sizeof (info)); + info.peer = peer; + info.attr = &hattr; + ret = + route_map_apply (bgp->rfapi_cfg->routemap_export_bgp, prefix, + RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + return; + } + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + /* + * Rule: disallow route-map alteration of next-hop, because it + * would make it too difficult to keep track of the correspondence + * between VPN routes and unicast routes. + */ + rfapiUnicastNexthop2Prefix (afi, iattr, &post_routemap_nexthop); + + if (!prefix_same (&ce_nexthop, &post_routemap_nexthop)) + { + zlog_debug + ("%s: route-map modification of nexthop not allowed, skipping", + __func__); + bgp_attr_unintern (&iattr); + return; + } + + bgp_update (peer, prefix, + 0, /* addpath_id */ + iattr, /* bgp_update copies this attr */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL, /* tag not used for unicast */ + 0); + bgp_attr_unintern (&iattr); +} + + +/* + * "Withdrawing a Route" export process + */ +void +vnc_direct_bgp_del_route_ce ( + struct bgp *bgp, + struct route_node *rn, + struct bgp_info *bi) +{ + afi_t afi = family2afi (rn->p.family); + struct bgp_info *vbi; + struct prefix ce_nexthop; + + if (!afi) + { + zlog_err ("%s: bad afi", __func__); + return; + } + + /* check bgp redist flag for vnc direct ("vpn") routes */ + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + if (!VNC_EXPORT_BGP_CE_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp ce mode not enabled, skipping", + __func__); + return; + } + + /* + * Extract CE + * This works only for IPv4 because IPv6 addresses are too big + * to fit in an extended community + */ + if (getce (bgp, bi->attr, &ce_nexthop)) + { + zlog_debug ("%s: EC has no encoded CE, skipping", __func__); + return; + } + + /* + * Look for other VPN routes with same prefix, same 5226 CE, + * same peer. 
If at least one is present, don't remove the + * route from the unicast RIB + */ + + for (vbi = rn->info; vbi; vbi = vbi->next) + { + struct prefix ce; + if (bi == vbi) + continue; + if (bi->peer != vbi->peer) + continue; + if (getce (bgp, vbi->attr, &ce)) + continue; + if (prefix_same (&ce, &ce_nexthop)) + { + zlog_debug ("%s: still have a route via CE, not deleting unicast", + __func__); + return; + } + } + + /* + * withdraw the route + */ + bgp_withdraw (bi->peer, &rn->p, + 0, /* addpath_id */ + NULL, /* attr, ignored */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + +} + +static void +vnc_direct_bgp_vpn_enable_ce (struct bgp *bgp, afi_t afi) +{ + struct rfapi_cfg *hc; + struct route_node *rn; + struct bgp_info *ri; + + zlog_debug ("%s: entry, afi=%d", __func__, afi); + + if (!bgp) + return; + + if (!(hc = bgp->rfapi_cfg)) + return; + + if (!VNC_EXPORT_BGP_CE_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export of CE routes not enabled, skipping", __func__); + return; + } + + if (afi != AFI_IP + && afi != AFI_IP6) + { + zlog_debug ("%s: bad afi: %d", __func__, afi); + return; + } + + /* + * Go through entire ce import table and export to BGP unicast. + */ + for (rn = route_top (bgp->rfapi->it_ce->imported_vpn[afi]); rn; + rn = route_next (rn)) + { + + if (!rn->info) + continue; + + { + char prefixstr[BUFSIZ]; + + prefixstr[0] = 0; + inet_ntop (rn->p.family, &rn->p.u.prefix, prefixstr, BUFSIZ); + zlog_debug ("%s: checking prefix %s/%d", __func__, prefixstr, + rn->p.prefixlen); + } + + for (ri = rn->info; ri; ri = ri->next) + { + + zlog_debug ("%s: ri->sub_type: %d", __func__, ri->sub_type); + + if (ri->sub_type == BGP_ROUTE_NORMAL || + ri->sub_type == BGP_ROUTE_RFP || + ri->sub_type == BGP_ROUTE_STATIC) + { + + vnc_direct_bgp_add_route_ce (bgp, rn, ri); + } + + } + } +} + +static void +vnc_direct_bgp_vpn_disable_ce (struct bgp *bgp, afi_t afi) +{ + struct bgp_node *rn; + + zlog_debug ("%s: entry, afi=%d", __func__, afi); + + if (!bgp) + return; + + if (afi != AFI_IP + && afi != AFI_IP6) + { + zlog_debug ("%s: bad afi: %d", __func__, afi); + return; + } + + /* + * Go through the entire BGP unicast table and remove routes that + * originated from us + */ + for (rn = bgp_table_top (bgp->rib[afi][SAFI_UNICAST]); rn; + rn = bgp_route_next (rn)) + { + + struct bgp_info *ri; + struct bgp_info *next; + + for (ri = rn->info, next = NULL; ri; ri = next) + { + + next = ri->next; + + if (ri->type == ZEBRA_ROUTE_VNC_DIRECT && + ri->sub_type == BGP_ROUTE_REDISTRIBUTE) + { + + bgp_withdraw (ri->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + NULL, /* ignored */ + AFI_IP, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + } + } + } +} + +/*********************************************************************** + * Export methods that set nexthop to CE (from 5226 roo EC) END + ***********************************************************************/ + +/*********************************************************************** + * Export methods that proxy nexthop BEGIN + ***********************************************************************/ + +static struct ecommunity * +vnc_route_origin_ecom (struct route_node *rn) +{ + struct ecommunity *new; + struct bgp_info *bi; + + if (!rn->info) + return NULL; + + new = ecommunity_new (); + + for (bi = rn->info; bi; bi = bi->next) + { + + struct ecommunity_val roec; + + 
switch (BGP_MP_NEXTHOP_FAMILY (bi->attr->extra->mp_nexthop_len)) + { + case AF_INET: + memset (&roec, 0, sizeof (roec)); + roec.val[0] = 0x01; + roec.val[1] = 0x03; + memcpy (roec.val + 2, + &bi->attr->extra->mp_nexthop_global_in.s_addr, 4); + roec.val[6] = 0; + roec.val[7] = 0; + ecommunity_add_val (new, &roec); + break; + case AF_INET6: + /* No support for IPv6 addresses in extended communities */ + break; + } + } + + if (!new->size) + { + ecommunity_free (&new); + new = NULL; + } + + return new; +} + +static struct ecommunity * +vnc_route_origin_ecom_single (struct in_addr *origin) +{ + struct ecommunity *new; + struct ecommunity_val roec; + + memset (&roec, 0, sizeof (roec)); + roec.val[0] = 0x01; + roec.val[1] = 0x03; + memcpy (roec.val + 2, &origin->s_addr, 4); + roec.val[6] = 0; + roec.val[7] = 0; + + new = ecommunity_new (); + assert (new); + ecommunity_add_val (new, &roec); + + if (!new->size) + { + ecommunity_free (&new); + new = NULL; + } + + return new; +} + + +/* + * New memory allocation approach: make a ghost attr that + * has non-interned parts for the modifications. ghost attr + * memory is allocated by caller. + */ +static int +encap_attr_export ( + struct attr *new, + struct attr *orig, + struct prefix *new_nexthop, + struct route_node *rn) /* for VN addrs for ecom list */ + /* if rn is 0, use route's nexthop */ +{ + struct prefix orig_nexthop; + struct prefix *use_nexthop; + static struct ecommunity *ecom_ro; + + if (new_nexthop) + { + use_nexthop = new_nexthop; + } + else + { + use_nexthop = &orig_nexthop; + orig_nexthop.family = + BGP_MP_NEXTHOP_FAMILY (orig->extra->mp_nexthop_len); + if (orig_nexthop.family == AF_INET) + { + orig_nexthop.prefixlen = 32; + orig_nexthop.u.prefix4 = orig->extra->mp_nexthop_global_in; + } + else if (orig_nexthop.family == AF_INET6) + { + orig_nexthop.prefixlen = 128; + orig_nexthop.u.prefix6 = orig->extra->mp_nexthop_global; + } + else + { + return -1; /* FAIL - can't compute nexthop */ + } + } + + + /* + * Make "new" a ghost attr copy of "orig" + */ + memset (new, 0, sizeof (struct attr)); + bgp_attr_dup (new, orig); + + /* + * Set nexthop + */ + switch (use_nexthop->family) + { + case AF_INET: + new->nexthop = use_nexthop->u.prefix4; + new->extra->mp_nexthop_len = 4; /* bytes */ + new->flag |= ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP); + break; + + case AF_INET6: + if (!new->extra) + { + new->extra = XCALLOC (MTYPE_ATTR_EXTRA, sizeof (struct attr_extra)); + } + new->extra->mp_nexthop_global = use_nexthop->u.prefix6; + new->extra->mp_nexthop_len = 16; /* bytes */ + break; + + default: + assert (0); + break; + } + + bgp_attr_extra_get (new); + if (rn) + { + ecom_ro = vnc_route_origin_ecom (rn); + } + else + { + /* TBD test/assert for IPv6 */ + ecom_ro = vnc_route_origin_ecom_single (&use_nexthop->u.prefix4); + } + if (new->extra->ecommunity) + { + if (ecom_ro) + { + new->extra->ecommunity = + ecommunity_merge (ecom_ro, new->extra->ecommunity); + } + } + else + { + new->extra->ecommunity = ecom_ro; + } + if (ecom_ro) + { + new->flag |= ATTR_FLAG_BIT (BGP_ATTR_EXT_COMMUNITIES); + } + + /* + * Set MED + * + * Note that it will be deleted when BGP sends to any eBGP + * peer unless PEER_FLAG_MED_UNCHANGED is set: + * + * neighbor NEIGHBOR attribute-unchanged med + */ + if (!CHECK_FLAG (new->flag, BGP_ATTR_MULTI_EXIT_DISC)) + { + if (CHECK_FLAG (new->flag, BGP_ATTR_LOCAL_PREF)) + { + if (new->local_pref > 255) + new->med = 0; + else + new->med = 255 - new->local_pref; + } + else + { + new->med = 255; /* shouldn't happen */ + } + new->flag |= 
ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC); + } + + /* + * "new" is now a ghost attr: + * - it owns an "extra" struct + * - it owns any non-interned parts + * - any references to interned parts are not counted + * + * Caller should, after using the attr, call: + * - bgp_attr_flush() to free non-interned parts + * - call bgp_attr_extra_free() to free extra + */ + + return 0; +} + +/* + * "Adding a Route" export process + */ +void +vnc_direct_bgp_add_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn) +{ + struct attr attr = { 0 }; + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + afi_t afi = family2afi (rn->p.family); + + if (!afi) + { + zlog_err ("%s: can't get afi of route node", __func__); + return; + } + + /* check bgp redist flag for vnc direct ("vpn") routes */ + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + if (!VNC_EXPORT_BGP_GRP_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp group mode not enabled, skipping", + __func__); + return; + } + + if (!listcount (bgp->rfapi_cfg->rfg_export_direct_bgp_l)) + { + zlog_debug ("%s: no bgp-direct export nve group, skipping", __func__); + return; + } + + bgp_attr_default_set (&attr, BGP_ORIGIN_INCOMPLETE); + /* TBD set some configured med, see add_vnc_route() */ + + zlog_debug ("%s: looping over nve-groups in direct-bgp export list", + __func__); + + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, nnode, rfgn)) + { + + struct listnode *ln; + + /* + * If nve group is not defined yet, skip it + */ + if (!rfgn->rfg) + continue; + + /* + * If the nve group uses a different import table, skip it + */ + if (import_table != rfgn->rfg->rfapi_import_table) + continue; + + /* + * if no NVEs currently associated with this group, skip it + */ + if (!rfgn->rfg->nves) + continue; + + /* + * per-nve-group prefix list check + */ + if (rfgn->rfg->plist_export_bgp[afi]) + { + if (prefix_list_apply (rfgn->rfg->plist_export_bgp[afi], &rn->p) == + PREFIX_DENY) + + continue; + } + + /* + * For each NVE that is assigned to the export nve group, generate + * a route with that NVE as its next hop + */ + for (ln = listhead (rfgn->rfg->nves); ln; ln = listnextnode (ln)) + { + + struct prefix nhp; + struct rfapi_descriptor *irfd; + struct bgp_info info; + struct attr hattr; + struct attr *iattr; + + irfd = listgetdata (ln); + + if (rfapiRaddr2Qprefix (&irfd->vn_addr, &nhp)) + continue; + + /* + * Construct new attribute set with NVE's VN addr as + * nexthop and without Tunnel Encap attr + */ + if (encap_attr_export (&hattr, &attr, &nhp, rn)) + continue; + + if (VNC_DEBUG(EXPORT_BGP_DIRECT_ADD)) + { + zlog_debug ("%s: attr follows", __func__); + rfapiPrintAttrPtrs (NULL, &attr); + zlog_debug ("%s: hattr follows", __func__); + rfapiPrintAttrPtrs (NULL, &hattr); + } + + if (rfgn->rfg->routemap_export_bgp) + { + route_map_result_t ret; + info.peer = irfd->peer; + info.attr = &hattr; + ret = route_map_apply (rfgn->rfg->routemap_export_bgp, &rn->p, + RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + zlog_debug + ("%s: route map says DENY, so not calling bgp_update", + __func__); + continue; + } + } + + if (VNC_DEBUG(EXPORT_BGP_DIRECT_ADD)) + { + zlog_debug ("%s: hattr after route_map_apply:", __func__); + 
rfapiPrintAttrPtrs (NULL, &hattr); + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + bgp_update (irfd->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + iattr, /* bgp_update copies it */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL, /* tag not used for unicast */ + 0); + + bgp_attr_unintern (&iattr); + } + } + + aspath_unintern (&attr.aspath); + bgp_attr_extra_free (&attr); +} + +/* + * "Withdrawing a Route" export process + */ +void +vnc_direct_bgp_del_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn) +{ + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + afi_t afi = family2afi (rn->p.family); + + if (!afi) + { + zlog_err ("%s: can't get afi route node", __func__); + return; + } + + /* check bgp redist flag for vnc direct ("vpn") routes */ + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + if (!VNC_EXPORT_BGP_GRP_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp group mode not enabled, skipping", + __func__); + return; + } + + if (!listcount (bgp->rfapi_cfg->rfg_export_direct_bgp_l)) + { + zlog_debug ("%s: no bgp-direct export nve group, skipping", __func__); + return; + } + + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, nnode, rfgn)) + { + + struct listnode *ln; + + /* + * If nve group is not defined yet, skip it + */ + if (!rfgn->rfg) + continue; + + /* + * if no NVEs currently associated with this group, skip it + */ + if (!rfgn->rfg->nves) + continue; + + /* + * If the nve group uses a different import table, + * skip it + */ + if (import_table != rfgn->rfg->rfapi_import_table) + continue; + + /* + * For each NVE that is assigned to the export nve group, generate + * a route with that NVE as its next hop + */ + for (ln = listhead (rfgn->rfg->nves); ln; ln = listnextnode (ln)) + { + + struct prefix nhp; + struct rfapi_descriptor *irfd; + + irfd = listgetdata (ln); + + if (rfapiRaddr2Qprefix (&irfd->vn_addr, &nhp)) + continue; + + bgp_withdraw (irfd->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + } + } +} + +void +vnc_direct_bgp_add_nve (struct bgp *bgp, struct rfapi_descriptor *rfd) +{ + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + struct rfapi_nve_group_cfg *rfg = rfd->rfg; + afi_t afi = family2afi (rfd->vn_addr.addr_family); + + if (!afi) + { + zlog_err ("%s: can't get afi of nve vn addr", __func__); + return; + } + + if (!bgp) + return; + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + if (!VNC_EXPORT_BGP_GRP_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp group mode not enabled, skipping", + __func__); + return; + } + + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + /* + * Loop over the list of NVE-Groups configured for + * exporting to direct-bgp and see if this new NVE's + * group is among them. 
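+ * If it is, walk that group's VNC import table and originate each
+ * prefix into the unicast RIB with this NVE's VN address as next hop,
+ * subject to the group's export prefix-list and route-map.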
+ */ + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, nnode, rfgn)) + { + + /* + * Yes, this NVE's group is configured for export to direct-bgp + */ + if (rfgn->rfg == rfg) + { + + struct route_table *rt = NULL; + struct route_node *rn; + struct attr attr = { 0 }; + struct rfapi_import_table *import_table; + + + import_table = rfg->rfapi_import_table; + + bgp_attr_default_set (&attr, BGP_ORIGIN_INCOMPLETE); + /* TBD set some configured med, see add_vnc_route() */ + + if (afi == AFI_IP + || afi == AFI_IP6) + { + rt = import_table->imported_vpn[afi]; + } + else + { + zlog_err ("%s: bad afi %d", __func__, afi); + return; + } + + /* + * Walk the NVE-Group's VNC Import table + */ + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + if (rn->info) + { + + struct prefix nhp; + struct rfapi_descriptor *irfd = rfd; + struct attr hattr; + struct attr *iattr; + struct bgp_info info; + + if (rfapiRaddr2Qprefix (&irfd->vn_addr, &nhp)) + continue; + + /* + * per-nve-group prefix list check + */ + if (rfgn->rfg->plist_export_bgp[afi]) + { + if (prefix_list_apply (rfgn->rfg->plist_export_bgp[afi], + &rn->p) == PREFIX_DENY) + + continue; + } + + + /* + * Construct new attribute set with NVE's VN addr as + * nexthop and without Tunnel Encap attr + */ + if (encap_attr_export (&hattr, &attr, &nhp, rn)) + continue; + + if (rfgn->rfg->routemap_export_bgp) + { + route_map_result_t ret; + info.peer = irfd->peer; + info.attr = &hattr; + ret = route_map_apply (rfgn->rfg->routemap_export_bgp, + &rn->p, RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + continue; + } + + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + bgp_update (irfd->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + iattr, /* bgp_update copies it */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL, /* tag not used for unicast */ + 0); + + bgp_attr_unintern (&iattr); + + } + } + + aspath_unintern (&attr.aspath); + bgp_attr_extra_free (&attr); + } + } +} + + +void +vnc_direct_bgp_del_nve (struct bgp *bgp, struct rfapi_descriptor *rfd) +{ + struct listnode *node, *nnode; + struct rfapi_rfg_name *rfgn; + struct rfapi_nve_group_cfg *rfg = rfd->rfg; + afi_t afi = family2afi (rfd->vn_addr.addr_family); + + if (!afi) + { + zlog_err ("%s: can't get afi of nve vn addr", __func__); + return; + } + + if (!bgp) + return; + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + if (!VNC_EXPORT_BGP_GRP_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp group mode not enabled, skipping", + __func__); + return; + } + + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + /* + * Loop over the list of NVE-Groups configured for + * exporting to direct-bgp and see if this new NVE's + * group is among them. 
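+ * If it is, withdraw from the unicast RIB every route previously
+ * originated for this NVE from that group's VNC import table.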
+ */ + for (ALL_LIST_ELEMENTS (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, nnode, rfgn)) + { + + /* + * Yes, this NVE's group is configured for export to direct-bgp + */ + if (rfg && rfgn->rfg == rfg) + { + + struct route_table *rt = NULL; + struct route_node *rn; + struct rfapi_import_table *import_table; + + import_table = rfg->rfapi_import_table; + + if (afi == AFI_IP + || afi == AFI_IP6) + { + rt = import_table->imported_vpn[afi]; + } + else + { + zlog_err ("%s: bad afi %d", __func__, afi); + return; + } + + /* + * Walk the NVE-Group's VNC Import table + */ + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + if (rn->info) + { + + struct prefix nhp; + struct rfapi_descriptor *irfd = rfd; + + if (rfapiRaddr2Qprefix (&irfd->vn_addr, &nhp)) + continue; + + bgp_withdraw (irfd->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + + } + } + } + } +} + + + +/* + * Caller is responsible for ensuring that the specified nve-group + * is actually part of the list of exported nve groups. + */ +static void +vnc_direct_bgp_add_group_afi ( + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg, + afi_t afi) +{ + struct route_table *rt = NULL; + struct route_node *rn; + struct attr attr = { 0 }; + struct rfapi_import_table *import_table; + + zlog_debug ("%s: entry", __func__); + + import_table = rfg->rfapi_import_table; + if (!import_table) + { + zlog_debug ("%s: import table not defined, returning", __func__); + return; + } + + if (afi == AFI_IP + || afi == AFI_IP6) + { + rt = import_table->imported_vpn[afi]; + } + else + { + zlog_err ("%s: bad afi %d", __func__, afi); + return; + } + + if (!rfg->nves) + { + /* avoid segfault below if list doesn't exist */ + zlog_debug ("%s: no NVEs in this group", __func__); + return; + } + + bgp_attr_default_set (&attr, BGP_ORIGIN_INCOMPLETE); + /* TBD set some configured med, see add_vnc_route() */ + + /* + * Walk the NVE-Group's VNC Import table + */ + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + if (rn->info) + { + + struct listnode *ln; + + /* + * per-nve-group prefix list check + */ + if (rfg->plist_export_bgp[afi]) + { + if (prefix_list_apply (rfg->plist_export_bgp[afi], &rn->p) == + PREFIX_DENY) + + continue; + } + + /* + * For each NVE that is assigned to the export nve group, generate + * a route with that NVE as its next hop + */ + for (ln = listhead (rfg->nves); ln; ln = listnextnode (ln)) + { + + struct prefix nhp; + struct rfapi_descriptor *irfd; + struct bgp_info info; + struct attr hattr; + struct attr *iattr; + + irfd = listgetdata (ln); + + if (rfapiRaddr2Qprefix (&irfd->vn_addr, &nhp)) + continue; + + /* + * Construct new attribute set with NVE's VN addr as + * nexthop and without Tunnel Encap attr + */ + if (encap_attr_export (&hattr, &attr, &nhp, rn)) + continue; + + if (rfg->routemap_export_bgp) + { + route_map_result_t ret; + info.peer = irfd->peer; + info.attr = &hattr; + ret = route_map_apply (rfg->routemap_export_bgp, + &rn->p, RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + continue; + } + + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + bgp_update (irfd->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + iattr, /* bgp_update copies it */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used 
for unicast */ + NULL, /* tag not used for unicast */ + 0); + + bgp_attr_unintern (&iattr); + } + } + } + + aspath_unintern (&attr.aspath); + bgp_attr_extra_free (&attr); +} + + +/* + * Caller is responsible for ensuring that the specified nve-group + * is actually part of the list of exported nve groups. + */ +void +vnc_direct_bgp_add_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg) +{ + vnc_direct_bgp_add_group_afi (bgp, rfg, AFI_IP); + vnc_direct_bgp_add_group_afi (bgp, rfg, AFI_IP6); +} + + + +/* + * Caller is responsible for ensuring that the specified nve-group + * was actually part of the list of exported nve groups. + */ +static void +vnc_direct_bgp_del_group_afi ( + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg, + afi_t afi) +{ + struct route_table *rt = NULL; + struct route_node *rn; + struct rfapi_import_table *import_table; + + zlog_debug ("%s: entry", __func__); + + import_table = rfg->rfapi_import_table; + if (!import_table) + { + zlog_debug ("%s: import table not defined, returning", __func__); + return; + } + + assert (afi == AFI_IP + || afi == AFI_IP6); + rt = import_table->imported_vpn[afi]; + + if (!rfg->nves) + { + /* avoid segfault below if list does not exist */ + zlog_debug ("%s: no NVEs in this group", __func__); + return; + } + + /* + * Walk the NVE-Group's VNC Import table + */ + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + if (rn->info) + { + + struct listnode *ln; + + /* + * For each NVE that is assigned to the export nve group, generate + * a route with that NVE as its next hop + */ + for (ln = listhead (rfg->nves); ln; ln = listnextnode (ln)) + { + + struct rfapi_descriptor *irfd; + + irfd = listgetdata (ln); + + bgp_withdraw (irfd->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + + } + } + } +} + + +/* + * Caller is responsible for ensuring that the specified nve-group + * was actually part of the list of exported nve groups. 
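+ * Withdraws the group's exported routes for both IPv4 and IPv6 via
+ * the per-AFI helper above.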
+ */ +void +vnc_direct_bgp_del_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg) +{ + vnc_direct_bgp_del_group_afi (bgp, rfg, AFI_IP); + vnc_direct_bgp_del_group_afi (bgp, rfg, AFI_IP6); +} + +void +vnc_direct_bgp_reexport_group_afi ( + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg, + afi_t afi) +{ + struct listnode *node; + struct rfapi_rfg_name *rfgn; + + if (VNC_EXPORT_BGP_GRP_ENABLED (bgp->rfapi_cfg)) + { + /* + * look in the list of currently-exported groups + */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, rfgn)) + { + + if (rfgn->rfg == rfg) + { + /* + * If it matches, reexport it + */ + vnc_direct_bgp_del_group_afi (bgp, rfg, afi); + vnc_direct_bgp_add_group_afi (bgp, rfg, afi); + break; + } + } + } +} + + +static void +vnc_direct_bgp_unexport_table ( + afi_t afi, + struct route_table *rt, + struct list *nve_list) +{ + if (nve_list) + { + + struct route_node *rn; + + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + if (rn->info) + { + + struct listnode *hln; + struct rfapi_descriptor *irfd; + + for (ALL_LIST_ELEMENTS_RO (nve_list, hln, irfd)) + { + + bgp_withdraw (irfd->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + NULL, /* attr, ignored */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + + } + } + } + } +} + +static void +import_table_to_nve_list_direct_bgp ( + struct bgp *bgp, + struct rfapi_import_table *it, + struct list **nves, + uint8_t family) +{ + struct listnode *node; + struct rfapi_rfg_name *rfgn; + + /* + * Loop over the list of NVE-Groups configured for + * exporting to direct-bgp. + * + * Build a list of NVEs that use this import table + */ + *nves = NULL; + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_direct_bgp_l, + node, rfgn)) + { + + /* + * If this NVE-Group's import table matches the current one + */ + if (rfgn->rfg && rfgn->rfg->nves && rfgn->rfg->rfapi_import_table == it) + { + + nve_group_to_nve_list (rfgn->rfg, nves, family); + } + } +} + +void +vnc_direct_bgp_vpn_enable (struct bgp *bgp, afi_t afi) +{ + struct listnode *rfgn; + struct rfapi_nve_group_cfg *rfg; + + if (!bgp) + return; + + if (!VNC_EXPORT_BGP_GRP_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp group mode not enabled, skipping", + __func__); + return; + } + + if (afi != AFI_IP + && afi != AFI_IP6) + { + zlog_debug ("%s: bad afi: %d", __func__, afi); + return; + } + + /* + * Policy is applied per-nve-group, so we need to iterate + * over the groups to add everything. 
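+ * Per-group prefix-lists and export route-maps are evaluated inside
+ * vnc_direct_bgp_add_group_afi(), so no additional filtering is done
+ * at this level.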
+ */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->nve_groups_sequential, + rfgn, rfg)) + { + + /* + * contains policy management + */ + vnc_direct_bgp_add_group_afi (bgp, rfg, afi); + } +} + + +void +vnc_direct_bgp_vpn_disable (struct bgp *bgp, afi_t afi) +{ + struct rfapi_import_table *it; + uint8_t family = afi2family (afi); + + zlog_debug ("%s: entry, afi=%d", __func__, afi); + + if (!bgp) + return; + + if (!bgp->rfapi) + { + zlog_debug ("%s: rfapi not initialized", __func__); + return; + } + + if (!family || (afi != AFI_IP + && afi != AFI_IP6)) + { + zlog_debug ("%s: bad afi: %d", __func__, afi); + return; + } + + for (it = bgp->rfapi->imports; it; it = it->next) + { + + struct list *nve_list = NULL; + + import_table_to_nve_list_direct_bgp (bgp, it, &nve_list, family); + + if (nve_list) + { + vnc_direct_bgp_unexport_table (afi, it->imported_vpn[afi], + nve_list); + list_free (nve_list); + } + } +} + + +/*********************************************************************** + * Export methods that proxy nexthop END + ***********************************************************************/ + + +/*********************************************************************** + * Export methods that preserve original nexthop BEGIN + * rh = "registering nve" + ***********************************************************************/ + + +/* + * "Adding a Route" export process + * TBD do we need to check bi->type and bi->sub_type here, or does + * caller do it? + */ +void +vnc_direct_bgp_rh_add_route ( + struct bgp *bgp, + afi_t afi, + struct prefix *prefix, + struct peer *peer, + struct attr *attr) +{ + struct vnc_export_info *eti; + struct attr hattr; + struct rfapi_cfg *hc; + struct attr *iattr; + + if (!afi) + { + zlog_err ("%s: can't get afi of route node", __func__); + return; + } + + /* check bgp redist flag for vnc direct ("vpn") routes */ + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + if (!VNC_EXPORT_BGP_RH_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp RH mode not enabled, skipping", + __func__); + return; + } + + /* + * prefix list check + */ + if (hc->plist_export_bgp[afi]) + { + if (prefix_list_apply (hc->plist_export_bgp[afi], prefix) == + PREFIX_DENY) + return; + } + + /* + * Construct new attribute set with NVE's VN addr as + * nexthop and without Tunnel Encap attr + */ + if (encap_attr_export (&hattr, attr, NULL, NULL)) + return; + if (hc->routemap_export_bgp) + { + struct bgp_info info; + route_map_result_t ret; + + memset (&info, 0, sizeof (info)); + info.peer = peer; + info.attr = &hattr; + ret = + route_map_apply (hc->routemap_export_bgp, prefix, RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + return; + } + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + /* + * record route information that we will need to expire + * this route + */ + eti = vnc_eti_get (bgp, EXPORT_TYPE_BGP, prefix, peer, + ZEBRA_ROUTE_VNC_DIRECT_RH, BGP_ROUTE_REDISTRIBUTE); + rfapiGetVncLifetime (attr, &eti->lifetime); + eti->lifetime = rfapiGetHolddownFromLifetime (eti->lifetime); + + if (eti->timer) + { + /* + * export expiration timer is already running on + * this route: cancel it + */ + thread_cancel (eti->timer); + eti->timer = NULL; + } + + bgp_update 
(peer, prefix, /* prefix */ + 0, /* addpath_id */ + iattr, /* bgp_update copies this attr */ + afi, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT_RH, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL, /* tag not used for unicast */ + 0); + bgp_attr_unintern (&iattr); + +} + +static int +vncExportWithdrawTimer (struct thread *t) +{ + struct vnc_export_info *eti = t->arg; + + /* + * withdraw the route + */ + bgp_withdraw ( + eti->peer, + &eti->node->p, + 0, /* addpath_id */ + NULL, /* attr, ignored */ + family2afi (eti->node->p.family), + SAFI_UNICAST, + eti->type, + eti->subtype, + NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + + /* + * Free the eti + */ + vnc_eti_delete (eti); + + return 0; +} + +/* + * "Withdrawing a Route" export process + * TBD do we need to check bi->type and bi->sub_type here, or does + * caller do it? + */ +void +vnc_direct_bgp_rh_del_route ( + struct bgp *bgp, + afi_t afi, + struct prefix *prefix, + struct peer *peer) +{ + struct vnc_export_info *eti; + + if (!afi) + { + zlog_err ("%s: can't get afi route node", __func__); + return; + } + + /* check bgp redist flag for vnc direct ("vpn") routes */ + if (!bgp->redist[afi][ZEBRA_ROUTE_VNC_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of VNC direct routes is off", + __func__); + return; + } + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + if (!VNC_EXPORT_BGP_RH_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export-to-bgp group mode not enabled, skipping", + __func__); + return; + } + + eti = vnc_eti_get (bgp, EXPORT_TYPE_BGP, prefix, peer, + ZEBRA_ROUTE_VNC_DIRECT_RH, BGP_ROUTE_REDISTRIBUTE); + + if (!eti->timer && eti->lifetime <= INT32_MAX) + { + eti->timer = thread_add_timer (bm->master, + vncExportWithdrawTimer, + eti, eti->lifetime); + zlog_debug ("%s: set expiration timer for %u seconds", + __func__, eti->lifetime); + } +} + + +void +vnc_direct_bgp_rh_vpn_enable (struct bgp *bgp, afi_t afi) +{ + struct prefix_rd prd; + struct bgp_node *prn; + struct rfapi_cfg *hc; + + zlog_debug ("%s: entry, afi=%d", __func__, afi); + + if (!bgp) + return; + + if (!(hc = bgp->rfapi_cfg)) + return; + + if (!VNC_EXPORT_BGP_RH_ENABLED (bgp->rfapi_cfg)) + { + zlog_debug ("%s: export of RH routes not enabled, skipping", __func__); + return; + } + + if (afi != AFI_IP + && afi != AFI_IP6) + { + zlog_debug ("%s: bad afi: %d", __func__, afi); + return; + } + + /* + * Go through the entire BGP VPN table and export to BGP unicast. 
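+ * The VPN RIB is keyed by route distinguisher: the outer loop below
+ * visits one node per RD, the inner loop walks that RD's prefix table.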
+ */ + + zlog_debug ("%s: starting RD loop", __func__); + + /* Loop over all the RDs */ + for (prn = bgp_table_top (bgp->rib[afi][SAFI_MPLS_VPN]); prn; + prn = bgp_route_next (prn)) + { + + struct bgp_table *table; + struct bgp_node *rn; + struct bgp_info *ri; + + memset (&prd, 0, sizeof (prd)); + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + memcpy (prd.val, prn->p.u.val, 8); + + /* This is the per-RD table of prefixes */ + table = prn->info; + + for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn)) + { + + /* + * skip prefix list check if no routes here + */ + if (!rn->info) + continue; + + { + char prefixstr[BUFSIZ]; + + prefixstr[0] = 0; + inet_ntop (rn->p.family, &rn->p.u.prefix, prefixstr, BUFSIZ); + zlog_debug ("%s: checking prefix %s/%d", __func__, prefixstr, + rn->p.prefixlen); + } + + /* + * prefix list check + */ + if (hc->plist_export_bgp[afi]) + { + if (prefix_list_apply (hc->plist_export_bgp[afi], &rn->p) == + PREFIX_DENY) + { + + zlog_debug ("%s: prefix list says DENY", __func__); + continue; + } + } + + for (ri = rn->info; ri; ri = ri->next) + { + + zlog_debug ("%s: ri->sub_type: %d", __func__, ri->sub_type); + + if (ri->sub_type == BGP_ROUTE_NORMAL || + ri->sub_type == BGP_ROUTE_RFP) + { + + struct vnc_export_info *eti; + struct attr hattr; + struct attr *iattr; + + /* + * Construct new attribute set with NVE's VN addr as + * nexthop and without Tunnel Encap attr + */ + if (encap_attr_export (&hattr, ri->attr, NULL, NULL)) + { + zlog_debug ("%s: encap_attr_export failed", __func__); + continue; + } + + if (hc->routemap_export_bgp) + { + struct bgp_info info; + route_map_result_t ret; + + memset (&info, 0, sizeof (info)); + info.peer = ri->peer; + info.attr = &hattr; + ret = route_map_apply (hc->routemap_export_bgp, + &rn->p, RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + zlog_debug ("%s: route map says DENY", __func__); + continue; + } + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + /* + * record route information that we will need to expire + * this route + */ + eti = vnc_eti_get (bgp, EXPORT_TYPE_BGP, &rn->p, ri->peer, + ZEBRA_ROUTE_VNC_DIRECT_RH, + BGP_ROUTE_REDISTRIBUTE); + rfapiGetVncLifetime (ri->attr, &eti->lifetime); + + if (eti->timer) + { + /* + * export expiration timer is already running on + * this route: cancel it + */ + thread_cancel (eti->timer); + eti->timer = NULL; + } + + zlog_debug ("%s: calling bgp_update", __func__); + + bgp_update (ri->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + iattr, /* bgp_update copies it */ + AFI_IP, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT_RH, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL, /* tag not used for unicast */ + 0); + bgp_attr_unintern (&iattr); + } + } + } + } +} + +void +vnc_direct_bgp_rh_vpn_disable (struct bgp *bgp, afi_t afi) +{ + struct bgp_node *rn; + + zlog_debug ("%s: entry, afi=%d", __func__, afi); + + if (!bgp) + return; + + if (afi != AFI_IP + && afi != AFI_IP6) + { + zlog_debug ("%s: bad afi: %d", __func__, afi); + return; + } + + /* + * Go through the entire BGP unicast table and remove routes that + * originated from us + */ + for (rn = bgp_table_top (bgp->rib[afi][SAFI_UNICAST]); rn; + rn = bgp_route_next (rn)) + { + + struct bgp_info *ri; + struct bgp_info *next; + + for (ri = rn->info, next = NULL; ri; ri = next) + { + + next = ri->next; + + if (ri->type == ZEBRA_ROUTE_VNC_DIRECT_RH && + ri->sub_type == BGP_ROUTE_REDISTRIBUTE) + { + + struct 
vnc_export_info *eti; + + /* + * Delete routes immediately (no timer) + */ + eti = + vnc_eti_checktimer (bgp, EXPORT_TYPE_BGP, &rn->p, ri->peer, + ZEBRA_ROUTE_VNC_DIRECT_RH, + BGP_ROUTE_REDISTRIBUTE); + if (eti) + { + if (eti->timer) + thread_cancel (eti->timer); + vnc_eti_delete (eti); + } + + bgp_withdraw (ri->peer, &rn->p, /* prefix */ + 0, /* addpath_id */ + NULL, /* ignored */ + AFI_IP, SAFI_UNICAST, ZEBRA_ROUTE_VNC_DIRECT_RH, BGP_ROUTE_REDISTRIBUTE, NULL, /* RD not used for unicast */ + NULL); /* tag not used for unicast */ + } + } + } +} + +void +vnc_direct_bgp_rh_reexport (struct bgp *bgp, afi_t afi) +{ + if (VNC_EXPORT_BGP_RH_ENABLED (bgp->rfapi_cfg)) + { + vnc_direct_bgp_rh_vpn_disable (bgp, afi); + vnc_direct_bgp_rh_vpn_enable (bgp, afi); + } +} + +/*********************************************************************** + * Generic Export methods + ***********************************************************************/ + +/* + * Assumes the correct mode bits are already turned on. Thus it + * is OK to call this function from, e.g., bgp_redistribute_set() + * without caring if export is enabled or not + */ +void +vnc_export_bgp_enable (struct bgp *bgp, afi_t afi) +{ + switch (bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS) + { + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_NONE: + break; + + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_GRP: + vnc_direct_bgp_vpn_enable (bgp, afi); + break; + + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_RH: + vnc_direct_bgp_rh_vpn_enable (bgp, afi); + break; + + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_CE: + vnc_direct_bgp_vpn_enable_ce (bgp, afi); + break; + } +} + +void +vnc_export_bgp_disable (struct bgp *bgp, afi_t afi) +{ + switch (bgp->rfapi_cfg->flags & BGP_VNC_CONFIG_EXPORT_BGP_MODE_BITS) + { + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_NONE: + break; + + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_GRP: + vnc_direct_bgp_vpn_disable (bgp, afi); + break; + + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_RH: + vnc_direct_bgp_rh_vpn_disable (bgp, afi); + break; + + case BGP_VNC_CONFIG_EXPORT_BGP_MODE_CE: + vnc_direct_bgp_vpn_disable_ce (bgp, afi); + break; + } +} + +void +vnc_export_bgp_prechange (struct bgp *bgp) +{ + vnc_export_bgp_disable (bgp, AFI_IP); + vnc_export_bgp_disable (bgp, AFI_IP6); +} + +void +vnc_export_bgp_postchange (struct bgp *bgp) +{ + vnc_export_bgp_enable (bgp, AFI_IP); + vnc_export_bgp_enable (bgp, AFI_IP6); +} + +void +vnc_direct_bgp_reexport (struct bgp *bgp, afi_t afi) +{ + vnc_export_bgp_disable (bgp, afi); + vnc_export_bgp_enable (bgp, afi); +} diff --git a/bgpd/rfapi/vnc_export_bgp.h b/bgpd/rfapi/vnc_export_bgp.h new file mode 100644 index 0000000000..ae113fdcb2 --- /dev/null +++ b/bgpd/rfapi/vnc_export_bgp.h @@ -0,0 +1,42 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#ifndef _QUAGGA_RFAPI_VNC_EXPORT_BGP_H_ +#define _QUAGGA_RFAPI_VNC_EXPORT_BGP_H_ + +#include "lib/zebra.h" +#include "lib/prefix.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_route.h" + + +extern void vnc_direct_bgp_rh_reexport (struct bgp *bgp, afi_t afi); + +extern void vnc_export_bgp_prechange (struct bgp *bgp); + +extern void vnc_export_bgp_postchange (struct bgp *bgp); + +extern void vnc_export_bgp_enable (struct bgp *bgp, afi_t afi); + +extern void vnc_export_bgp_disable (struct bgp *bgp, afi_t afi); + +#endif /* _QUAGGA_RFAPI_VNC_EXPORT_BGP_H_ */ diff --git a/bgpd/rfapi/vnc_export_bgp_p.h b/bgpd/rfapi/vnc_export_bgp_p.h new file mode 100644 index 0000000000..fceab02e05 --- /dev/null +++ b/bgpd/rfapi/vnc_export_bgp_p.h @@ -0,0 +1,95 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _QUAGGA_RFAPI_VNC_EXPORT_BGP_P_H_ +#define _QUAGGA_RFAPI_VNC_EXPORT_BGP_P_H_ + +#include "lib/zebra.h" +#include "lib/prefix.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_route.h" + +#include "rfapi_private.h" + +extern void +vnc_direct_bgp_add_route_ce ( + struct bgp *bgp, + struct route_node *rn, + struct bgp_info *bi); + +extern void +vnc_direct_bgp_del_route_ce ( + struct bgp *bgp, + struct route_node *rn, + struct bgp_info *bi); + +extern void +vnc_direct_bgp_add_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn); + +extern void +vnc_direct_bgp_del_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn); + +extern void +vnc_direct_bgp_add_nve (struct bgp *bgp, struct rfapi_descriptor *rfd); + +extern void +vnc_direct_bgp_del_nve (struct bgp *bgp, struct rfapi_descriptor *rfd); + +extern void +vnc_direct_bgp_add_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg); + +extern void +vnc_direct_bgp_del_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg); + +extern void +vnc_direct_bgp_reexport_group_afi ( + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg, + afi_t afi); + + +extern void +vnc_direct_bgp_rh_add_route ( + struct bgp *bgp, + afi_t afi, + struct prefix *prefix, + struct peer *peer, + struct attr *attr); + + +extern void +vnc_direct_bgp_rh_del_route ( + struct bgp *bgp, + afi_t afi, + struct prefix *prefix, + struct peer *peer); + +extern void +vnc_direct_bgp_reexport (struct bgp *bgp, afi_t afi); + +#endif /* _QUAGGA_RFAPI_VNC_EXPORT_BGP_P_H_ */ diff --git a/bgpd/rfapi/vnc_export_table.c b/bgpd/rfapi/vnc_export_table.c new file mode 100644 index 0000000000..16ffc801ec --- /dev/null +++ b/bgpd/rfapi/vnc_export_table.c @@ -0,0 +1,214 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. 
+ * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/memory.h" +#include "lib/vty.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_route.h" + +#include "bgpd/rfapi/vnc_export_table.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_import.h" + +struct route_node * +vnc_etn_get (struct bgp *bgp, vnc_export_type_t type, struct prefix *p) +{ + struct route_table *t = NULL; + struct route_node *rn = NULL; + afi_t afi; + + if (!bgp || !bgp->rfapi) + return NULL; + + afi = family2afi (p->family); + assert (afi == AFI_IP || afi == AFI_IP6); + + switch (type) + { + case EXPORT_TYPE_BGP: + if (!bgp->rfapi->rt_export_bgp[afi]) + bgp->rfapi->rt_export_bgp[afi] = route_table_init (); + t = bgp->rfapi->rt_export_bgp[afi]; + break; + + case EXPORT_TYPE_ZEBRA: + if (!bgp->rfapi->rt_export_zebra[afi]) + bgp->rfapi->rt_export_zebra[afi] = route_table_init (); + t = bgp->rfapi->rt_export_zebra[afi]; + break; + } + + if (t) + rn = route_node_get (t, p); + return rn; +} + +struct route_node * +vnc_etn_lookup (struct bgp *bgp, vnc_export_type_t type, struct prefix *p) +{ + struct route_table *t = NULL; + struct route_node *rn = NULL; + afi_t afi; + + if (!bgp || !bgp->rfapi) + return NULL; + + afi = family2afi (p->family); + assert (afi == AFI_IP || afi == AFI_IP6); + + switch (type) + { + case EXPORT_TYPE_BGP: + if (!bgp->rfapi->rt_export_bgp[afi]) + bgp->rfapi->rt_export_bgp[afi] = route_table_init (); + t = bgp->rfapi->rt_export_bgp[afi]; + break; + + case EXPORT_TYPE_ZEBRA: + if (!bgp->rfapi->rt_export_zebra[afi]) + bgp->rfapi->rt_export_zebra[afi] = route_table_init (); + t = bgp->rfapi->rt_export_zebra[afi]; + break; + } + + if (t) + rn = route_node_lookup (t, p); + return rn; +} + +struct vnc_export_info * +vnc_eti_get ( + struct bgp *bgp, + vnc_export_type_t etype, + struct prefix *p, + struct peer *peer, + uint8_t type, + uint8_t subtype) +{ + struct route_node *etn; + struct vnc_export_info *eti; + + etn = vnc_etn_get (bgp, etype, p); + assert (etn); + + for (eti = etn->info; eti; eti = eti->next) + { + if (peer == eti->peer && type == eti->type && subtype == eti->subtype) + { + + break; + } + } + + if (eti) + { + route_unlock_node (etn); + } + else + { + eti = XCALLOC (MTYPE_RFAPI_ETI, sizeof (struct vnc_export_info)); + assert (eti); + eti->node = etn; + eti->peer = peer; + peer_lock (peer); + eti->type = type; + eti->subtype = subtype; + eti->next = etn->info; + etn->info = eti; + } + + return eti; +} + +void +vnc_eti_delete (struct vnc_export_info *goner) +{ + struct route_node *etn; + struct vnc_export_info *eti; + struct vnc_export_info *eti_prev = NULL; + + etn = goner->node; + + for (eti = etn->info; eti; eti_prev = eti, eti = eti->next) + { + if (eti == goner) + break; + } + + if (!eti) + { + zlog_debug 
("%s: COULDN'T FIND ETI", __func__); + return; + } + + if (eti_prev) + { + eti_prev->next = goner->next; + } + else + { + etn->info = goner->next; + } + + peer_unlock (eti->peer); + goner->node = NULL; + XFREE (MTYPE_RFAPI_ETI, goner); + + route_unlock_node (etn); +} + +struct vnc_export_info * +vnc_eti_checktimer ( + struct bgp *bgp, + vnc_export_type_t etype, + struct prefix *p, + struct peer *peer, + uint8_t type, + uint8_t subtype) +{ + struct route_node *etn; + struct vnc_export_info *eti; + + etn = vnc_etn_lookup (bgp, etype, p); + if (!etn) + return NULL; + + for (eti = etn->info; eti; eti = eti->next) + { + if (peer == eti->peer && type == eti->type && subtype == eti->subtype) + { + + break; + } + } + + route_unlock_node (etn); + + if (eti && eti->timer) + return eti; + + return NULL; +} diff --git a/bgpd/rfapi/vnc_export_table.h b/bgpd/rfapi/vnc_export_table.h new file mode 100644 index 0000000000..77829ca382 --- /dev/null +++ b/bgpd/rfapi/vnc_export_table.h @@ -0,0 +1,85 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _QUAGGA_VNC_VNC_EXPORT_TABLE_H_ +#define _QUAGGA_VNC_VNC_EXPORT_TABLE_H_ + +#include "lib/table.h" +#include "lib/thread.h" +#include "lib/vty.h" + +#include "bgpd/bgpd.h" + +#define VNC_EXPORT_TYPE_BGP 1 +#define VNC_EXPORT_TYPE_ZEBRA 2 + +typedef enum vnc_export_type +{ + EXPORT_TYPE_BGP, + EXPORT_TYPE_ZEBRA +} vnc_export_type_t; + +struct vnc_export_info +{ + struct vnc_export_info *next; + struct route_node *node; + struct peer *peer; + u_char type; + u_char subtype; + uint32_t lifetime; + struct thread *timer; +}; + +extern struct route_node * +vnc_etn_get ( + struct bgp *bgp, + vnc_export_type_t type, + struct prefix *p); + +extern struct route_node * +vnc_etn_lookup ( + struct bgp *bgp, + vnc_export_type_t type, + struct prefix *p); + +extern struct vnc_export_info * +vnc_eti_get ( + struct bgp *bgp, + vnc_export_type_t etype, + struct prefix *p, + struct peer *peer, + uint8_t type, + uint8_t subtype); + +extern void +vnc_eti_delete (struct vnc_export_info *goner); + +extern struct vnc_export_info * +vnc_eti_checktimer ( + struct bgp *bgp, + vnc_export_type_t etype, + struct prefix *p, + struct peer *peer, + uint8_t type, + uint8_t subtype); + + +#endif /* _QUAGGA_VNC_VNC_EXPORT_TABLE_H_ */ diff --git a/bgpd/rfapi/vnc_import_bgp.c b/bgpd/rfapi/vnc_import_bgp.c new file mode 100644 index 0000000000..4215ce2bf7 --- /dev/null +++ b/bgpd/rfapi/vnc_import_bgp.c @@ -0,0 +1,3136 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * File: vnc_import_bgp.c + * Purpose: Import routes from BGP unicast directly (not via zebra) + */ + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/vty.h" +#include "lib/log.h" +#include "lib/memory.h" +#include "lib/linklist.h" +#include "lib/plist.h" +#include "lib/routemap.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_attr.h" +#include "bgpd/bgp_mplsvpn.h" /* for RD_TYPE_IP */ + +#include "bgpd/rfapi/vnc_export_bgp.h" +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/rfapi_monitor.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/vnc_import_bgp.h" +#include "bgpd/rfapi/vnc_import_bgp_p.h" +#include "bgpd/rfapi/vnc_debug.h" + +#define ENABLE_VNC_RHNCK + +#define DEBUG_RHN_LIST 0 + +static struct rfapi_descriptor vncHDBgpDirect; /* dummy nve descriptor */ +static struct rfapi_descriptor vncHDResolveNve; /* dummy nve descriptor */ + +/* + * For routes from another AS: + * + * If MED is set, + * LOCAL_PREF = 255 - MIN(255, MED) + * else + * LOCAL_PREF = default_local_pref + * + * For routes from the same AS: + * + * LOCAL_PREF unchanged + */ +uint32_t +calc_local_pref (struct attr *attr, struct peer *peer) +{ + uint32_t local_pref = 0; + + if (!attr) + { + if (peer) + { + return peer->bgp->default_local_pref; + } + return bgp_get_default ()->default_local_pref; + } + + if (peer && (peer->as != peer->bgp->as)) + { + if (attr->flag & ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC)) + { + if (attr->med > 255) + { + local_pref = 0; + } + else + { + local_pref = 255 - attr->med; + } + } + else + { + local_pref = peer->bgp->default_local_pref; + } + } + else + { + if (attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)) + { + local_pref = attr->local_pref; + } + else + { + if (peer && peer->bgp) + { + local_pref = peer->bgp->default_local_pref; + } + } + } + + return local_pref; +} + +static int +is_host_prefix (struct prefix *p) +{ + switch (p->family) + { + case AF_INET: + return (p->prefixlen == 32); + case AF_INET6: + return (p->prefixlen == 128); + } + return 0; +} + +/*********************************************************************** + * RHN list + ***********************************************************************/ + +struct prefix_bag +{ + struct prefix hpfx; /* ce address = unicast nexthop */ + struct prefix upfx; /* unicast prefix */ + struct bgp_info *ubi; /* unicast route */ +}; + +static const u_char maskbit[] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, + 0xf8, 0xfc, 0xfe, 0xff +}; + +int +vnc_prefix_cmp (void *pfx1, void *pfx2) +{ + int offset; + int shift; + u_char mask; + + struct prefix *p1 = pfx1; + struct prefix *p2 = pfx2; + + if (p1->family < p2->family) + return -1; + if (p1->family > p2->family) + return 1; + + if (p1->prefixlen < p2->prefixlen) + return -1; + if (p1->prefixlen > p2->prefixlen) + return 1; + + offset = p1->prefixlen / 8; + shift = p1->prefixlen % 8; + if (shift == 0 && offset) + { /* 
catch aligned case */ + offset--; + shift = 8; + } + + /* Set both prefix's head pointer. */ + const u_char *pp1 = (const u_char *) &p1->u.prefix; + const u_char *pp2 = (const u_char *) &p2->u.prefix; + + while (offset--) + { + if (*pp1 < *pp2) + return -1; + if (*pp1 > *pp2) + return 1; + ++pp1; + ++pp2; + } + + mask = maskbit[shift]; + if ((*pp1 & mask) < (*pp2 & mask)) + return -1; + if ((*pp1 & mask) > (*pp2 & mask)) + return 1; + + return 0; +} + +static void +prefix_bag_free (void *pb) +{ + XFREE (MTYPE_RFAPI_PREFIX_BAG, pb); +} + +#if DEBUG_RHN_LIST +static void +print_rhn_list (const char *tag1, const char *tag2) +{ + struct bgp *bgp = bgp_get_default (); + struct skiplist *sl = bgp->rfapi->resolve_nve_nexthop; + struct skiplistnode *p; + struct prefix_bag *pb; + int count = 0; + + if (!sl) + { + zlog_debug ("%s: %s: RHN List is empty", (tag1 ? tag1 : ""), + (tag2 ? tag2 : "")); + return; + } + + zlog_debug ("%s: %s: RHN list:", (tag1 ? tag1 : ""), (tag2 ? tag2 : "")); + + /* XXX uses secret knowledge of skiplist structure */ + for (p = sl->header->forward[0]; p; p = p->forward[0]) + { + char kbuf[BUFSIZ]; + char hbuf[BUFSIZ]; + char ubuf[BUFSIZ]; + + pb = p->value; + + prefix2str (p->key, kbuf, BUFSIZ); + prefix2str (&pb->hpfx, hbuf, BUFSIZ); + prefix2str (&pb->upfx, ubuf, BUFSIZ); + + zlog_debug ("RHN Entry %d (q=%p): kpfx=%s, upfx=%s, hpfx=%s, ubi=%p", + ++count, p, kbuf, ubuf, hbuf, pb->ubi); + } +} +#endif + +#ifdef ENABLE_VNC_RHNCK +static void +vnc_rhnck (char *tag) +{ + struct bgp *bgp; + struct skiplist *sl; + struct skiplistnode *p; + + bgp = bgp_get_default (); + sl = bgp->rfapi->resolve_nve_nexthop; + + if (!sl) + return; + + /* XXX uses secret knowledge of skiplist structure */ + for (p = sl->header->forward[0]; p; p = p->forward[0]) + { + struct prefix_bag *pb; + struct prefix *pkey; + afi_t afi; + struct prefix pfx_orig_nexthop; + + memset (&pfx_orig_nexthop, 0, sizeof (struct prefix)); /* keep valgrind happy */ + + pkey = p->key; + pb = p->value; + + afi = family2afi (pb->upfx.family); + + rfapiUnicastNexthop2Prefix (afi, pb->ubi->attr, &pfx_orig_nexthop); + + /* pb->hpfx, pb->ubi nexthop, pkey should all reflect the same pfx */ + assert (!vnc_prefix_cmp (&pb->hpfx, pkey)); + if (vnc_prefix_cmp (&pb->hpfx, &pfx_orig_nexthop)) + { + char str_onh[BUFSIZ]; + char str_nve_pfx[BUFSIZ]; + + prefix2str (&pfx_orig_nexthop, str_onh, BUFSIZ); + str_onh[BUFSIZ - 1] = 0; + + prefix2str (&pb->hpfx, str_nve_pfx, BUFSIZ); + str_nve_pfx[BUFSIZ - 1] = 0; + + zlog_debug + ("%s: %s: FATAL: resolve_nve_nexthop list item bi nexthop %s != nve pfx %s", + __func__, tag, str_onh, str_nve_pfx); + assert (0); + } + } + zlog_debug ("%s: vnc_rhnck OK", tag); +} + +#define VNC_RHNCK(n) do {char buf[BUFSIZ];sprintf(buf,"%s: %s", __func__, #n);vnc_rhnck(buf);} while (0) + +#else + +#define VNC_RHNCK(n) + +#endif + +/*********************************************************************** + * Add/Delete Unicast Route + ***********************************************************************/ + +/* + * "Adding a Route" import process + */ + +/* + * extract and package information from the BGP unicast route. + * Return code 0 means OK, non-0 means drop. 
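+ * (A non-zero return means the route was denied by the
+ * ZEBRA_ROUTE_BGP_DIRECT redistribution prefix-list or route-map and
+ * should simply be skipped by the caller.)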
+ * + * If return code is 0, caller MUST release ecom + */ +static int +process_unicast_route ( + struct bgp *bgp, /* in */ + afi_t afi, /* in */ + struct prefix *prefix, /* in */ + struct bgp_info *info, /* in */ + struct ecommunity **ecom, /* OUT */ + struct prefix *unicast_nexthop) /* OUT */ +{ + struct rfapi_cfg *hc = bgp->rfapi_cfg; + struct peer *peer = info->peer; + struct attr *attr = info->attr; + struct attr hattr; + struct route_map *rmap = NULL; + struct prefix pfx_orig_nexthop; + + memset (&pfx_orig_nexthop, 0, sizeof (struct prefix)); /* keep valgrind happy */ + + /* + * prefix list check + */ + if (hc->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi]) + { + zlog_debug ("%s: HC prefix list is set, checking", __func__); + if (prefix_list_apply + (hc->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi], + prefix) == PREFIX_DENY) + { + zlog_debug ("%s: prefix list returns DENY, blocking route", + __func__); + return -1; + } + zlog_debug ("%s: prefix list returns PASS, allowing route", __func__); + } + + /* apply routemap, if any, later */ + rmap = hc->routemap_redist[ZEBRA_ROUTE_BGP_DIRECT]; + + /* + * Extract original nexthop, which we expect to be a NVE connected router + * Note that this is the nexthop before any possible application of policy + */ + /* + * Incoming prefix is unicast. If v6, it is in multiprotocol area, + * but if v4 it is in attr->nexthop + */ + rfapiUnicastNexthop2Prefix (afi, attr, &pfx_orig_nexthop); + + /* + * route map handling + * This code is here because it allocates an interned attr which + * must be freed before we return. It's easier to put it after + * all of the possible returns above. + */ + memset (&hattr, 0, sizeof (struct attr)); + bgp_attr_dup (&hattr, attr); /* hattr becomes a ghost attr */ + + if (rmap) + { + struct bgp_info info; + route_map_result_t ret; + + memset (&info, 0, sizeof (info)); + info.peer = peer; + info.attr = &hattr; + ret = route_map_apply (rmap, prefix, RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + zlog_debug ("%s: route map \"%s\" says DENY, returning", __func__, + rmap->name); + return -1; + } + } + + /* + * Get the (possibly altered by policy) unicast nexthop + * for later lookup in the Import Table by caller + */ + rfapiUnicastNexthop2Prefix (afi, &hattr, unicast_nexthop); + + if (hattr.extra && hattr.extra->ecommunity) + *ecom = ecommunity_dup (hattr.extra->ecommunity); + else + *ecom = ecommunity_new (); + + /* + * Done with hattr, clean up + */ + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + /* + * Add EC that carries original NH of iBGP route (2 bytes = magic + * value indicating it came from an VNC gateway; default 5226, but + * must be user configurable). Note that this is the nexthop before + * any application of policy. 
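+ * The encoding is a route-origin extended community: type octets
+ * 0x01 0x03, four octets of original IPv4 next hop, then the
+ * two-octet local administrator taken from resolve_nve_roo_local_admin.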
+ */ + { + struct ecommunity_val vnc_gateway_magic; + uint16_t localadmin; + + /* Using route origin extended community type */ + memset (&vnc_gateway_magic, 0, sizeof (vnc_gateway_magic)); + vnc_gateway_magic.val[0] = 0x01; + vnc_gateway_magic.val[1] = 0x03; + + /* Only works for IPv4 nexthops */ + if (prefix->family == AF_INET) + { + memcpy (vnc_gateway_magic.val + 2, &unicast_nexthop->u.prefix4, 4); + } + localadmin = htons (hc->resolve_nve_roo_local_admin); + memcpy (vnc_gateway_magic.val + 6, (char *) &localadmin, 2); + + ecommunity_add_val (*ecom, &vnc_gateway_magic); + } + + return 0; +} + + +static void +vnc_import_bgp_add_route_mode_resolve_nve_one_bi ( + struct bgp *bgp, + afi_t afi, + struct bgp_info *bi, /* VPN bi */ + struct prefix_rd *prd, /* RD */ + struct prefix *prefix, /* unicast route prefix */ + uint32_t *local_pref,/* NULL = no local_pref */ + uint32_t *med, /* NULL = no med */ + struct ecommunity *ecom) /* generated ecoms */ +{ + struct prefix un; + struct prefix nexthop; + struct rfapi_ip_addr nexthop_h; + uint32_t lifetime; + uint32_t *plifetime; + struct bgp_attr_encap_subtlv *encaptlvs; + + zlog_debug ("%s: entry", __func__); + + if (bi->type != ZEBRA_ROUTE_BGP && bi->type != ZEBRA_ROUTE_BGP_DIRECT) + { + + return; + } + if (bi->sub_type != BGP_ROUTE_NORMAL && + bi->sub_type != BGP_ROUTE_STATIC && bi->sub_type != BGP_ROUTE_RFP) + { + + return; + } + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + return; + + vncHDResolveNve.peer = bi->peer; + if (!rfapiGetVncTunnelUnAddr (bi->attr, &un)) + { + if (rfapiQprefix2Raddr (&un, &vncHDResolveNve.un_addr)) + return; + } + else + { + memset (&vncHDResolveNve.un_addr, 0, sizeof (vncHDResolveNve.un_addr)); + } + + /* Use nexthop of VPN route as nexthop of constructed route */ + rfapiNexthop2Prefix (bi->attr, &nexthop); + rfapiQprefix2Raddr (&nexthop, &nexthop_h); + + if (rfapiGetVncLifetime (bi->attr, &lifetime)) + { + plifetime = NULL; + } + else + { + plifetime = &lifetime; + } + + if (bi->attr && bi->attr->extra) + { + encaptlvs = bi->attr->extra->vnc_subtlvs; + } + else + { + encaptlvs = NULL; + } + + struct ecommunity *new_ecom = ecommunity_dup (ecom); + + if (bi->attr && bi->attr->extra && bi->attr->extra->ecommunity) + ecommunity_merge (new_ecom, bi->attr->extra->ecommunity); + + add_vnc_route ( + &vncHDResolveNve, + bgp, + SAFI_MPLS_VPN, + prefix, /* unicast route prefix */ + prd, + &nexthop_h, /* new nexthop */ + local_pref, + plifetime, + (struct bgp_tea_options *) encaptlvs, /* RFP options */ + NULL, + NULL, + new_ecom, + med, /* NULL => don't set med */ + NULL, /* label: default */ + ZEBRA_ROUTE_BGP_DIRECT, + BGP_ROUTE_REDISTRIBUTE, + RFAPI_AHR_RFPOPT_IS_VNCTLV); /* flags */ + + ecommunity_free (&new_ecom); + +} + +static void +vnc_import_bgp_add_route_mode_resolve_nve_one_rd ( + struct prefix_rd *prd, /* RD */ + struct bgp_table *table_rd, /* per-rd VPN route table */ + afi_t afi, + struct bgp *bgp, + struct prefix *prefix, /* unicast prefix */ + struct ecommunity *ecom, /* generated ecoms */ + uint32_t *local_pref, /* NULL = no local_pref */ + uint32_t *med, /* NULL = no med */ + struct prefix *ubi_nexthop) /* unicast nexthop */ +{ + struct bgp_node *bn; + struct bgp_info *bi; + + if (!table_rd) + return; + + { + char str_nh[BUFSIZ]; + + prefix2str (ubi_nexthop, str_nh, BUFSIZ); + str_nh[BUFSIZ - 1] = 0; + + zlog_debug ("%s: ubi_nexthop=%s", __func__, str_nh); + } + + /* exact match */ + bn = bgp_node_lookup (table_rd, ubi_nexthop); + if (!bn) + { + zlog_debug ("%s: no match in RD's table for ubi_nexthop", 
__func__); + return; + } + + /* Iterate over bgp_info items at this node */ + for (bi = bn->info; bi; bi = bi->next) + { + + vnc_import_bgp_add_route_mode_resolve_nve_one_bi (bgp, afi, bi, /* VPN bi */ + prd, + prefix, + local_pref, + med, ecom); + } + + bgp_unlock_node (bn); +} + +static void +vnc_import_bgp_add_route_mode_resolve_nve ( + struct bgp *bgp, + struct prefix *prefix,/* unicast prefix */ + struct bgp_info *info) /* unicast info */ +{ + afi_t afi = family2afi (prefix->family); + struct rfapi_cfg *hc = NULL; + + struct prefix pfx_unicast_nexthop = { 0 }; /* happy valgrind */ + + struct ecommunity *ecom = NULL; + uint32_t local_pref; + uint32_t *med = NULL; + + struct prefix_bag *pb; + struct bgp_node *bnp; /* prd table node */ + + /*debugging */ + { + char str_pfx[BUFSIZ]; + char str_nh[BUFSIZ]; + struct prefix nh; + + prefix2str (prefix, str_pfx, BUFSIZ); + str_pfx[BUFSIZ - 1] = 0; + + nh.prefixlen = 0; + rfapiUnicastNexthop2Prefix (afi, info->attr, &nh); + if (nh.prefixlen) + { + prefix2str (&nh, str_nh, BUFSIZ); + str_nh[BUFSIZ - 1] = 0; + } + else + { + str_nh[0] = '?'; + str_nh[1] = 0; + } + + zlog_debug ("%s(bgp=%p, unicast prefix=%s, unicast nh=%s)", + __func__, bgp, str_pfx, str_nh); + } + + if (info->type != ZEBRA_ROUTE_BGP) + { + zlog_debug ("%s: unicast type %d=\"%s\" is not %d=%s, skipping", + __func__, info->type, zebra_route_string (info->type), + ZEBRA_ROUTE_BGP, "ZEBRA_ROUTE_BGP"); + return; + } + + /* + * Preliminary checks + */ + + if (!afi) + { + zlog_err ("%s: can't get afi of prefix", __func__); + return; + } + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* check vnc redist flag for bgp direct routes */ + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=ZEBRA_ROUTE_BGP_DIRECT] is 0, skipping", + __func__, afi); + return; + } + + + if (process_unicast_route (bgp, afi, prefix, info, + &ecom, &pfx_unicast_nexthop)) + { + + zlog_debug ("%s: process_unicast_route error, skipping", __func__); + return; + } + + local_pref = calc_local_pref (info->attr, info->peer); + if (info->attr && + (info->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC))) + { + + med = &info->attr->med; + } + + + /* + * At this point, we have allocated: + * + * ecom ecommunity ptr, union of unicast and ROO parts (no NVE part) + * + * And we have set: + * + * pfx_unicast_nexthop nexthop of uncast route + */ + + if (!bgp->rfapi->resolve_nve_nexthop) + { + bgp->rfapi->resolve_nve_nexthop = + skiplist_new (SKIPLIST_FLAG_ALLOW_DUPLICATES, vnc_prefix_cmp, + prefix_bag_free); + } + + pb = XCALLOC (MTYPE_RFAPI_PREFIX_BAG, sizeof (struct prefix_bag)); + pb->hpfx = pfx_unicast_nexthop; + pb->ubi = info; + pb->upfx = *prefix; + + bgp_info_lock (info); /* skiplist refers to it */ + skiplist_insert (bgp->rfapi->resolve_nve_nexthop, &pb->hpfx, pb); + + /* + * Iterate over RDs in VPN RIB. For each RD, look up unicast nexthop + * (exact match, /32). If an exact match is found, call add_vnc_route. 
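+ * (The per-RD walk below delegates the exact-match lookup to
+ * vnc_import_bgp_add_route_mode_resolve_nve_one_rd(), which in turn
+ * calls ..._one_bi()/add_vnc_route() for every usable VPN route found
+ * at the matching node.)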
+ */ + + for (bnp = bgp_table_top (bgp->rib[afi][SAFI_MPLS_VPN]); bnp; + bnp = bgp_route_next (bnp)) + { + + struct bgp_table *table; + + table = (struct bgp_table *) (bnp->info); + + if (!table) + continue; + + vnc_import_bgp_add_route_mode_resolve_nve_one_rd ((struct prefix_rd *) + &bnp->p, table, afi, + bgp, prefix, ecom, + &local_pref, med, + &pfx_unicast_nexthop); + + } + + + if (ecom) + ecommunity_free (&ecom); + + zlog_debug ("%s: done", __func__); +} + + +static void +vnc_import_bgp_add_route_mode_plain (struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info) +{ + afi_t afi = family2afi (prefix->family); + struct peer *peer = info->peer; + struct attr *attr = info->attr; + struct attr hattr; + struct rfapi_cfg *hc = NULL; + struct attr *iattr = NULL; + + struct rfapi_ip_addr vnaddr; + struct prefix vn_pfx_space; + struct prefix *vn_pfx = NULL; + int ahr_flags = 0; + struct ecommunity *ecom = NULL; + struct prefix_rd prd; + struct route_map *rmap = NULL; + uint32_t local_pref; + uint32_t *med = NULL; + + { + char buf[BUFSIZ]; + + buf[0] = 0; + prefix2str (prefix, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s(prefix=%s) entry", __func__, buf); + } + + if (!afi) + { + zlog_err ("%s: can't get afi of prefix", __func__); + return; + } + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* check vnc redist flag for bgp direct routes */ + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=ZEBRA_ROUTE_BGP_DIRECT] is 0, skipping", + __func__, afi); + return; + } + + /* + * mode "plain" specific code + */ + { + zlog_debug ("%s: NOT using redist RFG", __func__); + + /* + * prefix list check + */ + if (hc->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi]) + { + zlog_debug ("%s: HC prefix list is set, checking", __func__); + if (prefix_list_apply + (hc->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi], + prefix) == PREFIX_DENY) + { + zlog_debug ("%s: prefix list returns DENY, blocking route", + __func__); + return; + } + zlog_debug ("%s: prefix list returns PASS, allowing route", __func__); + } + + /* apply routemap, if any, later */ + rmap = hc->routemap_redist[ZEBRA_ROUTE_BGP_DIRECT]; + + /* + * Incoming prefix is unicast. If v6, it is in multiprotocol area, + * but if v4 it is in attr->nexthop + */ + rfapiUnicastNexthop2Prefix (afi, attr, &vn_pfx_space); + vn_pfx = &vn_pfx_space; + + /* UN address */ + ahr_flags |= RFAPI_AHR_NO_TUNNEL_SUBTLV; + } + + if (VNC_DEBUG(IMPORT_BGP_ADD_ROUTE)) + { + char buf[BUFSIZ]; + + buf[0] = 0; + prefix2str (vn_pfx, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s vn_pfx=%s", __func__, buf); + } + + /* + * Compute VN address + */ + if (rfapiQprefix2Raddr (vn_pfx, &vnaddr)) + { + zlog_debug ("%s: redist VN invalid, skipping", __func__); + return; + } + + /* + * route map handling + * This code is here because it allocates an interned attr which + * must be freed before we return. It's easier to put it after + * all of the possible returns above. 
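+ *
+ * The pattern, roughly: bgp_attr_dup() into a stack "ghost" attr, run
+ * the route-map against it, bgp_attr_intern() the survivor, then
+ * bgp_attr_flush()/bgp_attr_extra_free() the ghost copy.  Only the
+ * interned attr (iattr) lives past this block, and it must be released
+ * with bgp_attr_unintern() on any later bail-out path (see the VN
+ * address-family check below).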
+ */ + memset (&hattr, 0, sizeof (struct attr)); + bgp_attr_dup (&hattr, attr); /* hattr becomes a ghost attr */ + + if (rmap) + { + struct bgp_info info; + route_map_result_t ret; + + memset (&info, 0, sizeof (info)); + info.peer = peer; + info.attr = &hattr; + ret = route_map_apply (rmap, prefix, RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + zlog_debug ("%s: route map \"%s\" says DENY, returning", __func__, + rmap->name); + return; + } + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + /* Now iattr is an allocated interned attr */ + + /* + * Mode "plain" specific code + * + * Sets RD in dummy HD + * Allocates ecom + */ + { + if (vnaddr.addr_family != AF_INET) + { + zlog_debug + ("%s: can't auto-assign RD, VN AF (%d) is not IPv4, skipping", + __func__, vnaddr.addr_family); + if (iattr) + { + bgp_attr_unintern (&iattr); + } + return; + } + memset (&prd, 0, sizeof (prd)); + rfapi_set_autord_from_vn (&prd, &vnaddr); + + if (iattr && iattr->extra && iattr->extra->ecommunity) + ecom = ecommunity_dup (iattr->extra->ecommunity); + + } + + local_pref = calc_local_pref (iattr, peer); + + if (iattr && (iattr->flag & ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC))) + { + med = &iattr->med; + } + + if (VNC_DEBUG(IMPORT_BGP_ADD_ROUTE)) + { + char buf[BUFSIZ]; + + buf[0] = 0; + rfapiRfapiIpAddr2Str (&vnaddr, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s: setting vnaddr to %s", __func__, buf); + } + + vncHDBgpDirect.peer = peer; + add_vnc_route (&vncHDBgpDirect, bgp, SAFI_MPLS_VPN, prefix, &prd, &vnaddr, &local_pref, &(bgp->rfapi_cfg->redist_lifetime), NULL, /* RFP options */ + NULL, NULL, ecom, med, /* med */ + NULL, /* label: default */ + ZEBRA_ROUTE_BGP_DIRECT, BGP_ROUTE_REDISTRIBUTE, ahr_flags); + vncHDBgpDirect.peer = NULL; + + if (ecom) + ecommunity_free (&ecom); +} + +static void +vnc_import_bgp_add_route_mode_nvegroup (struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info, + struct rfapi_nve_group_cfg *rfg) +{ + afi_t afi = family2afi (prefix->family); + struct peer *peer = info->peer; + struct attr *attr = info->attr; + struct attr hattr; + struct rfapi_cfg *hc = NULL; + struct attr *iattr = NULL; + + struct rfapi_ip_addr vnaddr; + struct prefix *vn_pfx = NULL; + int ahr_flags = 0; + struct ecommunity *ecom = NULL; + struct prefix_rd prd; + struct route_map *rmap = NULL; + uint32_t local_pref; + + { + char buf[BUFSIZ]; + + buf[0] = 0; + prefix2str (prefix, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s(prefix=%s) entry", __func__, buf); + } + + assert (rfg); + + if (!afi) + { + zlog_err ("%s: can't get afi of prefix", __func__); + return; + } + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* check vnc redist flag for bgp direct routes */ + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=ZEBRA_ROUTE_BGP_DIRECT] is 0, skipping", + __func__, afi); + return; + } + + + /* + * RFG-specific code + */ + { + + struct rfapi_ip_prefix pfx_un; + + zlog_debug ("%s: using redist RFG", __func__); + + /* + * RFG prefix list check + */ + if (rfg->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi]) + { + zlog_debug ("%s: RFG prefix list is set, checking", __func__); + if (prefix_list_apply + (rfg->plist_redist[ZEBRA_ROUTE_BGP_DIRECT][afi], + prefix) == PREFIX_DENY) + { + zlog_debug ("%s: prefix list returns DENY, blocking route", + 
__func__); + return; + } + zlog_debug ("%s: prefix list returns PASS, allowing route", __func__); + } + + /* apply routemap, if any, later */ + rmap = rfg->routemap_redist[ZEBRA_ROUTE_BGP_DIRECT]; + + /* + * export nve group's VN addr prefix must be a /32 which + * will yield the VN addr to use + */ + vn_pfx = &rfg->vn_prefix; + + /* + * UN Address + */ + if (!is_host_prefix (&rfg->un_prefix)) + { + /* NB prefixlen==0 means it has not been configured */ + zlog_debug ("%s: redist RFG UN pfx not host pfx (plen=%d), skipping", + __func__, rfg->un_prefix.prefixlen); + return; + } + + rfapiQprefix2Rprefix (&rfg->un_prefix, &pfx_un); + + vncHDBgpDirect.un_addr = pfx_un.prefix; + } + + if (VNC_DEBUG(IMPORT_BGP_ADD_ROUTE)) + { + char buf[BUFSIZ]; + + buf[0] = 0; + prefix2str (vn_pfx, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s vn_pfx=%s", __func__, buf); + } + + /* + * Compute VN address + */ + if (rfapiQprefix2Raddr (vn_pfx, &vnaddr)) + { + zlog_debug ("%s: redist VN invalid, skipping", __func__); + return; + } + + /* + * route map handling + * This code is here because it allocates an interned attr which + * must be freed before we return. It's easier to put it after + * all of the possible returns above. + */ + memset (&hattr, 0, sizeof (struct attr)); + bgp_attr_dup (&hattr, attr); /* hattr becomes a ghost attr */ + + if (rmap) + { + struct bgp_info info; + route_map_result_t ret; + + memset (&info, 0, sizeof (info)); + info.peer = peer; + info.attr = &hattr; + ret = route_map_apply (rmap, prefix, RMAP_BGP, &info); + if (ret == RMAP_DENYMATCH) + { + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + zlog_debug ("%s: route map \"%s\" says DENY, returning", __func__, + rmap->name); + return; + } + } + + iattr = bgp_attr_intern (&hattr); + bgp_attr_flush (&hattr); + bgp_attr_extra_free (&hattr); + + /* Now iattr is an allocated interned attr */ + + /* + * RFG-specific code + * + * Sets RD in dummy HD + * Allocates ecom + */ + { + + memset (&prd, 0, sizeof (prd)); + prd = rfg->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + if (rfg->rd.family == AF_UNIX) + { + rfapi_set_autord_from_vn (&prd, &vnaddr); + } + + if (rfg->rt_export_list) + ecom = ecommunity_dup (bgp->rfapi_cfg->rfg_redist->rt_export_list); + else + ecom = ecommunity_new (); + + if (iattr && iattr->extra && iattr->extra->ecommunity) + ecom = ecommunity_merge (ecom, iattr->extra->ecommunity); + } + + local_pref = calc_local_pref (iattr, peer); + + if (VNC_DEBUG(IMPORT_BGP_ADD_ROUTE)) + { + char buf[BUFSIZ]; + + buf[0] = 0; + rfapiRfapiIpAddr2Str (&vnaddr, buf, BUFSIZ); + buf[BUFSIZ - 1] = 0; + zlog_debug ("%s: setting vnaddr to %s", __func__, buf); + } + + vncHDBgpDirect.peer = peer; + add_vnc_route ( + &vncHDBgpDirect, + bgp, + SAFI_MPLS_VPN, + prefix, + &prd, + &vnaddr, + &local_pref, + &(bgp->rfapi_cfg->redist_lifetime), + NULL, /* RFP options */ + NULL, + NULL, + ecom, + NULL, /* med */ + NULL, /* label: default */ + ZEBRA_ROUTE_BGP_DIRECT, + BGP_ROUTE_REDISTRIBUTE, + ahr_flags); + vncHDBgpDirect.peer = NULL; + + if (ecom) + ecommunity_free (&ecom); +} + +static void +vnc_import_bgp_del_route_mode_plain (struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info) +{ + struct prefix_rd prd; + afi_t afi = family2afi (prefix->family); + struct prefix *vn_pfx = NULL; + struct rfapi_ip_addr vnaddr; + struct prefix vn_pfx_space; + + + assert (afi); + + /* + * Compute VN address + */ + + if (info && info->attr) + { + rfapiUnicastNexthop2Prefix (afi, info->attr, &vn_pfx_space); + } + else + { + zlog_debug 
("%s: no attr, can't delete route", __func__); + return; + } + vn_pfx = &vn_pfx_space; + + vnaddr.addr_family = vn_pfx->family; + switch (vn_pfx->family) + { + case AF_INET: + if (vn_pfx->prefixlen != 32) + { + zlog_debug ("%s: redist VN plen (%d) != 32, skipping", + __func__, vn_pfx->prefixlen); + return; + } + vnaddr.addr.v4 = vn_pfx->u.prefix4; + break; + + case AF_INET6: + if (vn_pfx->prefixlen != 128) + { + zlog_debug ("%s: redist VN plen (%d) != 128, skipping", + __func__, vn_pfx->prefixlen); + return; + } + vnaddr.addr.v6 = vn_pfx->u.prefix6; + break; + + default: + zlog_debug ("%s: no redist RFG VN host pfx configured, skipping", + __func__); + return; + } + + + memset (&prd, 0, sizeof (prd)); + if (rfapi_set_autord_from_vn (&prd, &vnaddr)) + { + zlog_debug ("%s: can't auto-assign RD, skipping", __func__); + return; + } + + vncHDBgpDirect.peer = info->peer; + zlog_debug ("%s: setting peer to %p", __func__, vncHDBgpDirect.peer); + del_vnc_route (&vncHDBgpDirect, + info->peer, + bgp, + SAFI_MPLS_VPN, + prefix, + &prd, + ZEBRA_ROUTE_BGP_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, 1); + + vncHDBgpDirect.peer = NULL; +} + +static void +vnc_import_bgp_del_route_mode_nvegroup (struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info) +{ + struct prefix_rd prd; + afi_t afi = family2afi (prefix->family); + struct rfapi_nve_group_cfg *rfg = NULL; + struct prefix *vn_pfx = NULL; + struct rfapi_ip_addr vnaddr; + + + assert (afi); + + assert ((rfg = bgp->rfapi_cfg->rfg_redist)); + + /* + * Compute VN address + */ + + /* + * export nve group's VN addr prefix must be a /32 which + * will yield the VN addr to use + */ + vn_pfx = &rfg->vn_prefix; + + + vnaddr.addr_family = vn_pfx->family; + switch (vn_pfx->family) + { + case AF_INET: + if (vn_pfx->prefixlen != 32) + { + zlog_debug ("%s: redist VN plen (%d) != 32, skipping", + __func__, vn_pfx->prefixlen); + return; + } + vnaddr.addr.v4 = vn_pfx->u.prefix4; + break; + + case AF_INET6: + if (vn_pfx->prefixlen != 128) + { + zlog_debug ("%s: redist VN plen (%d) != 128, skipping", + __func__, vn_pfx->prefixlen); + return; + } + vnaddr.addr.v6 = vn_pfx->u.prefix6; + break; + + default: + zlog_debug ("%s: no redist RFG VN host pfx configured, skipping", + __func__); + return; + } + + memset (&prd, 0, sizeof (prd)); + prd = rfg->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + if (rfg->rd.family == AF_UNIX) + { + /* means "auto" with VN addr */ + if (rfapi_set_autord_from_vn (&prd, &vnaddr)) + { + zlog_debug ("%s: can't auto-assign RD, skipping", __func__); + return; + } + } + + + vncHDBgpDirect.peer = info->peer; + zlog_debug ("%s: setting peer to %p", __func__, vncHDBgpDirect.peer); + del_vnc_route (&vncHDBgpDirect, + info->peer, + bgp, + SAFI_MPLS_VPN, + prefix, + &prd, + ZEBRA_ROUTE_BGP_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, 1); + + vncHDBgpDirect.peer = NULL; +} + +static void +vnc_import_bgp_del_route_mode_resolve_nve_one_bi ( + struct bgp *bgp, + afi_t afi, + struct bgp_info *bi, /* VPN bi */ + struct prefix_rd *prd, /* RD */ + struct prefix *prefix)/* unicast route prefix */ +{ + struct prefix un; + + if (bi->type != ZEBRA_ROUTE_BGP && bi->type != ZEBRA_ROUTE_BGP_DIRECT) + { + + return; + } + if (bi->sub_type != BGP_ROUTE_NORMAL && + bi->sub_type != BGP_ROUTE_STATIC && bi->sub_type != BGP_ROUTE_RFP) + { + + return; + } + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + return; + + vncHDResolveNve.peer = bi->peer; + if (!rfapiGetVncTunnelUnAddr (bi->attr, &un)) + { + if (rfapiQprefix2Raddr (&un, &vncHDResolveNve.un_addr)) + return; + } + 
else + { + memset (&vncHDResolveNve.un_addr, 0, sizeof (vncHDResolveNve.un_addr)); + } + + del_vnc_route (&vncHDResolveNve, vncHDResolveNve.peer, bgp, SAFI_MPLS_VPN, prefix, /* unicast route prefix */ + prd, ZEBRA_ROUTE_BGP_DIRECT, BGP_ROUTE_REDISTRIBUTE, NULL, 0); /* flags */ + +} + +static void +vnc_import_bgp_del_route_mode_resolve_nve_one_rd ( + struct prefix_rd *prd, + struct bgp_table *table_rd, /* per-rd VPN route table */ + afi_t afi, + struct bgp *bgp, + struct prefix *prefix, /* unicast prefix */ + struct prefix *ubi_nexthop) /* unicast bi's nexthop */ +{ + struct bgp_node *bn; + struct bgp_info *bi; + + if (!table_rd) + return; + + { + char str_nh[BUFSIZ]; + + prefix2str (ubi_nexthop, str_nh, BUFSIZ); + str_nh[BUFSIZ - 1] = 0; + + zlog_debug ("%s: ubi_nexthop=%s", __func__, str_nh); + } + + + /* exact match */ + bn = bgp_node_lookup (table_rd, ubi_nexthop); + if (!bn) + { + zlog_debug ("%s: no match in RD's table for ubi_nexthop", __func__); + return; + } + + /* Iterate over bgp_info items at this node */ + for (bi = bn->info; bi; bi = bi->next) + { + + vnc_import_bgp_del_route_mode_resolve_nve_one_bi (bgp, afi, bi, /* VPN bi */ + prd, /* VPN RD */ + prefix); /* unicast route prefix */ + } + + bgp_unlock_node (bn); +} + +static void +vnc_import_bgp_del_route_mode_resolve_nve (struct bgp *bgp, + afi_t afi, + struct prefix *prefix, + struct bgp_info *info) +{ + struct ecommunity *ecom = NULL; + struct prefix pfx_unicast_nexthop = { 0 }; /* happy valgrind */ + + //struct listnode *hnode; + //struct rfapi_descriptor *rfd; + struct prefix_bag *pb; + void *cursor; + struct skiplist *sl = bgp->rfapi->resolve_nve_nexthop; + int rc; + struct bgp_node *bnp; /* prd table node */ + + if (!sl) + { + zlog_debug ("%s: no RHN entries, skipping", __func__); + return; + } + + if (info->type != ZEBRA_ROUTE_BGP) + { + zlog_debug ("%s: unicast type %d=\"%s\" is not %d=%s, skipping", + __func__, info->type, zebra_route_string (info->type), + ZEBRA_ROUTE_BGP, "ZEBRA_ROUTE_BGP"); + return; + } + + if (process_unicast_route (bgp, afi, prefix, info, + &ecom, &pfx_unicast_nexthop)) + { + + zlog_debug ("%s: process_unicast_route error, skipping", __func__); + return; + } + + rc = skiplist_first_value (sl, &pfx_unicast_nexthop, (void *) &pb, &cursor); + while (!rc) + { + if (pb->ubi == info) + { + skiplist_delete (sl, &pfx_unicast_nexthop, pb); + bgp_info_unlock (info); + break; + } + rc = + skiplist_next_value (sl, &pfx_unicast_nexthop, (void *) &pb, &cursor); + } + + /* + * Iterate over RDs in VPN RIB. For each RD, look up unicast nexthop + * (exact match, /32). If an exact match is found, call add_vnc_route. + */ + + for (bnp = bgp_table_top (bgp->rib[afi][SAFI_MPLS_VPN]); bnp; + bnp = bgp_route_next (bnp)) + { + + struct bgp_table *table; + + table = (struct bgp_table *) (bnp->info); + + if (!table) + continue; + + vnc_import_bgp_del_route_mode_resolve_nve_one_rd ((struct prefix_rd *) &bnp->p, table, afi, bgp, prefix, &pfx_unicast_nexthop); /* TBD how is this set? */ + } + + if (ecom) + ecommunity_free (&ecom); +} + + + + +/*********************************************************************** + * Add/Delete CE->NVE routes + ***********************************************************************/ + +/* + * Should be called whan a bi is added to VPN RIB. This function + * will check if it is a host route and return immediately if not. 
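+ *
+ * In outline: for a qualifying host route, every RHN skiplist entry
+ * keyed by this VPN prefix is revisited; the saved unicast route is
+ * re-run through process_unicast_route() and a VNC route for its
+ * prefix is constructed against this bi (RD, nexthop, attributes) via
+ * vnc_import_bgp_add_route_mode_resolve_nve_one_bi().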
+ */ +void +vnc_import_bgp_add_vnc_host_route_mode_resolve_nve ( + struct bgp *bgp, + struct prefix_rd *prd, /* RD */ + struct bgp_table *table_rd, /* per-rd VPN route table */ + struct prefix *prefix, /* VPN prefix */ + struct bgp_info *bi) /* new VPN host route */ +{ + afi_t afi = family2afi (prefix->family); + struct skiplist *sl = NULL; + int rc; + struct prefix_bag *pb; + void *cursor; + struct rfapi_cfg *hc = NULL; + + zlog_debug ("%s: entry", __func__); + + if (afi != AFI_IP && afi != AFI_IP6) + { + zlog_debug ("%s: bad afi %d, skipping", __func__, afi); + return; + } + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* check vnc redist flag for bgp direct routes */ + if (!hc->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=ZEBRA_ROUTE_BGP_DIRECT] is 0, skipping", + __func__, afi); + return; + } + + if (hc->redist_mode != VNC_REDIST_MODE_RESOLVE_NVE) + { + zlog_debug ("%s: not in resolve-nve mode, skipping", __func__); + return; + } + + if (bgp && bgp->rfapi) + sl = bgp->rfapi->resolve_nve_nexthop; + + if (!sl) + { + zlog_debug ("%s: no resolve_nve_nexthop skiplist, skipping", __func__); + return; + } + + if (!is_host_prefix (prefix)) + { + zlog_debug ("%s: not host prefix, skipping", __func__); + return; + } + + rc = skiplist_first_value (sl, prefix, (void *) &pb, &cursor); + while (!rc) + { + struct ecommunity *ecom; + struct prefix pfx_unicast_nexthop; + uint32_t *med = NULL; + uint32_t local_pref; + + memset (&pfx_unicast_nexthop, 0, sizeof (struct prefix)); /* keep valgrind happy */ + + if (VNC_DEBUG(IMPORT_BGP_ADD_ROUTE)) + { + char hbuf[BUFSIZ]; + char ubuf[BUFSIZ]; + + prefix2str (&pb->hpfx, hbuf, BUFSIZ); + prefix2str (&pb->upfx, ubuf, BUFSIZ); + + zlog_debug + ("%s: examining RHN Entry (q=%p): upfx=%s, hpfx=%s, ubi=%p", + __func__, cursor, ubuf, hbuf, pb->ubi); + } + + if (process_unicast_route (bgp, afi, &pb->upfx, pb->ubi, + &ecom, &pfx_unicast_nexthop)) + { + + zlog_debug ("%s: process_unicast_route error, skipping", __func__); + continue; + } + local_pref = calc_local_pref (pb->ubi->attr, pb->ubi->peer); + + if (pb->ubi->attr && + (pb->ubi->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_MULTI_EXIT_DISC))) + { + + med = &pb->ubi->attr->med; + } + + /* + * Sanity check + */ + if (vnc_prefix_cmp (&pfx_unicast_nexthop, prefix)) + { + char str_unh[BUFSIZ]; + char str_nve_pfx[BUFSIZ]; + + prefix2str (&pfx_unicast_nexthop, str_unh, BUFSIZ); + str_unh[BUFSIZ - 1] = 0; + + prefix2str (prefix, str_nve_pfx, BUFSIZ); + str_nve_pfx[BUFSIZ - 1] = 0; + + zlog_debug + ("%s: FATAL: resolve_nve_nexthop list item bi nexthop %s != nve pfx %s", + __func__, str_unh, str_nve_pfx); + assert (0); + } + + vnc_import_bgp_add_route_mode_resolve_nve_one_bi (bgp, afi, bi, /* VPN bi */ + prd, &pb->upfx, /* unicast prefix */ + &local_pref, + med, ecom); + + if (ecom) + ecommunity_free (&ecom); + +#if DEBUG_RHN_LIST + /* debug */ + { + char pbuf[BUFSIZ]; + + prefix2str (prefix, pbuf, BUFSIZ); + + zlog_debug ("%s: advancing past RHN Entry (q=%p): with prefix %s", + __func__, cursor, pbuf); + print_rhn_list (__func__, NULL); /* debug */ + } +#endif + rc = skiplist_next_value (sl, prefix, (void *) &pb, &cursor); + } + zlog_debug ("%s: done", __func__); +} + + +void +vnc_import_bgp_del_vnc_host_route_mode_resolve_nve ( + struct bgp *bgp, + struct prefix_rd *prd, /* RD */ + struct bgp_table *table_rd, /* per-rd VPN route table */ + struct prefix *prefix, /* VPN prefix */ + struct bgp_info *bi) /* 
old VPN host route */ +{ + afi_t afi = family2afi (prefix->family); + struct skiplist *sl = NULL; + struct prefix_bag *pb; + void *cursor; + struct rfapi_cfg *hc = NULL; + int rc; + + { + char str_pfx[BUFSIZ]; + + prefix2str (prefix, str_pfx, BUFSIZ); + str_pfx[BUFSIZ - 1] = 0; + + zlog_debug ("%s(bgp=%p, nve prefix=%s)", __func__, bgp, str_pfx); + } + + if (afi != AFI_IP && afi != AFI_IP6) + return; + + if (!(hc = bgp->rfapi_cfg)) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* check vnc redist flag for bgp direct routes */ + if (!hc->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=ZEBRA_ROUTE_BGP_DIRECT] is 0, skipping", + __func__, afi); + return; + } + + if (hc->redist_mode != VNC_REDIST_MODE_RESOLVE_NVE) + { + zlog_debug ("%s: not in resolve-nve mode, skipping", __func__); + return; + } + + if (bgp && bgp->rfapi) + sl = bgp->rfapi->resolve_nve_nexthop; + + if (!sl) + { + zlog_debug ("%s: no RHN entries, skipping", __func__); + return; + } + + if (!is_host_prefix (prefix)) + { + zlog_debug ("%s: not host route, skip", __func__); + return; + } + + /* + * Find all entries with key == CE in the RHN list + */ + rc = skiplist_first_value (sl, prefix, (void *) &pb, &cursor); + while (!rc) + { + + struct ecommunity *ecom; + struct prefix pfx_unicast_nexthop; + + memset (&pfx_unicast_nexthop, 0, sizeof (struct prefix)); /* keep valgrind happy */ + + if (process_unicast_route (bgp, afi, &pb->upfx, pb->ubi, + &ecom, &pfx_unicast_nexthop)) + { + + zlog_debug ("%s: process_unicast_route error, skipping", __func__); + continue; + } + + /* + * Sanity check + */ + if (vnc_prefix_cmp (&pfx_unicast_nexthop, prefix)) + { + char str_unh[BUFSIZ]; + char str_nve_pfx[BUFSIZ]; + + prefix2str (&pfx_unicast_nexthop, str_unh, BUFSIZ); + str_unh[BUFSIZ - 1] = 0; + + prefix2str (prefix, str_nve_pfx, BUFSIZ); + str_nve_pfx[BUFSIZ - 1] = 0; + + zlog_debug + ("%s: FATAL: resolve_nve_nexthop list item bi nexthop %s != nve pfx %s", + __func__, str_unh, str_nve_pfx); + assert (0); + } + + vnc_import_bgp_del_route_mode_resolve_nve_one_bi (bgp, + afi, + bi, prd, &pb->upfx); + + if (ecom) + ecommunity_free (&ecom); + + rc = skiplist_next_value (sl, prefix, (void *) &pb, &cursor); + } +} + + +/*********************************************************************** + * Exterior Routes + ***********************************************************************/ + +#define DEBUG_IS_USABLE_INTERIOR 1 + +static int +is_usable_interior_route (struct bgp_info *bi_interior) +{ + if (!VALID_INTERIOR_TYPE (bi_interior->type)) + { +#if DEBUG_IS_USABLE_INTERIOR + zlog_debug ("%s: NO: type %d is not valid interior type", + __func__, bi_interior->type); +#endif + return 0; + } + if (!CHECK_FLAG (bi_interior->flags, BGP_INFO_VALID)) + { +#if DEBUG_IS_USABLE_INTERIOR + zlog_debug ("%s: NO: BGP_INFO_VALID not set", __func__); +#endif + return 0; + } + return 1; +} + +/* + * There should be only one of these per prefix at a time. + * This should be called as a result of selection operation + * + * NB should be called espacially for bgp instances that are named, + * because the exterior routes will always come from one of those. + * We filter here on the instance name to make sure we get only the + * right routes. 
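+ *
+ * In outline: the exterior route's original nexthop is matched
+ * (longest match) against each import table's VPN RIB.  The first node
+ * up the tree that holds a usable interior route anchors an exterior
+ * monitor for this route, and a constructed ZEBRA_ROUTE_BGP_DIRECT_EXT
+ * route is imported using that interior route's RD and label.  If no
+ * usable interior route exists, the exterior route is parked on the
+ * import table's monitor_exterior_orphans skiplist until one appears.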
+ */ +static void +vnc_import_bgp_exterior_add_route_it ( + struct bgp *bgp, /* exterior instance, we hope */ + struct prefix *prefix,/* unicast prefix */ + struct bgp_info *info, /* unicast info */ + struct rfapi_import_table *it_only)/* NULL, or limit to this IT */ +{ + struct rfapi *h; + struct rfapi_cfg *hc; + struct prefix pfx_orig_nexthop; + struct rfapi_import_table *it; + struct bgp *bgp_default = bgp_get_default (); + afi_t afi = family2afi (prefix->family); + + h = bgp_default->rfapi; + hc = bgp_default->rfapi_cfg; + + zlog_debug ("%s: entry with it=%p", __func__, it_only); + + if (!h || !hc) + { + zlog_debug ("%s: rfapi or rfapi_cfg not instantiated, skipping", + __func__); + return; + } + if (!hc->redist_bgp_exterior_view) + { + zlog_debug ("%s: exterior view not set, skipping", __func__); + return; + } + if (bgp != hc->redist_bgp_exterior_view) + { + zlog_debug ("%s: bgp %p != hc->redist_bgp_exterior_view %p, skipping", + __func__, bgp, hc->redist_bgp_exterior_view); + return; + } + + if (!hc->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + zlog_debug ("%s: redist of exterior routes not enabled, skipping", + __func__); + return; + } + + if (!info->attr) + { + zlog_debug ("%s: no info, skipping", __func__); + return; + } + + /* + * Extract nexthop from exterior route + * + * Incoming prefix is unicast. If v6, it is in multiprotocol area, + * but if v4 it is in attr->nexthop + */ + rfapiUnicastNexthop2Prefix (afi, info->attr, &pfx_orig_nexthop); + + for (it = h->imports; it; it = it->next) + { + struct route_table *table; + struct route_node *rn; + struct route_node *par; + struct bgp_info *bi_interior; + int have_usable_route; + + zlog_debug ("%s: doing it %p", __func__, it); + + if (it_only && (it_only != it)) + { + zlog_debug ("%s: doesn't match it_only %p", __func__, it_only); + continue; + } + + table = it->imported_vpn[afi]; + + for (rn = route_node_match (table, &pfx_orig_nexthop), + have_usable_route = 0; (!have_usable_route) && rn;) + { + + zlog_debug ("%s: it %p trying rn %p", __func__, it, rn); + + for (bi_interior = rn->info; bi_interior; + bi_interior = bi_interior->next) + { + struct prefix_rd *prd; + struct attr new_attr; + u_int32_t label = 0; + + if (!is_usable_interior_route (bi_interior)) + continue; + + zlog_debug ("%s: usable: bi_interior %p", __func__, + bi_interior); + + /* + * have a legitimate route to exterior's nexthop + * via NVE. 
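+ * (The constructed route reuses the interior route's RD and label
+ * but keeps the exterior route's LOCAL_PREF; see new_attr below.)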
+ * + * Import unicast route to the import table + */ + have_usable_route = 1; + + if (bi_interior->extra) + { + prd = &bi_interior->extra->vnc.import.rd; + label = decode_label (bi_interior->extra->tag); + } + else + prd = NULL; + + /* use local_pref from unicast route */ + memset (&new_attr, 0, sizeof (struct attr)); + bgp_attr_dup (&new_attr, bi_interior->attr); + if (info->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF)) + { + new_attr.local_pref = info->attr->local_pref; + new_attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF); + } + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_UPDATE, bi_interior->peer, NULL, /* rfd */ + prefix, + NULL, + afi, + prd, + &new_attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, &label); + + bgp_attr_extra_free (&new_attr); + } + + if (have_usable_route) + { + /* + * Make monitor + * + * TBD factor this out into its own function + */ + struct prefix *pfx_mon = prefix_new (); + if (!RFAPI_MONITOR_EXTERIOR (rn)->source) + { + RFAPI_MONITOR_EXTERIOR (rn)->source = + skiplist_new (0, NULL, (void (*)(void *)) prefix_free); + route_lock_node (rn); /* for skiplist */ + } + route_lock_node (rn); /* for skiplist entry */ + prefix_copy (pfx_mon, prefix); + if (!skiplist_insert (RFAPI_MONITOR_EXTERIOR (rn)->source, + info, pfx_mon)) + { + + bgp_info_lock (info); + } + } + par = rn->parent; + if (par) + route_lock_node (par); + route_unlock_node (rn); + rn = par; + } + if (rn) + route_unlock_node (rn); + + if (!have_usable_route) + { + struct prefix *pfx_mon = prefix_new (); + prefix_copy (pfx_mon, prefix); + if (!skiplist_insert (it->monitor_exterior_orphans, info, pfx_mon)) + { + + bgp_info_lock (info); + } + } + } +} + +void +vnc_import_bgp_exterior_add_route ( + struct bgp *bgp, /* exterior instance, we hope */ + struct prefix *prefix,/* unicast prefix */ + struct bgp_info *info) /* unicast info */ +{ + vnc_import_bgp_exterior_add_route_it (bgp, prefix, info, NULL); +} + +/* + * There should be only one of these per prefix at a time. + * This should probably be called as a result of selection operation. + * + * NB should be called espacially for bgp instances that are named, + * because the exterior routes will always come from one of those. + * We filter here on the instance name to make sure we get only the + * right routes. + */ +void +vnc_import_bgp_exterior_del_route ( + struct bgp *bgp, + struct prefix *prefix, /* unicast prefix */ + struct bgp_info *info) /* unicast info */ +{ + struct rfapi *h; + struct rfapi_cfg *hc; + struct rfapi_import_table *it; + struct prefix pfx_orig_nexthop; + afi_t afi = family2afi (prefix->family); + struct bgp *bgp_default = bgp_get_default (); + + memset (&pfx_orig_nexthop, 0, sizeof (struct prefix)); /* keep valgrind happy */ + + h = bgp_default->rfapi; + hc = bgp_default->rfapi_cfg; + + if (!h || !hc) + { + zlog_debug ("%s: rfapi or rfapi_cfg not instantiated, skipping", + __func__); + return; + } + if (!hc->redist_bgp_exterior_view) + { + zlog_debug ("%s: exterior view not set, skipping", __func__); + return; + } + if (bgp != hc->redist_bgp_exterior_view) + { + zlog_debug ("%s: bgp %p != hc->redist_bgp_exterior_view %p, skipping", + __func__, bgp, hc->redist_bgp_exterior_view); + return; + } + if (!hc->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + zlog_debug ("%s: redist of exterior routes no enabled, skipping", + __func__); + return; + } + + if (!info->attr) + { + zlog_debug ("%s: no info, skipping", __func__); + return; + } + + /* + * Extract nexthop from exterior route + * + * Incoming prefix is unicast. 
If v6, it is in multiprotocol area, + * but if v4 it is in attr->nexthop + */ + rfapiUnicastNexthop2Prefix (afi, info->attr, &pfx_orig_nexthop); + + for (it = h->imports; it; it = it->next) + { + struct route_table *table; + struct route_node *rn; + struct route_node *par; + struct bgp_info *bi_interior; + int have_usable_route; + + table = it->imported_vpn[afi]; + + for (rn = route_node_match (table, &pfx_orig_nexthop), + have_usable_route = 0; (!have_usable_route) && rn;) + { + + for (bi_interior = rn->info; bi_interior; + bi_interior = bi_interior->next) + { + struct prefix_rd *prd; + u_int32_t label = 0; + + if (!is_usable_interior_route (bi_interior)) + continue; + + /* + * have a legitimate route to exterior's nexthop + * via NVE. + * + * Import unicast route to the import table + */ + have_usable_route = 1; + + if (bi_interior->extra) + { + prd = &bi_interior->extra->vnc.import.rd; + label = decode_label (bi_interior->extra->tag); + } + else + prd = NULL; + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_KILL, bi_interior->peer, NULL, /* rfd */ + prefix, + NULL, + afi, + prd, + bi_interior->attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, &label); + + /* + * Delete monitor + * + * TBD factor this out into its own function + */ + { + if (RFAPI_MONITOR_EXTERIOR (rn)->source) + { + if (!skiplist_delete (RFAPI_MONITOR_EXTERIOR (rn)->source, + info, NULL)) + { + + bgp_info_unlock (info); + route_unlock_node (rn); /* sl entry */ + } + if (skiplist_empty (RFAPI_MONITOR_EXTERIOR (rn)->source)) + { + skiplist_free (RFAPI_MONITOR_EXTERIOR (rn)->source); + RFAPI_MONITOR_EXTERIOR (rn)->source = NULL; + route_unlock_node (rn); /* skiplist itself */ + } + } + } + } + par = rn->parent; + if (par) + route_lock_node (par); + route_unlock_node (rn); + rn = par; + } + if (rn) + route_unlock_node (rn); + + if (!have_usable_route) + { + if (!skiplist_delete (it->monitor_exterior_orphans, info, NULL)) + { + + bgp_info_unlock (info); + } + } + } +} + +/* + * This function should be called after a new interior VPN route + * has been added to an import_table. 
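+ * It re-anchors exterior-route monitors: if the node already carries
+ * monitors, the constructed exterior routes are simply (re)imported
+ * against the new interior route; if this is the first valid interior
+ * route at the node, matching monitors are pulled down from the
+ * nearest monitored ancestor and adopted from the orphan list.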
+ * + * NB should also be called whenever an existing vpn interior route + * becomes valid (e.g., valid_interior_count is inremented) + */ +void +vnc_import_bgp_exterior_add_route_interior ( + struct bgp *bgp, + struct rfapi_import_table *it, + struct route_node *rn_interior, /* VPN IT node */ + struct bgp_info *bi_interior) /* VPN IT route */ +{ + afi_t afi = family2afi (rn_interior->p.family); + struct route_node *par; + struct bgp_info *bi_exterior; + struct prefix *pfx_exterior; /* exterior pfx */ + void *cursor; + int rc; + struct list *list_adopted; + + zlog_debug ("%s: entry", __func__); + + if (!is_usable_interior_route (bi_interior)) + { + zlog_debug ("%s: not usable interior route, skipping", __func__); + return; + } + + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + zlog_debug ("%s: redist of exterior routes no enabled, skipping", + __func__); + return; + } + + if (it == bgp->rfapi->it_ce) + { + zlog_debug ("%s: import table is it_ce, skipping", __func__); + return; + } + + /*debugging */ + { + char str_pfx[BUFSIZ]; + + prefix2str (&rn_interior->p, str_pfx, BUFSIZ); + str_pfx[BUFSIZ - 1] = 0; + + zlog_debug ("%s: interior prefix=%s, bi type=%d", + __func__, str_pfx, bi_interior->type); + } + + if (RFAPI_HAS_MONITOR_EXTERIOR (rn_interior)) + { + + int count = 0; /* debugging */ + + zlog_debug ("%s: has exterior monitor; ext src: %p", __func__, + RFAPI_MONITOR_EXTERIOR (rn_interior)->source); + + /* + * There is a monitor here already. Therefore, we do not need + * to do any pulldown. Just construct exterior routes based + * on the new interior route. + */ + cursor = NULL; + for (rc = skiplist_next (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + (void **) &bi_exterior, + (void **) &pfx_exterior, &cursor); !rc; + rc = + skiplist_next (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + (void **) &bi_exterior, (void **) &pfx_exterior, + &cursor)) + { + + struct prefix_rd *prd; + struct attr new_attr; + u_int32_t label = 0; + + + ++count; /* debugging */ + + assert (bi_exterior); + assert (pfx_exterior); + + if (bi_interior->extra) + { + prd = &bi_interior->extra->vnc.import.rd; + label = decode_label (bi_interior->extra->tag); + } + else + prd = NULL; + + /* use local_pref from unicast route */ + memset (&new_attr, 0, sizeof (struct attr)); + bgp_attr_dup (&new_attr, bi_interior->attr); + if (bi_exterior && + (bi_exterior->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF))) + { + new_attr.local_pref = bi_exterior->attr->local_pref; + new_attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF); + } + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_UPDATE, bi_interior->peer, NULL, /* rfd */ + pfx_exterior, + NULL, + afi, + prd, + &new_attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, &label); + + bgp_attr_extra_free (&new_attr); + } + zlog_debug + ("%s: finished constructing exteriors based on existing monitors", + __func__); + return; + } + + zlog_debug ("%s: no exterior monitor", __func__); + + /* + * No monitor at this node. Is this the first valid interior + * route at this node? + */ + if (RFAPI_MONITOR_EXTERIOR (rn_interior)->valid_interior_count > 1) + { + zlog_debug + ("%s: new interior route not first valid one, skipping pulldown", + __func__); + return; + } + + /* + * Look up the tree for possible pulldown candidates. 
+ * Find nearest parent with an exterior route monitor + */ + for (par = rn_interior->parent; par; par = par->parent) + { + if (RFAPI_HAS_MONITOR_EXTERIOR (par)) + break; + } + + if (par) + { + + zlog_debug ("%s: checking parent %p for possible pulldowns", + __func__, par); + + /* check monitors at par for possible pulldown */ + cursor = NULL; + for (rc = skiplist_next (RFAPI_MONITOR_EXTERIOR (par)->source, + (void **) &bi_exterior, + (void **) &pfx_exterior, &cursor); !rc; + rc = + skiplist_next (RFAPI_MONITOR_EXTERIOR (par)->source, + (void **) &bi_exterior, (void **) &pfx_exterior, + &cursor)) + { + + struct prefix pfx_nexthop; + + memset (&pfx_nexthop, 0, sizeof (struct prefix)); /* keep valgrind happy */ + + /* check original nexthop for prefix match */ + rfapiUnicastNexthop2Prefix (afi, bi_exterior->attr, &pfx_nexthop); + + if (prefix_match (&rn_interior->p, &pfx_nexthop)) + { + + struct bgp_info *bi; + struct prefix_rd *prd; + struct attr new_attr; + u_int32_t label = 0; + + /* do pull-down */ + + /* + * add monitor to longer prefix + */ + struct prefix *pfx_mon = prefix_new (); + prefix_copy (pfx_mon, pfx_exterior); + if (!RFAPI_MONITOR_EXTERIOR (rn_interior)->source) + { + RFAPI_MONITOR_EXTERIOR (rn_interior)->source = + skiplist_new (0, NULL, (void (*)(void *)) prefix_free); + route_lock_node (rn_interior); + } + skiplist_insert (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + bi_exterior, pfx_mon); + route_lock_node (rn_interior); + + /* + * Delete constructed exterior routes based on + * parent routes. + */ + for (bi = par->info; bi; bi = bi->next) + { + + if (bi->extra) + { + prd = &bi->extra->vnc.import.rd; + label = decode_label (bi->extra->tag); + } + else + prd = NULL; + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_KILL, bi->peer, NULL, /* rfd */ + pfx_exterior, + NULL, + afi, + prd, + bi->attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, + &label); + } + + + /* + * Add constructed exterior routes based on + * the new interior route at longer prefix. + */ + if (bi_interior->extra) + { + prd = &bi_interior->extra->vnc.import.rd; + label = decode_label (bi_interior->extra->tag); + } + else + prd = NULL; + + /* use local_pref from unicast route */ + memset (&new_attr, 0, sizeof (struct attr)); + bgp_attr_dup (&new_attr, bi_interior->attr); + if (bi_exterior && + (bi_exterior->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF))) + { + new_attr.local_pref = bi_exterior->attr->local_pref; + new_attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF); + } + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_UPDATE, bi_interior->peer, NULL, /* rfd */ + pfx_exterior, + NULL, + afi, + prd, + &new_attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, &label); + + bgp_attr_extra_free (&new_attr); + } + } + + /* + * The only monitors at rn_interior are the ones we added just + * above, so we can use the rn_interior list to identify which + * monitors to delete from the parent. 
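+ *
+ * Each entry removed from the parent's skiplist also releases the
+ * route_node lock taken when that entry was added; once the parent's
+ * skiplist is empty it is freed and the lock held for the skiplist
+ * itself is dropped too (route_unlock_node calls below).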
+ */ + cursor = NULL; + for (rc = skiplist_next (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + (void **) &bi_exterior, NULL, &cursor); + !rc; + rc = skiplist_next (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + (void **) &bi_exterior, NULL, &cursor)) + { + + + skiplist_delete (RFAPI_MONITOR_EXTERIOR (par)->source, + bi_exterior, NULL); + route_unlock_node (par); /* sl entry */ + } + if (skiplist_empty (RFAPI_MONITOR_EXTERIOR (par)->source)) + { + skiplist_free (RFAPI_MONITOR_EXTERIOR (par)->source); + RFAPI_MONITOR_EXTERIOR (par)->source = NULL; + route_unlock_node (par); /* sl itself */ + } + } + + zlog_debug ("%s: checking orphans", __func__); + + /* + * See if any orphans can be pulled down to the current node + */ + cursor = NULL; + list_adopted = NULL; + for (rc = skiplist_next (it->monitor_exterior_orphans, + (void **) &bi_exterior, (void **) &pfx_exterior, + &cursor); !rc; + rc = + skiplist_next (it->monitor_exterior_orphans, (void **) &bi_exterior, + (void **) &pfx_exterior, &cursor)) + { + + struct prefix pfx_nexthop; + char buf[BUFSIZ]; + afi_t afi_exterior = family2afi (pfx_exterior->family); + + prefix2str (pfx_exterior, buf, sizeof (buf)); + buf[sizeof (buf) - 1] = 0; + zlog_debug ("%s: checking exterior orphan at prefix %s", __func__, buf); + + if (afi_exterior != afi) + { + zlog_debug ("%s: exterior orphan afi %d != interior afi %d, skip", + __func__, afi_exterior, afi); + continue; + } + + /* check original nexthop for prefix match */ + rfapiUnicastNexthop2Prefix (afi, bi_exterior->attr, &pfx_nexthop); + + if (prefix_match (&rn_interior->p, &pfx_nexthop)) + { + + struct prefix_rd *prd; + struct attr new_attr; + u_int32_t label = 0; + + /* do pull-down */ + + /* + * add monitor to longer prefix + */ + + struct prefix *pfx_mon = prefix_new (); + prefix_copy (pfx_mon, pfx_exterior); + if (!RFAPI_MONITOR_EXTERIOR (rn_interior)->source) + { + RFAPI_MONITOR_EXTERIOR (rn_interior)->source = + skiplist_new (0, NULL, (void (*)(void *)) prefix_free); + route_lock_node (rn_interior); /* sl */ + } + skiplist_insert (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + bi_exterior, pfx_mon); + route_lock_node (rn_interior); /* sl entry */ + if (!list_adopted) + { + list_adopted = list_new (); + } + listnode_add (list_adopted, bi_exterior); + + /* + * Add constructed exterior routes based on the + * new interior route at the longer prefix. + */ + if (bi_interior->extra) + { + prd = &bi_interior->extra->vnc.import.rd; + label = decode_label (bi_interior->extra->tag); + } + else + prd = NULL; + + /* use local_pref from unicast route */ + memset (&new_attr, 0, sizeof (struct attr)); + bgp_attr_dup (&new_attr, bi_interior->attr); + if (bi_exterior && + (bi_exterior->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF))) + { + new_attr.local_pref = bi_exterior->attr->local_pref; + new_attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF); + } + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_UPDATE, bi_interior->peer, NULL, /* rfd */ + pfx_exterior, + NULL, + afi, + prd, + &new_attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, &label); + + bgp_attr_extra_free (&new_attr); + } + } + if (list_adopted) + { + struct listnode *node; + struct route_node *bi_exterior; + + for (ALL_LIST_ELEMENTS_RO (list_adopted, node, bi_exterior)) + { + skiplist_delete (it->monitor_exterior_orphans, bi_exterior, NULL); + } + list_delete (list_adopted); + } +} + +/* + * This function should be called after an interior VPN route + * has been deleted from an import_table. 
+ * bi_interior must still be valid, but it must already be detached + * from its route node and the route node's valid_interior_count + * must already be decremented. + * + * NB should also be called whenever an existing vpn interior route + * becomes invalid (e.g., valid_interior_count is decremented) + */ +void +vnc_import_bgp_exterior_del_route_interior ( + struct bgp *bgp, + struct rfapi_import_table *it, + struct route_node *rn_interior, /* VPN IT node */ + struct bgp_info *bi_interior) /* VPN IT route */ +{ + afi_t afi = family2afi (rn_interior->p.family); + struct route_node *par; + struct bgp_info *bi_exterior; + struct prefix *pfx_exterior; /* exterior pfx */ + void *cursor; + int rc; + + if (!VALID_INTERIOR_TYPE (bi_interior->type)) + { + zlog_debug ("%s: type %d not valid interior type, skipping", + __func__, bi_interior->type); + return; + } + + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + zlog_debug ("%s: redist of exterior routes no enabled, skipping", + __func__); + return; + } + + if (it == bgp->rfapi->it_ce) + { + zlog_debug ("%s: it is it_ce, skipping", __func__); + return; + } + + /* If no exterior routes depend on this prefix, nothing to do */ + if (!RFAPI_HAS_MONITOR_EXTERIOR (rn_interior)) + { + zlog_debug ("%s: no exterior monitor, skipping", __func__); + return; + } + + /*debugging */ + { + char str_pfx[BUFSIZ]; + + prefix2str (&rn_interior->p, str_pfx, BUFSIZ); + str_pfx[BUFSIZ - 1] = 0; + + zlog_debug ("%s: interior prefix=%s, bi type=%d", + __func__, str_pfx, bi_interior->type); + } + + /* + * Remove constructed routes based on the deleted interior route + */ + cursor = NULL; + for (rc = skiplist_next (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + (void **) &bi_exterior, (void **) &pfx_exterior, + &cursor); !rc; + rc = + skiplist_next (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + (void **) &bi_exterior, (void **) &pfx_exterior, + &cursor)) + { + + struct prefix_rd *prd; + u_int32_t label = 0; + + if (bi_interior->extra) + { + prd = &bi_interior->extra->vnc.import.rd; + label = decode_label (bi_interior->extra->tag); + } + else + prd = NULL; + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_KILL, bi_interior->peer, NULL, /* rfd */ + pfx_exterior, + NULL, + afi, + prd, + bi_interior->attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, &label); + } + + /* + * If there are no remaining valid interior routes at this prefix, + * we need to look up the tree for a possible node to move monitors to + */ + if (RFAPI_MONITOR_EXTERIOR (rn_interior)->valid_interior_count) + { + zlog_debug ("%s: interior routes still present, skipping", __func__); + return; + } + + /* + * Find nearest parent with at least one valid interior route + * If none is found, par will end up NULL, and we will move + * the monitors to the orphan list for this import table + */ + for (par = rn_interior->parent; par; par = par->parent) + { + if (RFAPI_MONITOR_EXTERIOR (par)->valid_interior_count) + break; + } + + zlog_debug ("%s: par=%p, ext src: %p", __func__, + par, RFAPI_MONITOR_EXTERIOR (rn_interior)->source); + + /* move all monitors */ + /* + * We will use and delete every element of the source skiplist + */ + while (!skiplist_first (RFAPI_MONITOR_EXTERIOR (rn_interior)->source, + (void **) &bi_exterior, (void **) &pfx_exterior)) + { + + struct prefix *pfx_mon = prefix_new (); + + prefix_copy (pfx_mon, pfx_exterior); + + if (par) + { + + struct bgp_info *bi; + + /* + * Add monitor to parent node + */ + if (!RFAPI_MONITOR_EXTERIOR (par)->source) + { + 
RFAPI_MONITOR_EXTERIOR (par)->source = + skiplist_new (0, NULL, (void (*)(void *)) prefix_free); + route_lock_node (par); /* sl */ + } + skiplist_insert (RFAPI_MONITOR_EXTERIOR (par)->source, + bi_exterior, pfx_mon); + route_lock_node (par); /* sl entry */ + + /* Add constructed exterior routes based on parent */ + for (bi = par->info; bi; bi = bi->next) + { + + struct prefix_rd *prd; + struct attr new_attr; + u_int32_t label = 0; + + if (bi->type == ZEBRA_ROUTE_BGP_DIRECT_EXT) + continue; + + if (bi->extra) + { + prd = &bi->extra->vnc.import.rd; + label = decode_label (bi->extra->tag); + } + else + prd = NULL; + + /* use local_pref from unicast route */ + memset (&new_attr, 0, sizeof (struct attr)); + bgp_attr_dup (&new_attr, bi->attr); + if (bi_exterior && + (bi_exterior->attr->flag & ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF))) + { + new_attr.local_pref = bi_exterior->attr->local_pref; + new_attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_LOCAL_PREF); + } + + rfapiBgpInfoFilteredImportVPN (it, FIF_ACTION_UPDATE, bi->peer, NULL, /* rfd */ + pfx_exterior, + NULL, + afi, + prd, + &new_attr, + ZEBRA_ROUTE_BGP_DIRECT_EXT, + BGP_ROUTE_REDISTRIBUTE, &label); + + bgp_attr_extra_free (&new_attr); + } + + } + else + { + + /* + * No interior route for exterior's nexthop. Save monitor + * in orphan list to await future route. + */ + skiplist_insert (it->monitor_exterior_orphans, + bi_exterior, pfx_mon); + } + + skiplist_delete_first (RFAPI_MONITOR_EXTERIOR (rn_interior)->source); + route_unlock_node (rn_interior); /* sl entry */ + } + if (skiplist_empty (RFAPI_MONITOR_EXTERIOR (rn_interior)->source)) + { + skiplist_free (RFAPI_MONITOR_EXTERIOR (rn_interior)->source); + RFAPI_MONITOR_EXTERIOR (rn_interior)->source = NULL; + route_unlock_node (rn_interior); /* sl itself */ + } + +} + +/*********************************************************************** + * Generic add/delete unicast routes + ***********************************************************************/ + +void +vnc_import_bgp_add_route ( + struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info) +{ + afi_t afi = family2afi (prefix->family); + + { + struct prefix pfx_nexthop; + char buf[BUFSIZ]; + char buf_nh[BUFSIZ]; + + prefix2str (prefix, buf, BUFSIZ); + rfapiUnicastNexthop2Prefix (afi, info->attr, &pfx_nexthop); + prefix2str (&pfx_nexthop, buf_nh, BUFSIZ); + + zlog_debug ("%s: pfx %s, nh %s", __func__, buf, buf_nh); + } +#if DEBUG_RHN_LIST + print_rhn_list(__func__, "ENTER "); +#endif + VNC_RHNCK (enter); + + if (!afi) + { + zlog_err ("%s: can't get afi of prefix", __func__); + return; + } + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* check vnc redist flag for bgp direct routes */ + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=%d=ZEBRA_ROUTE_BGP_DIRECT] is 0, skipping", + __func__, afi, ZEBRA_ROUTE_BGP_DIRECT); + return; + } + + switch (bgp->rfapi_cfg->redist_mode) + { + case VNC_REDIST_MODE_PLAIN: + vnc_import_bgp_add_route_mode_plain (bgp, prefix, info); + break; + + case VNC_REDIST_MODE_RFG: + if (bgp->rfapi_cfg->rfg_redist) + vnc_import_bgp_add_route_mode_nvegroup (bgp, prefix, info, + bgp->rfapi_cfg->rfg_redist); + else + zlog_debug ("%s: mode RFG but no redist RFG", __func__); + break; + + case VNC_REDIST_MODE_RESOLVE_NVE: + vnc_import_bgp_add_route_mode_resolve_nve (bgp, prefix, info); + break; + } +#if DEBUG_RHN_LIST + print_rhn_list(__func__, "LEAVE "); +#endif + VNC_RHNCK (leave); +} + +/* 
+ * "Withdrawing a Route" import process + */ +void +vnc_import_bgp_del_route ( + struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info) /* unicast info */ +{ + afi_t afi = family2afi (prefix->family); + + assert (afi); + + { + struct prefix pfx_nexthop; + char buf[BUFSIZ]; + char buf_nh[BUFSIZ]; + + prefix2str (prefix, buf, BUFSIZ); + rfapiUnicastNexthop2Prefix (afi, info->attr, &pfx_nexthop); + prefix2str (&pfx_nexthop, buf_nh, BUFSIZ); + + zlog_debug ("%s: pfx %s, nh %s", __func__, buf, buf_nh); + } +#if DEBUG_RHN_LIST + print_rhn_list(__func__, "ENTER "); +#endif + VNC_RHNCK (enter); + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* check bgp redist flag for vnc direct ("vpn") routes */ + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug ("%s: bgp redistribution of afi=%d VNC direct routes is off", + __func__, afi); + return; + } + + switch (bgp->rfapi_cfg->redist_mode) + { + case VNC_REDIST_MODE_PLAIN: + vnc_import_bgp_del_route_mode_plain (bgp, prefix, info); + break; + + case VNC_REDIST_MODE_RFG: + if (bgp->rfapi_cfg->rfg_redist) + vnc_import_bgp_del_route_mode_nvegroup (bgp, prefix, info); + else + zlog_debug ("%s: mode RFG but no redist RFG", __func__); + break; + + case VNC_REDIST_MODE_RESOLVE_NVE: + vnc_import_bgp_del_route_mode_resolve_nve (bgp, afi, prefix, info); + break; + + } +#if DEBUG_RHN_LIST + print_rhn_list(__func__, "LEAVE "); +#endif + VNC_RHNCK (leave); +} + + +/*********************************************************************** + * Enable/Disable + ***********************************************************************/ + +void +vnc_import_bgp_redist_enable (struct bgp *bgp, afi_t afi) +{ + /* iterate over bgp unicast v4 and v6 routes, call vnc_import_bgp_add_route */ + + struct bgp_node *rn; + + zlog_debug ("%s: entry, afi=%d", __func__, afi); + + if (bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug ("%s: already enabled for afi %d, skipping", __func__, afi); + return; + } + bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT] = 1; + + for (rn = bgp_table_top (bgp->rib[afi][SAFI_UNICAST]); + rn; rn = bgp_route_next (rn)) + { + + struct bgp_info *bi; + + for (bi = rn->info; bi; bi = bi->next) + { + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + vnc_import_bgp_add_route (bgp, &rn->p, bi); + } + } + zlog_debug ("%s: set redist[afi=%d][type=%d=ZEBRA_ROUTE_BGP_DIRECT] return", + __func__, afi, ZEBRA_ROUTE_BGP_DIRECT); +} + +void +vnc_import_bgp_exterior_redist_enable (struct bgp *bgp, afi_t afi) +{ + struct bgp *bgp_exterior; + struct bgp_node *rn; + + bgp_exterior = bgp->rfapi_cfg->redist_bgp_exterior_view; + + if (bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + zlog_debug ("%s: already enabled for afi %d, skipping", __func__, afi); + return; + } + bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT] = 1; + + if (!bgp_exterior) + { + zlog_debug ("%s: no exterior view set yet, no routes to import yet", + __func__); + return; + } + + for (rn = bgp_table_top (bgp_exterior->rib[afi][SAFI_UNICAST]); + rn; rn = bgp_route_next (rn)) + { + + struct bgp_info *bi; + + for (bi = rn->info; bi; bi = bi->next) + { + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + vnc_import_bgp_exterior_add_route (bgp_exterior, &rn->p, bi); + } + } + zlog_debug ("%s: set redist[afi=%d][type=%d=ZEBRA_ROUTE_BGP_DIRECT] return", + __func__, afi, ZEBRA_ROUTE_BGP_DIRECT); + +} + +/* + * This function is for populating a 
newly-created Import Table + */ +void +vnc_import_bgp_exterior_redist_enable_it ( + struct bgp *bgp, + afi_t afi, + struct rfapi_import_table *it_only) +{ + struct bgp *bgp_exterior; + struct bgp_node *rn; + + zlog_debug ("%s: entry", __func__); + + bgp_exterior = bgp->rfapi_cfg->redist_bgp_exterior_view; + + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + zlog_debug ("%s: not enabled for afi %d, skipping", __func__, afi); + return; + } + + if (!bgp_exterior) + { + zlog_debug ("%s: no exterior view set yet, no routes to import yet", + __func__); + return; + } + + for (rn = bgp_table_top (bgp_exterior->rib[afi][SAFI_UNICAST]); + rn; rn = bgp_route_next (rn)) + { + + struct bgp_info *bi; + + for (bi = rn->info; bi; bi = bi->next) + { + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + vnc_import_bgp_exterior_add_route_it (bgp_exterior, &rn->p, bi, + it_only); + } + } + +} + + +void +vnc_import_bgp_redist_disable (struct bgp *bgp, afi_t afi) +{ + /* + * iterate over vpn routes, find routes of type ZEBRA_ROUTE_BGP_DIRECT, + * delete (call timer expire immediately) + */ + struct bgp_node *rn1; + struct bgp_node *rn2; + + zlog_debug ("%s: entry", __func__); + + if (!bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT]) + { + zlog_debug ("%s: already disabled for afi %d, skipping", __func__, afi); + return; + } + + /* + * Two-level table for SAFI_MPLS_VPN + * Be careful when changing the things we iterate over + */ + for (rn1 = bgp_table_top (bgp->rib[afi][SAFI_MPLS_VPN]); + rn1; rn1 = bgp_route_next (rn1)) + { + + if (rn1->info) + { + for (rn2 = bgp_table_top (rn1->info); + rn2; rn2 = bgp_route_next (rn2)) + { + + struct bgp_info *bi; + struct bgp_info *nextbi; + + for (bi = rn2->info; bi; bi = nextbi) + { + + nextbi = bi->next; + + if (bi->type == ZEBRA_ROUTE_BGP_DIRECT) + { + + struct rfapi_descriptor *rfd; + vncHDBgpDirect.peer = bi->peer; + + rfd = bi->extra->vnc.export.rfapi_handle; + + zlog_debug + ("%s: deleting bi=%p, bi->peer=%p, bi->type=%d, bi->sub_type=%d, bi->extra->vnc.export.rfapi_handle=%p [passing rfd=%p]", + __func__, bi, bi->peer, bi->type, bi->sub_type, + (bi->extra ? bi->extra->vnc. 
+ export.rfapi_handle : NULL), rfd); + + + del_vnc_route (rfd, bi->peer, bgp, SAFI_MPLS_VPN, &rn2->p, (struct prefix_rd *) &rn1->p, bi->type, bi->sub_type, NULL, 1); /* kill */ + + vncHDBgpDirect.peer = NULL; + } + } + } + } + } + /* Clear RHN list */ + if (bgp->rfapi->resolve_nve_nexthop) + { + struct prefix_bag *pb; + struct bgp_info *info; + while (!skiplist_first + (bgp->rfapi->resolve_nve_nexthop, NULL, (void *) &pb)) + { + info = pb->ubi; + skiplist_delete_first (bgp->rfapi->resolve_nve_nexthop); + bgp_info_unlock (info); + } + } + + bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT] = 0; + zlog_debug ("%s: return", __func__); +} + + +void +vnc_import_bgp_exterior_redist_disable (struct bgp *bgp, afi_t afi) +{ + struct rfapi_cfg *hc = bgp->rfapi_cfg; + struct bgp *bgp_exterior = hc->redist_bgp_exterior_view; + + zlog_debug ("%s: entry", __func__); + + if (!hc->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT]) + { + zlog_debug ("%s: already disabled for afi %d, skipping", __func__, afi); + return; + } + + if (!bgp_exterior) + { + zlog_debug ("%s: bgp exterior view not defined, skipping", __func__); + return; + } + + + { + struct bgp_node *rn; + for (rn = bgp_table_top (bgp_exterior->rib[afi][SAFI_UNICAST]); + rn; rn = bgp_route_next (rn)) + { + + struct bgp_info *bi; + + for (bi = rn->info; bi; bi = bi->next) + { + + if (CHECK_FLAG (bi->flags, BGP_INFO_REMOVED)) + continue; + + vnc_import_bgp_exterior_del_route (bgp_exterior, &rn->p, bi); + } + } +#if DEBUG_RHN_LIST + print_rhn_list (__func__, NULL); +#endif + } + + bgp->rfapi_cfg->redist[afi][ZEBRA_ROUTE_BGP_DIRECT_EXT] = 0; + zlog_debug ("%s: return", __func__); +} diff --git a/bgpd/rfapi/vnc_import_bgp.h b/bgpd/rfapi/vnc_import_bgp.h new file mode 100644 index 0000000000..db739e3320 --- /dev/null +++ b/bgpd/rfapi/vnc_import_bgp.h @@ -0,0 +1,93 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#ifndef _QUAGGA_RFAPI_VNC_IMPORT_BGP_H_ +#define _QUAGGA_RFAPI_VNC_IMPORT_BGP_H_ + +#include "lib/zebra.h" +#include "lib/prefix.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_route.h" + +#define VALID_INTERIOR_TYPE(type) \ + (((type) == ZEBRA_ROUTE_BGP) || ((type) == ZEBRA_ROUTE_BGP_DIRECT)) + +extern uint32_t +calc_local_pref (struct attr *attr, struct peer *peer); + +extern int +vnc_prefix_cmp (void *pfx1, void *pfx2); + +extern void +vnc_import_bgp_add_route ( + struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info); + +extern void +vnc_import_bgp_del_route ( + struct bgp *bgp, + struct prefix *prefix, + struct bgp_info *info); + +extern void +vnc_import_bgp_redist_enable (struct bgp *bgp, afi_t afi); + +extern void +vnc_import_bgp_redist_disable (struct bgp *bgp, afi_t afi); + +extern void +vnc_import_bgp_exterior_redist_enable (struct bgp *bgp, afi_t afi); + +extern void +vnc_import_bgp_exterior_redist_disable (struct bgp *bgp, afi_t afi); + + +extern void +vnc_import_bgp_exterior_add_route ( + struct bgp *bgp, /* exterior instance, we hope */ + struct prefix *prefix,/* unicast prefix */ + struct bgp_info *info); /* unicast info */ + +extern void +vnc_import_bgp_exterior_del_route ( + struct bgp *bgp, + struct prefix *prefix,/* unicast prefix */ + struct bgp_info *info); /* unicast info */ + +extern void +vnc_import_bgp_add_vnc_host_route_mode_resolve_nve ( + struct bgp *bgp, + struct prefix_rd *prd, /* RD */ + struct bgp_table *table_rd, /* per-rd VPN route table */ + struct prefix *prefix, /* VPN prefix */ + struct bgp_info *bi); /* new VPN host route */ + +extern void +vnc_import_bgp_del_vnc_host_route_mode_resolve_nve ( + struct bgp *bgp, + struct prefix_rd *prd, /* RD */ + struct bgp_table *table_rd, /* per-rd VPN route table */ + struct prefix *prefix, /* VPN prefix */ + struct bgp_info *bi); /* old VPN host route */ + +#endif /* _QUAGGA_RFAPI_VNC_IMPORT_BGP_H_ */ diff --git a/bgpd/rfapi/vnc_import_bgp_p.h b/bgpd/rfapi/vnc_import_bgp_p.h new file mode 100644 index 0000000000..85800c1cab --- /dev/null +++ b/bgpd/rfapi/vnc_import_bgp_p.h @@ -0,0 +1,51 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#ifndef _QUAGGA_RFAPI_VNC_IMPORT_BGP_P_H_ +#define _QUAGGA_RFAPI_VNC_IMPORT_BGP_P_H_ + +#include "lib/zebra.h" +#include "lib/prefix.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_route.h" + +extern void +vnc_import_bgp_exterior_add_route_interior ( + struct bgp *bgp, + struct rfapi_import_table *it, + struct route_node *rn_interior, /* VPN IT node */ + struct bgp_info *bi_interior); /* VPN IT route */ + +extern void +vnc_import_bgp_exterior_del_route_interior ( + struct bgp *bgp, + struct rfapi_import_table *it, + struct route_node *rn_interior, /* VPN IT node */ + struct bgp_info *bi_interior); /* VPN IT route */ + +extern void +vnc_import_bgp_exterior_redist_enable_it ( + struct bgp *bgp, + afi_t afi, + struct rfapi_import_table *it_only); + +#endif /* _QUAGGA_RFAPI_VNC_IMPORT_BGP_P_H_ */ diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c new file mode 100644 index 0000000000..e357ef6eff --- /dev/null +++ b/bgpd/rfapi/vnc_zebra.c @@ -0,0 +1,1117 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * File: vnc_zebra.c + * Purpose: Handle exchange of routes between VNC and Zebra + */ + +#include "lib/zebra.h" +#include "lib/prefix.h" +#include "lib/table.h" +#include "lib/log.h" +#include "lib/command.h" +#include "lib/zclient.h" +#include "lib/stream.h" +#include "lib/memory.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_debug.h" +#include "bgpd/bgp_advertise.h" + +#include "bgpd/rfapi/bgp_rfapi_cfg.h" +#include "bgpd/rfapi/rfapi.h" +#include "bgpd/rfapi/rfapi_import.h" +#include "bgpd/rfapi/rfapi_private.h" +#include "bgpd/rfapi/vnc_zebra.h" +#include "bgpd/rfapi/rfapi_vty.h" +#include "bgpd/rfapi/rfapi_backend.h" + +static struct rfapi_descriptor vncHD1VR; /* Single-VR export dummy nve descr */ +static struct zclient *zclient_vnc = NULL; + +/*********************************************************************** + * REDISTRIBUTE: Zebra sends updates/withdraws to BGPD + ***********************************************************************/ + +/* + * Routes coming from zebra get added to VNC here + */ +static void +vnc_redistribute_add ( + struct prefix *p, + struct in_addr *nexthop, + u_int32_t metric, + uint8_t type) +{ + struct bgp *bgp = bgp_get_default (); + struct prefix_rd prd; + struct rfapi_ip_addr vnaddr; + afi_t afi; + uint32_t local_pref = rfp_cost_to_localpref (metric > 255 ? 
255 : metric); + + if (!bgp) + return; + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + afi = family2afi (p->family); + if (!afi) + { + zlog_debug ("%s: unknown prefix address family %d", __func__, + p->family); + return; + } + + if (!bgp->rfapi_cfg->redist[afi][type]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=%d] is 0, skipping", + __func__, afi, type); + return; + } + if (!bgp->rfapi_cfg->rfg_redist) + { + zlog_debug ("%s: no redist nve group, skipping", __func__); + return; + } + + /* + * Assume nve group's configured VN address prefix is a host + * route which also happens to give the NVE VN address to use + * for redistributing into VNC. + */ + vnaddr.addr_family = bgp->rfapi_cfg->rfg_redist->vn_prefix.family; + switch (bgp->rfapi_cfg->rfg_redist->vn_prefix.family) + { + case AF_INET: + if (bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen != 32) + { + zlog_debug + ("%s: redist nve group VN prefix len (%d) != 32, skipping", + __func__, bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen); + return; + } + vnaddr.addr.v4 = bgp->rfapi_cfg->rfg_redist->vn_prefix.u.prefix4; + break; + case AF_INET6: + if (bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen != 128) + { + zlog_debug + ("%s: redist nve group VN prefix len (%d) != 128, skipping", + __func__, bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen); + return; + } + vnaddr.addr.v6 = bgp->rfapi_cfg->rfg_redist->vn_prefix.u.prefix6; + break; + default: + zlog_debug + ("%s: no redist nve group VN host prefix configured, skipping", + __func__); + return; + } + + /* + * Assume nve group's configured UN address prefix is a host + * route which also happens to give the NVE UN address to use + * for redistributing into VNC. + */ + + /* + * Set UN address in dummy nve descriptor so add_vnc_route + * can use it in VNC tunnel SubTLV + */ + { + struct rfapi_ip_prefix pfx_un; + + rfapiQprefix2Rprefix (&bgp->rfapi_cfg->rfg_redist->un_prefix, &pfx_un); + + switch (pfx_un.prefix.addr_family) + { + case AF_INET: + if (pfx_un.length != 32) + { + zlog_debug + ("%s: redist nve group UN prefix len (%d) != 32, skipping", + __func__, pfx_un.length); + return; + } + break; + case AF_INET6: + if (pfx_un.length != 128) + { + zlog_debug + ("%s: redist nve group UN prefix len (%d) != 128, skipping", + __func__, pfx_un.length); + return; + } + break; + default: + zlog_debug + ("%s: no redist nve group UN host prefix configured, skipping", + __func__); + return; + } + + vncHD1VR.un_addr = pfx_un.prefix; + + if (!vncHD1VR.peer) + { + /* + * Same setup as in rfapi_open() + */ + vncHD1VR.peer = peer_new (bgp); + vncHD1VR.peer->status = Established; /* keep bgp core happy */ + bgp_sync_delete (vncHD1VR.peer); /* don't need these */ + if (vncHD1VR.peer->ibuf) + { + stream_free (vncHD1VR.peer->ibuf); /* don't need it */ + vncHD1VR.peer->ibuf = NULL; + } + if (vncHD1VR.peer->obuf) + { + stream_fifo_free (vncHD1VR.peer->obuf); /* don't need it */ + vncHD1VR.peer->obuf = NULL; + } + if (vncHD1VR.peer->work) + { + stream_free (vncHD1VR.peer->work); /* don't need it */ + vncHD1VR.peer->work = NULL; + } + /* base code assumes have valid host pointer */ + vncHD1VR.peer->host = XSTRDUP (MTYPE_BGP_PEER_HOST, ".zebra."); + + /* Mark peer as belonging to HD */ + SET_FLAG (vncHD1VR.peer->flags, PEER_FLAG_IS_RFAPI_HD); + } + } + + memset (&prd, 0, sizeof (prd)); + prd = bgp->rfapi_cfg->rfg_redist->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + add_vnc_route (&vncHD1VR, /* cookie + UN addr */ + bgp, 
SAFI_MPLS_VPN, p, &prd, &vnaddr, &local_pref, &(bgp->rfapi_cfg->redist_lifetime), NULL, /* RFP options */ + NULL, /* struct rfapi_un_option */ + NULL, /* struct rfapi_vn_option */ + bgp->rfapi_cfg->rfg_redist->rt_export_list, NULL, NULL, /* label: default */ + type, BGP_ROUTE_REDISTRIBUTE, 0); /* flags */ +} + +/* + * Route deletions from zebra propagate to VNC here + */ +static void +vnc_redistribute_delete (struct prefix *p, uint8_t type) +{ + struct bgp *bgp = bgp_get_default (); + struct prefix_rd prd; + afi_t afi; + + if (!bgp) + return; + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + afi = family2afi (p->family); + if (!afi) + { + zlog_debug ("%s: unknown prefix address family %d", __func__, + p->family); + return; + } + if (!bgp->rfapi_cfg->redist[afi][type]) + { + zlog_debug + ("%s: bgp->rfapi_cfg->redist[afi=%d][type=%d] is 0, skipping", + __func__, afi, type); + return; + } + if (!bgp->rfapi_cfg->rfg_redist) + { + zlog_debug ("%s: no redist nve group, skipping", __func__); + return; + } + + memset (&prd, 0, sizeof (prd)); + prd = bgp->rfapi_cfg->rfg_redist->rd; + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + + del_vnc_route (&vncHD1VR, /* use dummy ptr as cookie */ + vncHD1VR.peer, + bgp, + SAFI_MPLS_VPN, + p, &prd, type, BGP_ROUTE_REDISTRIBUTE, NULL, 0); +} + +/* + * Flush all redistributed routes of type + */ +static void +vnc_redistribute_withdraw (struct bgp *bgp, afi_t afi, uint8_t type) +{ + struct prefix_rd prd; + struct bgp_table *table; + struct bgp_node *prn; + struct bgp_node *rn; + + zlog_debug ("%s: entry", __func__); + + if (!bgp) + return; + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + /* + * Loop over all the RDs + */ + for (prn = bgp_table_top (bgp->rib[afi][SAFI_MPLS_VPN]); prn; + prn = bgp_route_next (prn)) + { + memset (&prd, 0, sizeof (prd)); + prd.family = AF_UNSPEC; + prd.prefixlen = 64; + memcpy (prd.val, prn->p.u.val, 8); + + /* This is the per-RD table of prefixes */ + table = prn->info; + + for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn)) + { + + struct bgp_info *ri; + + for (ri = rn->info; ri; ri = ri->next) + { + if (ri->type == type) + { /* has matching redist type */ + break; + } + } + if (ri) + { + del_vnc_route (&vncHD1VR, /* use dummy ptr as cookie */ + vncHD1VR.peer, + bgp, + SAFI_MPLS_VPN, + &(rn->p), + &prd, type, BGP_ROUTE_REDISTRIBUTE, NULL, 0); + } + } + } + zlog_debug ("%s: return", __func__); +} + +/* + * Zebra route add and delete treatment. + * + * Assumes 1 nexthop + */ +static int +vnc_zebra_read_ipv4 ( + int command, + struct zclient *zclient, + zebra_size_t length, + vrf_id_t vrf_id) +{ + struct stream *s; + struct zapi_ipv4 api; + struct in_addr nexthop; + struct prefix_ipv4 p; + + s = zclient->ibuf; + nexthop.s_addr = 0; + + /* Type, flags, message. */ + api.type = stream_getc (s); + api.flags = stream_getc (s); + api.message = stream_getc (s); + + /* IPv4 prefix. */ + memset (&p, 0, sizeof (struct prefix_ipv4)); + p.family = AF_INET; + p.prefixlen = stream_getc (s); + stream_get (&p.prefix, s, PSIZE (p.prefixlen)); + + /* Nexthop, ifindex, distance, metric. 
*/ + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_NEXTHOP)) + { + api.nexthop_num = stream_getc (s); + nexthop.s_addr = stream_get_ipv4 (s); + } + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_IFINDEX)) + { + api.ifindex_num = stream_getc (s); + stream_getl (s); + } + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_DISTANCE)) + api.distance = stream_getc (s); + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_METRIC)) + api.metric = stream_getl (s); + else + api.metric = 0; + + if (command == ZEBRA_IPV4_ROUTE_ADD) + { + if (BGP_DEBUG (zebra, ZEBRA)) + { + char buf[2][INET_ADDRSTRLEN]; + zlog_debug + ("%s: Zebra rcvd: IPv4 route add %s %s/%d nexthop %s metric %u", + __func__, zebra_route_string (api.type), inet_ntop (AF_INET, + &p.prefix, + buf[0], + sizeof (buf + [0])), + p.prefixlen, inet_ntop (AF_INET, &nexthop, buf[1], + sizeof (buf[1])), api.metric); + } + vnc_redistribute_add ((struct prefix *) &p, &nexthop, api.metric, + api.type); + } + else + { + if (BGP_DEBUG (zebra, ZEBRA)) + { + char buf[2][INET_ADDRSTRLEN]; + zlog_debug ("%s: Zebra rcvd: IPv4 route delete %s %s/%d " + "nexthop %s metric %u", + __func__, + zebra_route_string (api.type), + inet_ntop (AF_INET, &p.prefix, buf[0], sizeof (buf[0])), + p.prefixlen, + inet_ntop (AF_INET, &nexthop, buf[1], sizeof (buf[1])), + api.metric); + } + vnc_redistribute_delete ((struct prefix *) &p, api.type); + } + + return 0; +} + +/* Zebra route add and delete treatment. */ +static int +vnc_zebra_read_ipv6 ( + int command, + struct zclient *zclient, + zebra_size_t length, + vrf_id_t vrf_id) +{ + struct stream *s; + struct zapi_ipv6 api; + struct in6_addr nexthop; + struct prefix_ipv6 p; + + s = zclient->ibuf; + memset (&nexthop, 0, sizeof (struct in6_addr)); + + /* Type, flags, message. */ + api.type = stream_getc (s); + api.flags = stream_getc (s); + api.message = stream_getc (s); + + /* IPv6 prefix. */ + memset (&p, 0, sizeof (struct prefix_ipv6)); + p.family = AF_INET6; + p.prefixlen = stream_getc (s); + stream_get (&p.prefix, s, PSIZE (p.prefixlen)); + + /* Nexthop, ifindex, distance, metric. */ + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_NEXTHOP)) + { + api.nexthop_num = stream_getc (s); + stream_get (&nexthop, s, 16); + } + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_IFINDEX)) + { + api.ifindex_num = stream_getc (s); + stream_getl (s); + } + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_DISTANCE)) + api.distance = stream_getc (s); + else + api.distance = 0; + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_METRIC)) + api.metric = stream_getl (s); + else + api.metric = 0; + + /* Simply ignore link-local address. 
*/ + if (IN6_IS_ADDR_LINKLOCAL (&p.prefix)) + return 0; + + if (command == ZEBRA_IPV6_ROUTE_ADD) + { + if (BGP_DEBUG (zebra, ZEBRA)) + { + char buf[INET6_ADDRSTRLEN]; + zlog_debug ("Zebra rcvd: IPv6 route add %s %s/%d metric %u", + zebra_route_string (api.type), + inet_ntop (AF_INET6, &p.prefix, buf, sizeof (buf)), + p.prefixlen, api.metric); + } + vnc_redistribute_add ((struct prefix *) &p, NULL, api.metric, api.type); + } + else + { + if (BGP_DEBUG (zebra, ZEBRA)) + { + char buf[INET6_ADDRSTRLEN]; + zlog_debug ("Zebra rcvd: IPv6 route delete %s %s/%d metric %u", + zebra_route_string (api.type), + inet_ntop (AF_INET6, &p.prefix, buf, sizeof (buf)), + p.prefixlen, api.metric); + } + vnc_redistribute_delete ((struct prefix *) &p, api.type); + } + + return 0; +} + +/*********************************************************************** + * vnc_bgp_zebra_*: VNC sends updates/withdraws to Zebra + ***********************************************************************/ + +/* + * low-level message builder + */ +static void +vnc_zebra_route_msg ( + struct prefix *p, + int nhp_count, + void *nhp_ary, + int add) /* 1 = add, 0 = del */ +{ + if (!nhp_count) + { + zlog_debug ("%s: empty nexthop list, skipping", __func__); + return; + } + + if (p->family == AF_INET) + { + + struct zapi_ipv4 api; + + api.flags = 0; + api.vrf_id = VRF_DEFAULT; + api.type = ZEBRA_ROUTE_VNC; + api.message = 0; + SET_FLAG (api.message, ZAPI_MESSAGE_NEXTHOP); /* TBD what's it mean? */ + api.nexthop_num = nhp_count; + api.nexthop = nhp_ary; + api.ifindex_num = 0; + + if (BGP_DEBUG (zebra, ZEBRA)) + { + + char buf[INET_ADDRSTRLEN]; + zlog_debug ("%s: Zebra send: IPv4 route %s %s/%d, nhp_count=%d", + __func__, + (add ? "add" : "del"), + inet_ntop (AF_INET, &p->u.prefix4, buf, sizeof (buf)), + p->prefixlen, nhp_count); + } + + zapi_ipv4_route ((add ? ZEBRA_IPV4_NEXTHOP_ADD : + ZEBRA_IPV4_NEXTHOP_DELETE), zclient_vnc, + (struct prefix_ipv4 *) p, &api); + + } + else if (p->family == AF_INET6) + { + + struct zapi_ipv6 api; + ifindex_t ifindex = 0; + + /* Make Zebra API structure. */ + api.flags = 0; + api.vrf_id = VRF_DEFAULT; + api.type = ZEBRA_ROUTE_VNC; + api.message = 0; + SET_FLAG (api.message, ZAPI_MESSAGE_NEXTHOP); /* TBD means? */ + api.nexthop_num = nhp_count; + api.nexthop = nhp_ary; + SET_FLAG (api.message, ZAPI_MESSAGE_IFINDEX); + api.ifindex_num = 1; + api.ifindex = &ifindex; + + if (BGP_DEBUG (zebra, ZEBRA)) + { + + char buf[INET6_ADDRSTRLEN]; + zlog_debug ("%s: Zebra send: IPv6 route %s %s/%d nhp_count=%d", + __func__, + (add ? "add" : "del"), + inet_ntop (AF_INET6, &p->u.prefix6, buf, sizeof (buf)), + p->prefixlen, nhp_count); + } + + zapi_ipv6_route ((add ? 
ZEBRA_IPV6_NEXTHOP_ADD : + ZEBRA_IPV6_NEXTHOP_DELETE), zclient_vnc, + (struct prefix_ipv6 *) p, &api); + } + else + { + zlog_debug ("%s: unknown prefix address family, skipping", __func__); + return; + } +} + + +static void +nve_list_to_nh_array ( + u_char family, + struct list *nve_list, + int *nh_count_ret, + void **nh_ary_ret, /* returned address array */ + void **nhp_ary_ret) /* returned pointer array */ +{ + int nve_count = listcount (nve_list); + + *nh_count_ret = 0; + *nh_ary_ret = NULL; + *nhp_ary_ret = NULL; + + if (!nve_count) + { + zlog_debug ("%s: empty nve_list, skipping", __func__); + return; + } + + if (family == AF_INET) + { + struct listnode *ln; + struct in_addr *iap; + struct in_addr **v; + + /* + * Array of nexthop addresses + */ + *nh_ary_ret = XCALLOC (MTYPE_TMP, nve_count * sizeof (struct in_addr)); + + /* + * Array of pointers to nexthop addresses + */ + *nhp_ary_ret = + XCALLOC (MTYPE_TMP, nve_count * sizeof (struct in_addr *)); + iap = *nh_ary_ret; + v = *nhp_ary_ret; + + for (ln = listhead (nve_list); ln; ln = listnextnode (ln)) + { + + struct rfapi_descriptor *irfd; + struct prefix nhp; + + irfd = listgetdata (ln); + + if (rfapiRaddr2Qprefix (&irfd->vn_addr, &nhp)) + continue; + + *iap = nhp.u.prefix4; + *v = iap; + zlog_debug ("%s: ipadr: (%p)<-0x%x, ptr: (%p)<-%p", + __func__, iap, nhp.u.prefix4.s_addr, v, iap); + + ++iap; + ++v; + ++*nh_count_ret; + } + + } + else if (family == AF_INET6) + { + + struct listnode *ln; + + *nh_ary_ret = XCALLOC (MTYPE_TMP, nve_count * sizeof (struct in6_addr)); + + *nhp_ary_ret = XCALLOC (MTYPE_TMP, + nve_count * sizeof (struct in6_addr *)); + + for (ln = listhead (nve_list); ln; ln = listnextnode (ln)) + { + + struct rfapi_descriptor *irfd; + struct in6_addr *iap = *nh_ary_ret; + struct in6_addr **v = *nhp_ary_ret; + struct prefix nhp; + + irfd = listgetdata (ln); + + if (rfapiRaddr2Qprefix (&irfd->vn_addr, &nhp)) + continue; + + *iap = nhp.u.prefix6; + *v = iap; + + ++iap; + ++v; + ++*nh_count_ret; + } + } +} + +static void +import_table_to_nve_list_zebra ( + struct bgp *bgp, + struct rfapi_import_table *it, + struct list **nves, + uint8_t family) +{ + struct listnode *node; + struct rfapi_rfg_name *rfgn; + + /* + * Loop over the list of NVE-Groups configured for + * exporting to direct-bgp. 
+ * + * Build a list of NVEs that use this import table + */ + *nves = NULL; + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_zebra_l, node, rfgn)) + { + + /* + * If this NVE-Group's import table matches the current one + */ + if (rfgn->rfg && rfgn->rfg->nves && rfgn->rfg->rfapi_import_table == it) + { + + nve_group_to_nve_list (rfgn->rfg, nves, family); + } + } +} + +static void +vnc_zebra_add_del_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn, + int add) /* !0 = add, 0 = del */ +{ + struct list *nves; + + int nexthop_count = 0; + void *nh_ary = NULL; + void *nhp_ary = NULL; + + zlog_debug ("%s: entry, add=%d", __func__, add); + + if (zclient_vnc->sock < 0) + return; + + if (rn->p.family != AF_INET + && rn->p.family != AF_INET6) + { + zlog_err ("%s: invalid route node addr family", __func__); + return; + } + + if (!zclient_vnc->redist[family2afi(rn->p.family)][ZEBRA_ROUTE_VNC]) + return; + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + if (!listcount (bgp->rfapi_cfg->rfg_export_zebra_l)) + { + zlog_debug ("%s: no zebra export nve group, skipping", __func__); + return; + } + + import_table_to_nve_list_zebra (bgp, import_table, &nves, rn->p.family); + + if (nves) + { + nve_list_to_nh_array (rn->p.family, + nves, &nexthop_count, &nh_ary, &nhp_ary); + + list_delete (nves); + + if (nexthop_count) + vnc_zebra_route_msg (&rn->p, nexthop_count, nhp_ary, add); + } + + if (nhp_ary) + XFREE (MTYPE_TMP, nhp_ary); + if (nh_ary) + XFREE (MTYPE_TMP, nh_ary); +} + +void +vnc_zebra_add_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn) +{ + vnc_zebra_add_del_prefix (bgp, import_table, rn, 1); +} + +void +vnc_zebra_del_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn) +{ + vnc_zebra_add_del_prefix (bgp, import_table, rn, 0); +} + + + +static void +vnc_zebra_add_del_nve ( + struct bgp *bgp, + struct rfapi_descriptor *rfd, + int add) /* 0 = del, !0 = add */ +{ + struct listnode *node; + struct rfapi_rfg_name *rfgn; + struct rfapi_nve_group_cfg *rfg = rfd->rfg; + afi_t afi = family2afi (rfd->vn_addr.addr_family); + struct prefix nhp; +// struct prefix *nhpp; + void *pAddr; + + zlog_debug ("%s: entry, add=%d", __func__, add); + + if (zclient_vnc->sock < 0) + return; + + if (!zclient_vnc->redist[afi][ZEBRA_ROUTE_VNC]) + return; + + if (afi != AFI_IP && afi != AFI_IP6) + { + zlog_err ("%s: invalid vn addr family", __func__); + return; + } + + if (!bgp) + return; + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: bgp->rfapi_cfg is NULL, skipping", __func__); + return; + } + + if (rfapiRaddr2Qprefix (&rfd->vn_addr, &nhp)) + { + zlog_debug ("%s: can't convert vn address, skipping", __func__); + return; + } + + pAddr = &nhp.u.prefix4; + + /* + * Loop over the list of NVE-Groups configured for + * exporting to zebra and see if this new NVE's + * group is among them. 
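Aside, illustrative only: the nexthop-array helper used throughout this file hands back two MTYPE_TMP allocations whose ownership is easy to miss when reading the diff. The fragment below restates the calling convention followed by vnc_zebra_add_del_prefix() and vnc_zebra_add_del_group_afi(); only the function name example_send_prefix is invented here, everything else is the file's own API, and the helpers are static, so this would only compile inside vnc_zebra.c.

    /* Sketch of the expected pairing: nve_list_to_nh_array() XCALLOCs both
       the address array and the pointer array (MTYPE_TMP); the caller frees
       both and also deletes the nve list it obtained. */
    static void
    example_send_prefix (struct prefix *p, struct list *nves,
                         uint8_t family, int add)
    {
      int nh_count = 0;
      void *nh_ary = NULL;
      void *nhp_ary = NULL;

      nve_list_to_nh_array (family, nves, &nh_count, &nh_ary, &nhp_ary);
      list_delete (nves);          /* NVE list itself is no longer needed */

      if (nh_count)
        vnc_zebra_route_msg (p, nh_count, nhp_ary, add);

      if (nhp_ary)
        XFREE (MTYPE_TMP, nhp_ary);
      if (nh_ary)
        XFREE (MTYPE_TMP, nh_ary);
    }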
+ */ + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_zebra_l, node, rfgn)) + { + + /* + * Yes, this NVE's group is configured for export to zebra + */ + if (rfgn->rfg == rfg) + { + + struct route_table *rt = NULL; + struct route_node *rn; + struct rfapi_import_table *import_table; + import_table = rfg->rfapi_import_table; + + zlog_debug ("%s: this nve's group is in zebra export list", + __func__); + + rt = import_table->imported_vpn[afi]; + + /* + * Walk the NVE-Group's VNC Import table + */ + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + + if (rn->info) + { + + zlog_debug ("%s: sending %s", __func__, + (add ? "add" : "del")); + vnc_zebra_route_msg (&rn->p, 1, &pAddr, add); + } + } + } + } +} + +void +vnc_zebra_add_nve (struct bgp *bgp, struct rfapi_descriptor *rfd) +{ + vnc_zebra_add_del_nve (bgp, rfd, 1); +} + +void +vnc_zebra_del_nve (struct bgp *bgp, struct rfapi_descriptor *rfd) +{ + vnc_zebra_add_del_nve (bgp, rfd, 0); +} + +static void +vnc_zebra_add_del_group_afi ( + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg, + afi_t afi, + int add) +{ + struct route_table *rt = NULL; + struct route_node *rn; + struct rfapi_import_table *import_table; + uint8_t family = afi2family (afi); + + struct list *nves = NULL; + int nexthop_count = 0; + void *nh_ary = NULL; + void *nhp_ary = NULL; + + zlog_debug ("%s: entry", __func__); + import_table = rfg->rfapi_import_table; + if (!import_table) + { + zlog_debug ("%s: import table not defined, returning", __func__); + return; + } + + if (afi == AFI_IP + || afi == AFI_IP6) + { + rt = import_table->imported_vpn[afi]; + } + else + { + zlog_err ("%s: bad afi %d", __func__, afi); + return; + } + + if (!family) + { + zlog_err ("%s: computed bad family: %d", __func__, family); + return; + } + + if (!rfg->nves) + { + /* avoid segfault below if list doesn't exist */ + zlog_debug ("%s: no NVEs in this group", __func__); + return; + } + + nve_group_to_nve_list (rfg, &nves, family); + if (nves) + { + zlog_debug ("%s: have nves", __func__); + nve_list_to_nh_array (family, nves, &nexthop_count, &nh_ary, &nhp_ary); + + zlog_debug ("%s: family: %d, nve count: %d", __func__, family, + nexthop_count); + + list_delete (nves); + + if (nexthop_count) + { + /* + * Walk the NVE-Group's VNC Import table + */ + for (rn = route_top (rt); rn; rn = route_next (rn)) + { + if (rn->info) + { + vnc_zebra_route_msg (&rn->p, nexthop_count, nhp_ary, add); + } + } + } + if (nhp_ary) + XFREE (MTYPE_TMP, nhp_ary); + if (nh_ary) + XFREE (MTYPE_TMP, nh_ary); + } +} + +void +vnc_zebra_add_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg) +{ + vnc_zebra_add_del_group_afi (bgp, rfg, AFI_IP, 1); + vnc_zebra_add_del_group_afi (bgp, rfg, AFI_IP6, 1); +} + +void +vnc_zebra_del_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg) +{ + zlog_debug ("%s: entry", __func__); + vnc_zebra_add_del_group_afi (bgp, rfg, AFI_IP, 0); + vnc_zebra_add_del_group_afi (bgp, rfg, AFI_IP6, 0); +} + +void +vnc_zebra_reexport_group_afi ( + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg, + afi_t afi) +{ + struct listnode *node; + struct rfapi_rfg_name *rfgn; + + for (ALL_LIST_ELEMENTS_RO (bgp->rfapi_cfg->rfg_export_zebra_l, node, rfgn)) + { + + if (rfgn->rfg == rfg) + { + vnc_zebra_add_del_group_afi (bgp, rfg, afi, 0); + vnc_zebra_add_del_group_afi (bgp, rfg, afi, 1); + break; + } + } +} + + +/*********************************************************************** + * CONTROL INTERFACE + ***********************************************************************/ + + +/* Other routes 
redistribution into BGP. */ +int +vnc_redistribute_set (struct bgp *bgp, afi_t afi, int type) +{ + if (!bgp->rfapi_cfg) + { + return CMD_WARNING; + } + + /* Set flag to BGP instance. */ + bgp->rfapi_cfg->redist[afi][type] = 1; + +// bgp->redist[afi][type] = 1; + + /* Return if already redistribute flag is set. */ + if (zclient_vnc->redist[afi][type]) + return CMD_WARNING; + + vrf_bitmap_set (zclient_vnc->redist[afi][type], VRF_DEFAULT); + + //zclient_vnc->redist[afi][type] = 1; + + /* Return if zebra connection is not established. */ + if (zclient_vnc->sock < 0) + return CMD_WARNING; + + if (BGP_DEBUG (zebra, ZEBRA)) + zlog_debug ("Zebra send: redistribute add %s", zebra_route_string (type)); + + /* Send distribute add message to zebra. */ + zebra_redistribute_send (ZEBRA_REDISTRIBUTE_ADD, zclient_vnc, afi, type, 0, VRF_DEFAULT); + + return CMD_SUCCESS; +} + +/* Unset redistribution. */ +int +vnc_redistribute_unset (struct bgp *bgp, afi_t afi, int type) +{ + zlog_debug ("%s: type=%d entry", __func__, type); + + if (!bgp->rfapi_cfg) + { + zlog_debug ("%s: return (no rfapi_cfg)", __func__); + return CMD_WARNING; + } + + /* Unset flag from BGP instance. */ + bgp->rfapi_cfg->redist[afi][type] = 0; + + /* Return if zebra connection is disabled. */ + if (!zclient_vnc->redist[afi][type]) + return CMD_WARNING; + zclient_vnc->redist[afi][type] = 0; + + if (bgp->rfapi_cfg->redist[AFI_IP][type] == 0 + && bgp->rfapi_cfg->redist[AFI_IP6][type] == 0 && zclient_vnc->sock >= 0) + { + /* Send distribute delete message to zebra. */ + if (BGP_DEBUG (zebra, ZEBRA)) + zlog_debug ("Zebra send: redistribute delete %s", + zebra_route_string (type)); + zebra_redistribute_send (ZEBRA_REDISTRIBUTE_DELETE, zclient_vnc, afi, type, + 0, VRF_DEFAULT); + } + + /* Withdraw redistributed routes from current BGP's routing table. */ + vnc_redistribute_withdraw (bgp, afi, type); + + zlog_debug ("%s: return", __func__); + + return CMD_SUCCESS; +} + + +/* + * Modeled after bgp_zebra.c'bgp_zebra_init() + * Charriere asks, "Is it possible to carry two?" + */ +void +vnc_zebra_init (struct thread_master *master) +{ + /* Set default values. */ + zclient_vnc = zclient_new (master); + zclient_init (zclient_vnc, ZEBRA_ROUTE_VNC, 0); + + zclient_vnc->redistribute_route_ipv4_add = vnc_zebra_read_ipv4; + zclient_vnc->redistribute_route_ipv4_del = vnc_zebra_read_ipv4; + zclient_vnc->redistribute_route_ipv6_add = vnc_zebra_read_ipv6; + zclient_vnc->redistribute_route_ipv6_del = vnc_zebra_read_ipv6; +} + +void +vnc_zebra_destroy (void) +{ + if (zclient_vnc == NULL) + return; + zclient_stop (zclient_vnc); + zclient_free (zclient_vnc); + zclient_vnc = NULL; +} diff --git a/bgpd/rfapi/vnc_zebra.h b/bgpd/rfapi/vnc_zebra.h new file mode 100644 index 0000000000..ad24844423 --- /dev/null +++ b/bgpd/rfapi/vnc_zebra.h @@ -0,0 +1,67 @@ +/* + * + * Copyright 2009-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* + * File: vnc_zebra.h + */ + +#ifndef _QUAGGA_BGP_VNC_ZEBRA_H +#define _QUAGGA_BGP_VNC_ZEBRA_H + +#include "lib/zebra.h" + +extern void +vnc_zebra_add_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn); + +extern void +vnc_zebra_del_prefix ( + struct bgp *bgp, + struct rfapi_import_table *import_table, + struct route_node *rn); + +extern void +vnc_zebra_add_nve (struct bgp *bgp, struct rfapi_descriptor *rfd); + +extern void +vnc_zebra_del_nve (struct bgp *bgp, struct rfapi_descriptor *rfd); + +extern void +vnc_zebra_add_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg); + +extern void +vnc_zebra_del_group (struct bgp *bgp, struct rfapi_nve_group_cfg *rfg); + +extern void +vnc_zebra_reexport_group_afi ( + struct bgp *bgp, + struct rfapi_nve_group_cfg *rfg, + afi_t afi); + +extern int +vnc_redistribute_set (struct bgp *bgp, afi_t afi, int type); + +extern int +vnc_redistribute_unset (struct bgp *bgp, afi_t afi, int type); + +#endif /* _QUAGGA_BGP_VNC_ZEBRA_H */ diff --git a/bgpd/rfp-example/librfp/Makefile.am b/bgpd/rfp-example/librfp/Makefile.am new file mode 100644 index 0000000000..fc66a40f00 --- /dev/null +++ b/bgpd/rfp-example/librfp/Makefile.am @@ -0,0 +1,40 @@ +# +# This file has been modified by LabN Consulting, L.L.C. +# +# +## Process this file with automake to produce Makefile.in. + +if ENABLE_BGP_VNC +BGP_VNC_RFAPI_INC=-I$(top_srcdir)/bgpd/rfapi +BGP_VNC_RFP_LIBDIR=. +BGP_VNC_RFP_INCDIR=$(BGP_VNC_RFP_LIBDIR) +BGP_VNC_RFP_LIB=librfp.a +BGP_VNC_RFP_INC=-I$(BGP_VNC_RFP_INCDIR) + +librfp_a_SOURCES = \ + rfp_example.c + +librfp_a_INCLUDES = \ + rfp.h \ + rfp_internal.h + +else +BGP_VNC_RFAPI_INC= +BGP_VNC_RFAPI_SRC= +BGP_VNC_RFP_LIB= +BGP_VNC_RFP_INC= +endif + +AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/lib \ + -I$(top_builddir) -I$(top_builddir)/lib \ + $(BGP_VNC_RFAPI_INC) $(BGP_VNC_RFP_INC) +DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" +INSTALL_SDATA=@INSTALL@ -m 600 + +AM_CFLAGS = $(PICFLAGS) +AM_LDFLAGS = $(PILDFLAGS) + +noinst_LIBRARIES = $(BGP_VNC_RFP_LIB) + +noinst_HEADERS = \ + $(librfp_a_INCLUDES) diff --git a/bgpd/rfp-example/librfp/rfp.h b/bgpd/rfp-example/librfp/rfp.h new file mode 100644 index 0000000000..91dbf5e71f --- /dev/null +++ b/bgpd/rfp-example/librfp/rfp.h @@ -0,0 +1,31 @@ +/* + * + * Copyright 2015-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +/* Sample header file */ +#ifndef _RFP_H +#define _RFP_H + +#include "bgpd/rfapi/rfapi.h" +extern int bgp_rfp_cfg_write (void *vty, void *bgp); +/* TO BE REMOVED */ +void rfp_clear_vnc_nve_all (void); + +#endif /* _RFP_H */ diff --git a/bgpd/rfp-example/librfp/rfp_example.c b/bgpd/rfp-example/librfp/rfp_example.c new file mode 100644 index 0000000000..e8b546ddf4 --- /dev/null +++ b/bgpd/rfp-example/librfp/rfp_example.c @@ -0,0 +1,286 @@ +/* + * + * Copyright 2015-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* stub rfp */ +#include "rfp_internal.h" +#include "bgpd/rfapi/rfapi.h" +#include "lib/command.h" + +struct rfp_instance_t +{ + struct rfapi_rfp_cfg rfapi_config; + struct rfapi_rfp_cb_methods rfapi_callbacks; + struct thread_master *master; + uint32_t config_var; +}; + +struct rfp_instance_t global_rfi; /* dynamically allocate in full implementation */ + +/*********************************************************************** + * Sample VTY / internal function + **********************************************************************/ +#define RFP_SHOW_STR "RFP information\n" +DEFUN (rfp_example_config_value, + rfp_example_config_value_cmd, + "rfp example-config-value VALUE", + RFP_SHOW_STR "Example value to be configured\n") +{ + uint32_t value = 0; + struct rfp_instance_t *rfi = NULL; + rfi = rfapi_get_rfp_start_val (vty->index); /* index=bgp for BGP_NODE */ + assert (rfi != NULL); + + VTY_GET_INTEGER ("Example value", value, argv[0]); + if (rfi) + rfi->config_var = value; + return CMD_SUCCESS; +} + +static void +rfp_vty_install () +{ + static int installed = 0; + if (installed) /* do this only once */ + return; + installed = 1; + /* example of new cli command */ + install_element (BGP_NODE, &rfp_example_config_value_cmd); +} + +/*********************************************************************** + * RFAPI Callbacks + **********************************************************************/ + +/*------------------------------------------ + * rfp_response_cb + * + * Callbacks of this type are used to provide asynchronous + * route updates from RFAPI to the RFP client. + * + * response_cb + * called to notify the rfp client that a next hop list + * that has previously been provided in response to an + * rfapi_query call has been updated. Deleted routes are indicated + * with lifetime==RFAPI_REMOVE_RESPONSE_LIFETIME. + * + * By default, the routes an NVE receives via this callback include + * its own routes (that it has registered). However, these may be + * filtered out if the global BGP_VNC_CONFIG_FILTER_SELF_FROM_RSP + * flag is set. + * + * input: + * next_hops a list of possible next hops. + * This is a linked list allocated within the + * rfapi. 
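For concreteness, a minimal sketch of what a non-stub response_cb might look like. This is not part of the patch: the member names next and lifetime are assumptions about struct rfapi_next_hop_entry (only the list-and-free contract is stated in the surrounding documentation), the function name example_response_cb is invented, and the actual route processing is left as comments.

    #include "bgpd/rfapi/rfapi.h"

    /* Hypothetical RFP client handler: walk the updated next-hop list,
       then release it as required by the RFAPI contract. */
    static void
    example_response_cb (struct rfapi_next_hop_entry *next_hops, void *userdata)
    {
      struct rfapi_next_hop_entry *nhe;

      /* userdata is the cookie passed to rfapi_open(); identify the NVE here */

      for (nhe = next_hops; nhe; nhe = nhe->next)     /* 'next' assumed */
        {
          if (nhe->lifetime == RFAPI_REMOVE_RESPONSE_LIFETIME)  /* 'lifetime' assumed */
            {
              /* route withdrawn: remove it from local forwarding state */
            }
          else
            {
              /* route added or updated: install it locally */
            }
        }

      /* the callback owns the list and must free it */
      rfapi_free_next_hop_list (next_hops);
    }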
The response_cb callback function is responsible + * for freeing this memory via rfapi_free_next_hop_list() + * in order to avoid memory leaks. + * + * userdata value (cookie) originally specified in call to + * rfapi_open() + * + *------------------------------------------*/ +static void +rfp_response_cb (struct rfapi_next_hop_entry *next_hops, void *userdata) +{ + /* + * Identify NVE based on userdata, which is a value passed + * to RFAPI in the rfapi_open call + */ + + /* process list of next_hops */ + + /* free next hops */ + rfapi_free_next_hop_list (next_hops); + return; +} + +/*------------------------------------------ + * rfp_local_cb + * + * Callbacks of this type are used to provide asynchronous + * route updates from RFAPI to the RFP client. + * + * local_cb + * called to notify the rfp client that a local route + * has been added or deleted. Deleted routes are indicated + * with lifetime==RFAPI_REMOVE_RESPONSE_LIFETIME. + * + * input: + * next_hops a list of possible next hops. + * This is a linked list allocated within the + * rfapi. The local_cb callback function is responsible + * for freeing this memory via rfapi_free_next_hop_list() + * in order to avoid memory leaks. + * + * userdata value (cookie) originally specified in call to + * rfapi_open() + * + *------------------------------------------*/ +static void +rfp_local_cb (struct rfapi_next_hop_entry *next_hops, void *userdata) +{ + /* + * Identify NVE based on userdata, which is a value passed + * to RFAPI in the rfapi_open call + */ + + /* process list of local next_hops */ + + /* free next hops */ + rfapi_free_next_hop_list (next_hops); + return; +} + +/*------------------------------------------ + * rfp_close_cb + * + * Callbacks used to provide asynchronous + * notification that an rfapi_handle was invalidated + * + * input: + * pHandle Firmerly valid rfapi_handle returned to + * client via rfapi_open(). + * + * reason EIDRM handle administratively closed (clear nve ...) + * ESTALE handle invalidated by configuration change + * + *------------------------------------------*/ +static void +rfp_close_cb (rfapi_handle pHandle, int reason) +{ + /* close / invalidate NVE with the pHandle returned by the rfapi_open call */ + return; +} + +/*------------------------------------------ + * rfp_cfg_write_cb + * + * This callback is used to generate output for any config parameters + * that may supported by RFP via RFP defined vty commands at the bgp + * level. See loglevel as an example. 
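The reason codes passed to rfp_close_cb (described a few lines up) are ordinary errno values, so a real client typically branches on them. The sketch below is illustrative only; example_close_cb and the commented-out handle lookup are hypothetical, while EIDRM, ESTALE and rfapi_open() are the symbols named in the callback documentation.

    #include <errno.h>
    #include "bgpd/rfapi/rfapi.h"     /* rfapi_handle */

    /* Hypothetical handler for handle invalidation. */
    static void
    example_close_cb (rfapi_handle pHandle, int reason)
    {
      /* struct my_nve *nve = my_nve_lookup_by_handle (pHandle);  (hypothetical) */

      switch (reason)
        {
        case EIDRM:
          /* administratively closed ("clear nve ..."): drop local state */
          break;
        case ESTALE:
          /* invalidated by a configuration change: drop state and, if the
             NVE is still wanted, obtain a new handle via rfapi_open() */
          break;
        default:
          break;
        }
    }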
+ * + * input: + * vty -- quagga vty context + * rfp_start_val -- value returned by rfp_start + * + * output: + * to vty, rfp related configuration + * + * return value: + * lines written +--------------------------------------------*/ +static int +rfp_cfg_write_cb (struct vty *vty, void *rfp_start_val) +{ + struct rfp_instance_t *rfi = rfp_start_val; + int write = 0; + assert (rfp_start_val != NULL); + if (rfi->config_var != 0) + { + vty_out (vty, " rfp example-config-value %u", rfi->config_var); + vty_out (vty, "%s", VTY_NEWLINE); + write++; + } + + return write; +} + +/*********************************************************************** + * RFAPI required functions + **********************************************************************/ + +/*------------------------------------------ + * rfp_start + * + * This function will start the RFP code + * + * input: + * master quagga thread_master to tie into bgpd threads + * + * output: + * cfgp Pointer to rfapi_rfp_cfg (null = use defaults), + * copied by caller, updated via rfp_set_configuration + * cbmp Pointer to rfapi_rfp_cb_methods, may be null + * copied by caller, updated via rfapi_rfp_set_cb_methods + * + * return value: + * rfp_start_val rfp returned value passed on rfp_stop and rfp_cfg_write + * +--------------------------------------------*/ +void * +rfp_start (struct thread_master *master, + struct rfapi_rfp_cfg **cfgp, struct rfapi_rfp_cb_methods **cbmp) +{ + memset (&global_rfi, 0, sizeof (struct rfp_instance_t)); + global_rfi.master = master; /* for BGPD threads */ + + /* initilize struct rfapi_rfp_cfg, see rfapi.h */ + global_rfi.rfapi_config.download_type = RFAPI_RFP_DOWNLOAD_FULL; /* default=partial */ + global_rfi.rfapi_config.ftd_advertisement_interval = + RFAPI_RFP_CFG_DEFAULT_FTD_ADVERTISEMENT_INTERVAL; + global_rfi.rfapi_config.holddown_factor = 0; /* default: RFAPI_RFP_CFG_DEFAULT_HOLDDOWN_FACTOR */ + global_rfi.rfapi_config.use_updated_response = 1; /* 0=no */ + global_rfi.rfapi_config.use_removes = 1; /* 0=no */ + + + /* initilize structrfapi_rfp_cb_methods , see rfapi.h */ + global_rfi.rfapi_callbacks.cfg_cb = rfp_cfg_write_cb; + /* no group config */ + global_rfi.rfapi_callbacks.response_cb = rfp_response_cb; + global_rfi.rfapi_callbacks.local_cb = rfp_local_cb; + global_rfi.rfapi_callbacks.close_cb = rfp_close_cb; + + if (cfgp != NULL) + *cfgp = &global_rfi.rfapi_config; + if (cbmp != NULL) + *cbmp = &global_rfi.rfapi_callbacks; + + rfp_vty_install (); + + return &global_rfi; +} + +/*------------------------------------------ + * rfp_stop + * + * This function is called on shutdown to trigger RFP cleanup + * + * input: + * none + * + * output: + * none + * + * return value: + * rfp_start_val +--------------------------------------------*/ +void +rfp_stop (void *rfp_start_val) +{ + assert (rfp_start_val != NULL); +} + +/* TO BE REMOVED */ +void +rfp_clear_vnc_nve_all (void) +{ + return; +} diff --git a/bgpd/rfp-example/librfp/rfp_internal.h b/bgpd/rfp-example/librfp/rfp_internal.h new file mode 100644 index 0000000000..64452d2397 --- /dev/null +++ b/bgpd/rfp-example/librfp/rfp_internal.h @@ -0,0 +1,29 @@ +/* + * + * Copyright 2015-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* Sample header file */ +#ifndef _RFP_INTERNAL_H +#define _RFP_INTERNAL_H +#include "lib/zebra.h" +#include "rfp.h" +#include "bgpd/rfapi/rfapi.h" + +#endif /* _RFP_INTERNAL_H */ diff --git a/bgpd/rfp-example/rfptest/Makefile.am b/bgpd/rfp-example/rfptest/Makefile.am new file mode 100644 index 0000000000..a1001e4ef1 --- /dev/null +++ b/bgpd/rfp-example/rfptest/Makefile.am @@ -0,0 +1,52 @@ +# +# This file has been modified by LabN Consulting, L.L.C. +# +# +## Process this file with automake to produce Makefile.in. + +if ENABLE_BGP_VNC +BGP_VNC_RFAPI_INC=-I$(top_srcdir)/bgpd/rfapi +BGP_VNC_RFP_LIBDIR=../librfp +BGP_VNC_RFP_INCDIR=$(BGP_VNC_RFP_LIBDIR) +BGP_VNC_RFP_LIB=$(BGP_VNC_RFP_LIBDIR)/librfp.a +BGP_VNC_RFP_INC=-I$(BGP_VNC_RFP_INCDIR) + +rfptest_SOURCES = \ + rfptest.c + +rfptest_INCLUDES = \ + rfptest.h + + +RFPTEST_BIN = rfptest + +else +BGP_VNC_RFAPI_INC= +BGP_VNC_RFAPI_SRC= +BGP_VNC_RFP_LIB= +BGP_VNC_RFP_INC= +RFPTEST_BIN= +endif + +AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/lib \ + -I$(top_builddir) -I$(top_builddir)/lib \ + $(BGP_VNC_RFAPI_INC) $(BGP_VNC_RFP_INC) + +DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" +INSTALL_SDATA=@INSTALL@ -m 600 + + +AM_CFLAGS = $(PICFLAGS) +AM_LDFLAGS = $(PILDFLAGS) + + +noinst_HEADERS = \ + $(rfptest_INCLUDES) + +noinst_LIBRARIES = +sbin_PROGRAMS = $(RFPTEST_BIN) + +examplesdir = $(exampledir) + +rfptest_LDADD = $(top_builddir)/lib/libzebra.la $(BGP_VNC_RFP_LIB) +dist_examples_DATA = diff --git a/bgpd/rfp-example/rfptest/rfptest.c b/bgpd/rfp-example/rfptest/rfptest.c new file mode 100644 index 0000000000..39b798e516 --- /dev/null +++ b/bgpd/rfp-example/rfptest/rfptest.c @@ -0,0 +1,32 @@ +/* + * + * Copyright 2015-2016, LabN Consulting, L.L.C. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + + +/* dummy test program */ +#include +#include +#include "rfptest.h" +int +main () +{ + printf ("Your test code goes here.\n"); + exit (1); +} diff --git a/bgpd/rfp-example/rfptest/rfptest.h b/bgpd/rfp-example/rfptest/rfptest.h new file mode 100644 index 0000000000..00effb8673 --- /dev/null +++ b/bgpd/rfp-example/rfptest/rfptest.h @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015-2016, LabN Consulting, L.L.C. 
+ * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* Sample header file */ +#ifndef _RFPTEST_H +#define _RFPTEST_H + +#endif /* _RFPTEST_H */ diff --git a/common.am b/common.am new file mode 100644 index 0000000000..ac7a3230da --- /dev/null +++ b/common.am @@ -0,0 +1,41 @@ +# +# Automake fragment intended to be shared by Makefile.am files in the +# tree. +# + +if HAVE_PROTOBUF + +# Uncomment to use an non-system version of libprotobuf-c. +# +# Q_PROTOBUF_C_CLIENT_INCLUDES = -I$(top_srcdir)/third-party/protobuf-c/src +# Q_PROTOBUF_C_CLIENT_LDOPTS = $(top_builddir)/third-party/protobuf-c/src/libprotobuf-c.la + +Q_PROTOBUF_C_CLIENT_INCLUDES= +Q_PROTOBUF_C_CLIENT_LDOPTS=-lprotobuf-c + +Q_PROTOC=protoc +Q_PROTOC_C=protoc-c + +Q_PROTOBUF_CFILES = $(filter %.pb-c.c,$(SOURCES)) + +Q_PROTOBUF_SRCS = $(Q_PROTOBUF_CFILES) $(Q_PROTOBUF_HFILES) + +# Rules +%.pb.h: %.proto + $(Q_PROTOC) $(PROTOBUF_INCLUDES) --cpp_out=$(top_srcdir) $(top_srcdir)/$(PROTOBUF_PACKAGE)/$^ + +%.pb-c.c %.pb-c.h: %.proto + $(Q_PROTOC_C) $(PROTOBUF_INCLUDES) --c_out=$(top_srcdir) $(top_srcdir)/$(PROTOBUF_PACKAGE)/$^ + +# +# Information about how to link to various libraries. +# +Q_QUAGGA_PB_CLIENT_LDOPTS = $(top_srcdir)/qpb/libquagga_pb.la $(Q_PROTOBUF_C_CLIENT_LDOPTS) + +Q_FPM_PB_CLIENT_LDOPTS = $(top_srcdir)/fpm/libfpm_pb.la $(Q_QUAGGA_PB_CLIENT_LDOPTS) + +endif # HAVE_PROTOBUF + +Q_CLEANFILES = $(Q_PROTOBUF_SRCS) + +Q_BUILT_SRCS = $(Q_PROTOBUF_SRCS) diff --git a/configure.ac b/configure.ac index be8d27fbf6..cf0628d15b 100755 --- a/configure.ac +++ b/configure.ac @@ -7,7 +7,7 @@ ## AC_PREREQ(2.60) -AC_INIT(Quagga, 0.99.24+cl3u3, [https://bugzilla.quagga.net]) +AC_INIT(Quagga, 0.99.24+cl3u4, [https://bugzilla.quagga.net]) CONFIG_ARGS="$*" AC_SUBST(CONFIG_ARGS) AC_CONFIG_SRCDIR(lib/zebra.h) @@ -20,7 +20,9 @@ AC_CANONICAL_BUILD() AC_CANONICAL_HOST() AC_CANONICAL_TARGET() -AM_INIT_AUTOMAKE(1.6) +# Disable portability warnings -- our automake code (in particular +# common.am) uses some constructs specific to gmake. 
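The protobuf-c pattern rules in common.am generate foo.pb-c.c/foo.pb-c.h from each foo.proto and link consumers against -lprotobuf-c. As a rough sketch of the C side (not part of the patch; the helper name is invented and no specific qpb/fpm message is assumed), every generated message type embeds a ProtobufCMessage base, so generic serialization code can be written against the header that configure checks for:

    #include <stdint.h>
    #include <stdlib.h>
    #include <google/protobuf-c/protobuf-c.h>

    /* Pack any generated protobuf-c message into a freshly malloc'd buffer.
       The caller frees *buf.  Returns the packed length, or 0 on failure. */
    static size_t
    example_pack_message (const ProtobufCMessage *msg, uint8_t **buf)
    {
      size_t len = protobuf_c_message_get_packed_size (msg);

      *buf = malloc (len);
      if (*buf == NULL)
        return 0;

      return protobuf_c_message_pack (msg, *buf);
    }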
+AM_INIT_AUTOMAKE([1.6 -Wno-portability]) m4_ifndef([AM_SILENT_RULES], [m4_define([AM_SILENT_RULES],[])]) AM_SILENT_RULES([yes]) AC_CONFIG_HEADERS(config.h) @@ -246,6 +248,8 @@ AC_ARG_ENABLE(ospfd, AS_HELP_STRING([--disable-ospfd], [do not build ospfd])) AC_ARG_ENABLE(ospf6d, AS_HELP_STRING([--disable-ospf6d], [do not build ospf6d])) +AC_ARG_ENABLE(ldpd, + AS_HELP_STRING([--enable-ldpd], [build ldpd])) AC_ARG_ENABLE(watchquagga, AS_HELP_STRING([--disable-watchquagga], [do not build watchquagga])) AC_ARG_ENABLE(isisd, @@ -254,6 +258,10 @@ AC_ARG_ENABLE(pimd, AS_HELP_STRING([--disable-pimd], [do not build pimd])) AC_ARG_ENABLE(bgp-announce, AS_HELP_STRING([--disable-bgp-announce,], [turn off BGP route announcement])) +AC_ARG_ENABLE(bgp-vnc, + AS_HELP_STRING([--disable-bgp-vnc],[turn off BGP VNC support])) +AC_ARG_WITH(rfp-path, + AS_HELP_STRING([--with-rfp-path[=DIR]],[path to replaced stub RFP used with BGP VNC])) AC_ARG_ENABLE(snmp, AS_HELP_STRING([--enable-snmp=ARG], [enable SNMP support (smux or agentx)])) AC_ARG_WITH(libpam, @@ -313,6 +321,8 @@ AC_ARG_ENABLE(cumulus, AS_HELP_STRING([--enable-cumulus], [enable Cumulus Switch Special Extensions])) AC_ARG_ENABLE(rr-semantics, AS_HELP_STRING([--disable-rr-semantics], [disable the v6 Route Replace semantics])) +AC_ARG_ENABLE([protobuf], + AS_HELP_STRING([--enable-protobuf], [Enable experimental protobuf support])) AC_CHECK_HEADERS(json-c/json.h) AC_CHECK_LIB(json-c, json_object_get, LIBS="$LIBS -ljson-c") @@ -323,6 +333,9 @@ if test $ac_cv_lib_json_c_json_object_get = no; then fi fi +AC_ARG_ENABLE([dev_build], + AS_HELP_STRING([--enable-dev-build], [build for development])) + if test x"${enable_gcc_rdynamic}" != x"no" ; then if test x"${enable_gcc_rdynamic}" = x"yes" -o x"$COMPILER" = x"GCC"; then LDFLAGS="${LDFLAGS} -rdynamic" @@ -358,6 +371,26 @@ if test "${enable_poll}" = "yes" ; then AC_DEFINE(HAVE_POLL,,Compile systemd support in) fi +dnl ---------- +dnl MPLS check +dnl ---------- +AC_MSG_CHECKING(whether this OS has MPLS stack) +case "$host" in + *-linux*) + MPLS_METHOD="zebra_mpls_netlink.o" + AC_MSG_RESULT(Linux MPLS) + ;; + *-openbsd*) + MPLS_METHOD="zebra_mpls_openbsd.o" + AC_MSG_RESULT(OpenBSD MPLS) + ;; + *) + MPLS_METHOD="zebra_mpls_null.o" + AC_MSG_RESULT(Unsupported kernel) + ;; +esac +AC_SUBST(MPLS_METHOD) + if test "${enable_cumulus}" = "yes" ; then AC_DEFINE(HAVE_CUMULUS,,Compile Special Cumulus Code in) fi @@ -370,6 +403,53 @@ if test "${enable_fpm}" = "yes"; then AC_DEFINE(HAVE_FPM,,Forwarding Plane Manager support) fi +if test "x${enable_dev_build}" = "xyes"; then + AC_DEFINE(DEV_BUILD,,Build for development) +fi +AM_CONDITIONAL([DEV_BUILD], [test "x$enable_dev_build" = "xyes"]) + +# +# Logic for protobuf support. +# +if test "$enable_protobuf" = "yes"; then + have_protobuf=yes + + # Check for protoc-c + AC_CHECK_PROG([PROTOC_C], [protoc-c], [protoc-c], [/bin/false]) + if test "x$PROTOC_C" = "x/bin/false"; then + have_protobuf=no + else + found_protobuf_c=no + PKG_CHECK_MODULES([PROTOBUF_C], libprotobuf-c >= 0.14, + [found_protobuf_c=yes], + [AC_MSG_RESULT([pkg-config did not find libprotobuf-c])]) + + if test "x$found_protobuf_c" = "xyes"; then + LDFLAGS="$LDFLAGS $PROTOBUF_C_LIBS" + CFLAGS="$CFLAGS $PROTOBUF_C_CFLAGS" + else + AC_CHECK_HEADER([google/protobuf-c/protobuf-c.h], [], + [have_protobuf=no; AC_MSG_RESULT([Couldn't find google/protobuf-c.h])]) + fi + fi +fi + +# Fail if the user explicity enabled protobuf support and we couldn't +# find the compiler or libraries. 
+if test "x$have_protobuf" = "xno" && test "x$enable_protobuf" = "xyes"; then + AC_MSG_ERROR([Protobuf enabled explicitly but can't find libraries/tools]) +fi + +if test "x$have_protobuf" = "xyes"; then + AC_DEFINE(HAVE_PROTOBUF,, protobuf) +fi + +AM_CONDITIONAL([HAVE_PROTOBUF], [test "x$have_protobuf" = "xyes"]) + +# +# End of logic for protobuf support. +# + if test "${enable_tcp_zebra}" = "yes"; then AC_DEFINE(HAVE_TCP_ZEBRA,,Use TCP for zebra communication) fi @@ -838,7 +918,7 @@ AC_CHECK_FUNCS([dup2 ftruncate getcwd gethostbyname getpagesize gettimeofday \ strtol strtoul strlcat strlcpy \ daemon snprintf vsnprintf \ if_nametoindex if_indextoname getifaddrs \ - uname fcntl getgrouplist]) + uname fcntl getgrouplist pledge]) AC_CHECK_HEADER([asm-generic/unistd.h], [AC_CHECK_DECL(__NR_setns, @@ -1231,6 +1311,13 @@ else fi AM_CONDITIONAL(OSPFD, test "x$OSPFD" = "xospfd") +if test "${enable_ldpd}" = "yes";then + LDPD="ldpd" +else + LDPD="" +fi +AM_CONDITIONAL(LDPD, test "x$LDPD" = "xldpd") + if test "${enable_watchquagga}" = "no";then WATCHQUAGGA="" else @@ -1279,13 +1366,38 @@ else AC_DEFINE(DISABLE_BGP_ANNOUNCE,0,Disable BGP installation to zebra) fi +if test "${with_rfp_path}" = "yes" || test x"${with_rfp_path}" = x""; then + with_rfp_path="bgpd/rfp-example" +fi +if test "${with_rfp_path}" != "no"; then + VNC_RFP_PATH="${with_rfp_path}" + AC_SUBST(VNC_RFP_PATH) +fi + +if test "${enable_bgp_vnc}" != "no";then + AC_DEFINE(ENABLE_BGP_VNC,1,Enable BGP VNC support) + RFPTEST="${with_rfp_path}/rfptest" + LIBRFP="${with_rfp_path}/librfp" + RFPINC="${with_rfp_path}/librfp" +else + RFPTEST= + LIBRFP= + RFPINC="bgpd/rfp-example/librfp" +fi +# set +AM_CONDITIONAL([ENABLE_BGP_VNC], [test x${enable_bgp_vnc} != xno]) + AC_SUBST(DOC) AC_SUBST(ZEBRA) +AC_SUBST(RFPTEST) +AC_SUBST(LIBRFP) +AC_SUBST(RFPINC) AC_SUBST(BGPD) AC_SUBST(RIPD) AC_SUBST(RIPNGD) AC_SUBST(OSPFD) AC_SUBST(OSPF6D) +AC_SUBST(LDPD) AC_SUBST(WATCHQUAGGA) AC_SUBST(ISISD) AC_SUBST(PIMD) @@ -1432,6 +1544,32 @@ AC_TRY_COMPILE([#include ], [ AC_MSG_RESULT(no) ]) +dnl ---------------------- +dnl checking for SO_BINDANY +dnl ---------------------- +AC_MSG_CHECKING(for SO_BINDANY) +AC_TRY_COMPILE([#include ], [ + int opt = SO_BINDANY; +], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_SO_BINDANY, 1, [Have SO_BINDANY]) +], [ + AC_MSG_RESULT(no) +]) + +dnl ---------------------- +dnl checking for IP_FREEBIND +dnl ---------------------- +AC_MSG_CHECKING(for IP_FREEBIND) +AC_TRY_COMPILE([#include ], [ + int opt = IP_FREEBIND; +], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_IP_FREEBIND, 1, [Have IP_FREEBIND]) +], [ + AC_MSG_RESULT(no) +]) + dnl -------------------------------------- dnl checking for getrusage struct and call dnl -------------------------------------- @@ -1591,6 +1729,8 @@ AC_DEFINE_UNQUOTED(PATH_RIPNGD_PID, "$quagga_statedir/ripngd.pid",ripngd PID) AC_DEFINE_UNQUOTED(PATH_BGPD_PID, "$quagga_statedir/bgpd.pid",bgpd PID) AC_DEFINE_UNQUOTED(PATH_OSPFD_PID, "$quagga_statedir/ospfd.pid",ospfd PID) AC_DEFINE_UNQUOTED(PATH_OSPF6D_PID, "$quagga_statedir/ospf6d.pid",ospf6d PID) +AC_DEFINE_UNQUOTED(PATH_LDPD_PID, "$quagga_statedir/ldpd.pid",ldpd PID) +AC_DEFINE_UNQUOTED(LDPD_SOCKET, "$quagga_statedir/ldpd.sock",ldpd control socket) AC_DEFINE_UNQUOTED(PATH_ISISD_PID, "$quagga_statedir/isisd.pid",isisd PID) AC_DEFINE_UNQUOTED(PATH_PIMD_PID, "$quagga_statedir/pimd.pid",pimd PID) AC_DEFINE_UNQUOTED(PATH_WATCHQUAGGA_PID, "$quagga_statedir/watchquagga.pid",watchquagga PID) @@ -1601,6 +1741,7 @@ AC_DEFINE_UNQUOTED(RIPNG_VTYSH_PATH, 
"$quagga_statedir/ripngd.vty",ripng vty soc AC_DEFINE_UNQUOTED(BGP_VTYSH_PATH, "$quagga_statedir/bgpd.vty",bgpd vty socket) AC_DEFINE_UNQUOTED(OSPF_VTYSH_PATH, "$quagga_statedir/ospfd.vty",ospfd vty socket) AC_DEFINE_UNQUOTED(OSPF6_VTYSH_PATH, "$quagga_statedir/ospf6d.vty",ospf6d vty socket) +AC_DEFINE_UNQUOTED(LDP_VTYSH_PATH, "$quagga_statedir/ldpd.vty",ldpd vty socket) AC_DEFINE_UNQUOTED(ISIS_VTYSH_PATH, "$quagga_statedir/isisd.vty",isisd vty socket) AC_DEFINE_UNQUOTED(PIM_VTYSH_PATH, "$quagga_statedir/pimd.vty",pimd vty socket) AC_DEFINE_UNQUOTED(DAEMON_VTY_DIR, "$quagga_statedir",daemon vty directory) @@ -1624,9 +1765,9 @@ AC_CACHE_VAL(ac_cv_htonl_works, ) AC_MSG_RESULT($ac_cv_htonl_works) -AC_CONFIG_FILES([Makefile lib/Makefile zebra/Makefile ripd/Makefile +AC_CONFIG_FILES([Makefile lib/Makefile qpb/Makefile zebra/Makefile ripd/Makefile ripngd/Makefile bgpd/Makefile ospfd/Makefile watchquagga/Makefile - ospf6d/Makefile isisd/Makefile vtysh/Makefile + ospf6d/Makefile ldpd/Makefile isisd/Makefile vtysh/Makefile doc/Makefile ospfclient/Makefile tests/Makefile m4/Makefile pimd/Makefile tests/bgpd.tests/Makefile @@ -1635,15 +1776,26 @@ AC_CONFIG_FILES([Makefile lib/Makefile zebra/Makefile ripd/Makefile tools/Makefile cumulus/Makefile pkgsrc/Makefile + fpm/Makefile redhat/quagga.spec lib/version.h doc/defines.texi isisd/topology/Makefile pkgsrc/bgpd.sh pkgsrc/ospf6d.sh pkgsrc/ospfd.sh pkgsrc/ripd.sh pkgsrc/ripngd.sh pkgsrc/zebra.sh]) + +if test "${enable_bgp_vnc}" != "no"; then + if test "${with_rfp_path}" = "bgpd/rfp-example" ; then + AC_CONFIG_FILES([bgpd/rfp-example/rfptest/Makefile bgpd/rfp-example/librfp/Makefile]) + else + AC_CONFIG_FILES([${with_rfp_path}/rfptest/Makefile ${with_rfp_path}/librfp/Makefile]) + fi +fi + AC_CONFIG_FILES([solaris/Makefile]) AC_CONFIG_FILES([vtysh/extract.pl],[chmod +x vtysh/extract.pl]) + ## Hack, but working solution to avoid rebuilding of quagga.info. ## It's already in CVS until texinfo 4.7 is more common. AC_OUTPUT @@ -1666,6 +1818,7 @@ group to run as : ${enable_group} group for vty sockets : ${enable_vty_group} config file mask : ${enable_configfile_mask} log file mask : ${enable_logfile_mask} +zebra protobuf enabled : ${have_protobuf:-no} The above user and group must have read/write access to the state file directory and to the config files in the config file directory." 
diff --git a/debian/changelog b/debian/changelog index 3114db3fd1..44974cecd4 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +quagga (0.99.24+cl3u4) RELEASED; urgency=medium + + * Closes: CM-12687 - Buffer overflow in zebra RA code + + -- dev-support Wed, 31 Aug 2016 12:36:10 -0400 + quagga (0.99.24+cl3u3) RELEASED; urgency=medium * New Enabled: Merge up-to 0.99.24 code from upstream diff --git a/debian/rules b/debian/rules index 25461b89d8..b100813182 100755 --- a/debian/rules +++ b/debian/rules @@ -54,7 +54,8 @@ override_dh_auto_configure: --enable-poll=yes \ --enable-cumulus=yes \ --enable-pimd=no \ - --enable-dependency-tracking; \ + --enable-dependency-tracking \ + --enable-bgp-vnc=no; \ fi override_dh_auto_build: diff --git a/doc/Makefile.am b/doc/Makefile.am index 4a39f0b011..d5db6cf497 100644 --- a/doc/Makefile.am +++ b/doc/Makefile.am @@ -19,13 +19,24 @@ PNGTOEPS = convert -antialias -contrast -despeckle PNGTOPDF = $(PNGTOEPS) EPSTOPDF = epstopdf +VNCFIGURES_PNG = +VNCFIGURES_DIA = -vnc-mesh -vnc-quagga-route-reflector \ +-vnc-commercial-route-reflector -vnc-redundant-route-reflectors \ +-vnc-gw -vnc-gw-rr + +# TODO: A target that creates an empty text file for each member of +# VNCFIGURES_TXT +VNCFIGURES_TXT = $(VNCFIGURES:%.png=%.txt) + # The figure sources figures_names_parts = -normal-processing -rs-processing \ - _topologies_full _topologies_rs + _topologies_full _topologies_rs \ + $(VNCFIGURES_DIA) + figures_sources = $(figures_names_parts:%=fig%.dia) -figures_png = $(figures_names_parts:%=fig%.png) -figures_pdf = $(figures_names_parts:%=fig%.pdf) -figures_eps = $(figures_names_parts:%=fig%.eps) +figures_png = $(figures_names_parts:%=fig%.png) $(VNCFIGURES_PNG) +figures_pdf = $(figures_names_parts:%=fig%.pdf) $(VNCFIGURES_PNG:%.png=%.pdf) +figures_eps = $(figures_names_parts:%=fig%.eps) $(VNCFIGURES_PNG:%.png=%.eps) figures_txt = $(figures_names_parts:%=fig%.txt) # rather twisted logic because we have to build PDFs of the EPS figures for @@ -46,7 +57,8 @@ info_TEXINFOS = quagga.texi quagga.pdf: $(info_TEXINFOS) $(figures_pdf) $(quagga_TEXINFOS) $(TEXI2PDF) -o "$@" $< || true -quagga_TEXINFOS = appendix.texi basic.texi bgpd.texi filter.texi \ +quagga_TEXINFOS = appendix.texi basic.texi bgpd.texi isisd.texi filter.texi \ + vnc.texi \ install.texi ipv6.texi kernel.texi main.texi ospf6d.texi ospfd.texi \ overview.texi protocol.texi ripd.texi ripngd.texi routemap.texi \ snmp.texi vtysh.texi routeserver.texi defines.texi $(figures_png) \ @@ -87,6 +99,10 @@ if OSPFD man_MANS += ospfd.8 endif +if LDPD +man_MANS += ldpd.8 +endif + if RIPD man_MANS += ripd.8 endif @@ -108,7 +124,7 @@ man_MANS += zebra.8 endif EXTRA_DIST = BGP-TypeCode draft-zebra-00.ms draft-zebra-00.txt \ - bgpd.8 isisd.8 ospf6d.8 ospfclient.8 ospfd.8 ripd.8 \ + bgpd.8 isisd.8 ospf6d.8 ospfclient.8 ospfd.8 ldpd.8 ripd.8 \ ripngd.8 pimd.8 vtysh.1 watchquagga.8 zebra.8 quagga.1 \ mpls/ChangeLog.opaque.txt mpls/cli_summary.txt \ mpls/opaque_lsa.txt mpls/ospfd.conf \ @@ -116,3 +132,11 @@ EXTRA_DIST = BGP-TypeCode draft-zebra-00.ms draft-zebra-00.txt \ draft-zebra-00.txt: draft-zebra-00.ms groff -T ascii -ms $< > $@ + +# Ensure that all of the figures are copied into the html directory +html-local: $(HTMLS) + if test -d $(HTMLS) ; then \ + cp -p $(figures_png) $(HTMLS) ; \ + else \ + echo "$(HTMLS) is not a directory. 
Make it so, then rerun make."; \
+	fi
diff --git a/doc/bgpd.texi b/doc/bgpd.texi
index 3ef7c8f72f..54bed102f3 100644
--- a/doc/bgpd.texi
+++ b/doc/bgpd.texi
@@ -581,6 +581,10 @@ Redistribute RIP route to BGP process.
 Redistribute OSPF route to BGP process.
 @end deffn
 
+@deffn {BGP} {redistribute vpn} {}
+Redistribute VNC routes to BGP process.
+@end deffn
+
 @deffn {BGP} {update-delay @var{max-delay}} {}
 @deffnx {BGP} {update-delay @var{max-delay} @var{establish-wait}} {}
 This feature is used to enable read-only mode on BGP process restart or when
diff --git a/doc/fig-vnc-commercial-route-reflector.dia b/doc/fig-vnc-commercial-route-reflector.dia
new file mode 100644
index 0000000000..0da5bd1c86
--- /dev/null
+++ b/doc/fig-vnc-commercial-route-reflector.dia
@@ -0,0 +1,794 @@
+[Dia XML source omitted; figure nodes: NVE 4-9 (VN 172.16.4.1, 172.16.130.1, 172.16.132.1, 172.16.6.1, 172.16.8.1, 172.16.134.1), NVA 2 (192.168.1.101), NVA 3 (192.168.1.102), Commercial Router Route Reflector (192.168.1.104)]
diff --git a/doc/fig-vnc-commercial-route-reflector.png b/doc/fig-vnc-commercial-route-reflector.png
new file mode 100644
index 0000000000..ca8a248502
Binary files /dev/null and b/doc/fig-vnc-commercial-route-reflector.png differ
diff --git a/doc/fig-vnc-commercial-route-reflector.txt b/doc/fig-vnc-commercial-route-reflector.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/doc/fig-vnc-gw-rr.dia b/doc/fig-vnc-gw-rr.dia
new file mode 100644
index 0000000000..dab27f7007
--- /dev/null
+++ b/doc/fig-vnc-gw-rr.dia
@@ -0,0 +1,1155 @@
+[Dia XML source omitted; figure nodes: NVE 1-4, CE 1-4, VNC Gateway 1 (192.168.1.101), VNC Gateway 2 (192.168.1.102), NVA 1 (192.168.1.103), NVA 2 (192.168.1.104), RR (192.168.1.105)]
diff --git a/doc/fig-vnc-gw-rr.png b/doc/fig-vnc-gw-rr.png
new file mode 100644
index 0000000000..7ae0630f62
Binary files /dev/null and b/doc/fig-vnc-gw-rr.png differ
diff --git a/doc/fig-vnc-gw-rr.txt b/doc/fig-vnc-gw-rr.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/doc/fig-vnc-gw.dia b/doc/fig-vnc-gw.dia
new file mode 100644
index 0000000000..8270e208b5
--- /dev/null
+++ b/doc/fig-vnc-gw.dia
@@ -0,0 +1,1058 @@
+[Dia XML source omitted; figure nodes: NVE 1-4, CE 1-4, VNC Gateway 1 (192.168.1.101), VNC Gateway 2 (192.168.1.102), NVA 1 (192.168.1.103), NVA 2 (192.168.1.104)]
diff --git a/doc/fig-vnc-gw.png b/doc/fig-vnc-gw.png
new file mode 100644
index 0000000000..df8f23f438
Binary files /dev/null and b/doc/fig-vnc-gw.png differ
diff --git a/doc/fig-vnc-gw.txt b/doc/fig-vnc-gw.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/doc/fig-vnc-mesh.dia b/doc/fig-vnc-mesh.dia
new file mode 100644
index 0000000000..a8f702f768
--- /dev/null
+++ b/doc/fig-vnc-mesh.dia
@@ -0,0 +1,1071 @@
+[Dia XML source omitted; figure nodes: NVA 1 (192.168.1.100), NVA 2 (192.168.1.101), NVA 3 (192.168.1.102), NVE 1-9]
diff --git a/doc/fig-vnc-mesh.png b/doc/fig-vnc-mesh.png
new file mode 100644
index 0000000000..fa0762d16b
Binary files /dev/null and b/doc/fig-vnc-mesh.png differ
diff --git a/doc/fig-vnc-mesh.txt b/doc/fig-vnc-mesh.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/doc/fig-vnc-quagga-route-reflector.dia b/doc/fig-vnc-quagga-route-reflector.dia
new file mode 100644
index 0000000000..634f0b17ff
--- /dev/null
+++ b/doc/fig-vnc-quagga-route-reflector.dia
@@ -0,0 +1,763 @@
+[Dia XML source omitted; figure nodes: NVE 4-9, BGP Route Reflector 1 (192.168.1.100), NVA 2 (192.168.1.101), NVA 3 (192.168.1.102)]
diff --git a/doc/fig-vnc-quagga-route-reflector.png b/doc/fig-vnc-quagga-route-reflector.png
new file mode 100644
index 0000000000..477052184f
Binary files /dev/null and b/doc/fig-vnc-quagga-route-reflector.png differ
diff --git a/doc/fig-vnc-quagga-route-reflector.txt b/doc/fig-vnc-quagga-route-reflector.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/doc/fig-vnc-redundant-route-reflectors.dia b/doc/fig-vnc-redundant-route-reflectors.dia
new file mode 100644
index 0000000000..4065b8ba1f
--- /dev/null
+++ b/doc/fig-vnc-redundant-route-reflectors.dia
@@ -0,0 +1,871 @@
+[Dia XML source omitted; figure nodes: NVE 4-9, BGP Route Reflector 1 (192.168.1.100), Commercial Router Route Reflector (192.168.1.104), NVA 2 (192.168.1.101), NVA 3 (192.168.1.102)]
diff --git a/doc/fig-vnc-redundant-route-reflectors.png b/doc/fig-vnc-redundant-route-reflectors.png
new file mode 100644
index 0000000000..06a27b6575
Binary files /dev/null and b/doc/fig-vnc-redundant-route-reflectors.png differ
diff --git a/doc/fig-vnc-redundant-route-reflectors.txt b/doc/fig-vnc-redundant-route-reflectors.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/doc/ldpd-basic-test-setup.md b/doc/ldpd-basic-test-setup.md
new file mode 100644
index 0000000000..e5e987f9e3
--- /dev/null
+++ b/doc/ldpd-basic-test-setup.md
@@ -0,0 +1,681 @@
+## Topology
+
+The goal of this test is to verify that all the basic functionality
+of ldpd is working as expected, be it running on Linux or OpenBSD. In
+addition to that, more advanced features are also tested, like LDP
+sessions over IPv6, MD5 authentication and pseudowire signaling.
+
+In the topology below there are 3 PE routers, 3 CE routers and one P
+router (not attached to any consumer site).
+
+All routers have IPv4 addresses and OSPF is used as the IGP. The
+three routers at the bottom of the picture, P, PE2 and PE3, are also
+configured for IPv6 (dual-stack) and static IPv6 routes are used to
+provide connectivity among them.
+
+The three CEs share the same VPLS membership. LDP is used to set up the
+LSPs among the PEs and to signal the pseudowires. MD5 authentication is
+used to protect all LDP sessions.
+ +``` + CE1 172.16.1.1/24 + + + | + +---+---+ + | PE1 | + | IOS XE| + | | + +---+---+ + | + | 10.0.1.0/24 + | + +---+---+ + | P | + +------+ IOS XR+------+ + | | | | + | +-------+ | + 10.0.2.0/24 | | 10.0.3.0/24 +2001:db8:2::/64 | | 2001:db8:3::/64 + | | + +---+---+ +---+---+ + | PE2 | | PE3 | + |OpenBSD+-------------+ Linux | + | | | | + +---+---+ 10.0.4.0/24 +---+---+ + | 2001:db8:4::/64 | + + + + 172.16.1.2/24 CE2 CE3 172.16.1.3/24 +``` + +## Configuration + +#### Linux +1 - Enable IPv4/v6 forwarding: +``` +# sysctl -w net.ipv4.ip_forward=1 +# sysctl -w net.ipv6.conf.all.forwarding=1 +``` + +2 - Enable MPLS forwarding: +``` +# modprobe mpls-router +# modprobe mpls-iptunnel +# echo 100000 > /proc/sys/net/mpls/platform_labels +# echo 1 > /proc/sys/net/mpls/conf/eth1/input +# echo 1 > /proc/sys/net/mpls/conf/eth2/input +``` + +3 - Set up the interfaces: +``` +# ip link add name lo1 type dummy +# ip link set dev lo1 up +# ip addr add 4.4.4.4/32 dev lo1 +# ip -6 addr add 4:4:4::4/128 dev lo1 +# ip link set dev eth1 up +# ip addr add 10.0.4.4/24 dev eth1 +# ip -6 addr add 2001:db8:4::4/64 dev eth1 +# ip link set dev eth2 up +# ip addr add 10.0.3.4/24 dev eth2 +# ip -6 addr add 2001:db8:3::4/64 dev eth2 +``` + +4 - Set up the bridge and pseudowire interfaces: +``` +# ip link add type bridge +# ip link set dev bridge0 up +# ip link set dev eth0 up +# ip link set dev eth0 master bridge0 +# ip link add name mpw0 type dummy +# ip link set dev mpw0 up +# ip link set dev mpw0 master bridge0 +# ip link add name mpw1 type dummy +# ip link set dev mpw1 up +# ip link set dev mpw1 master bridge0 +``` + +> NOTE: MPLS support in the Linux kernel is very recent and it still +doesn't support pseudowire interfaces. We are using here dummy interfaces +just to show how the VPLS configuration should look like in the future. + +5 - Add static IPv6 routes for the remote loopbacks: +``` +# ip -6 route add 2:2:2::2/128 via 2001:db8:3::2 +# ip -6 route add 3:3:3::3/128 via 2001:db8:4::3 +``` + +6 - Edit /etc/quagga/ospfd.conf: +``` +router ospf + network 4.4.4.4/32 area 0.0.0.0 + network 10.0.3.4/24 area 0.0.0.0 + network 10.0.4.4/24 area 0.0.0.0 +! +``` + +7 - Edit /etc/quagga/ldpd.conf: +``` +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 4.4.4.4 + dual-stack cisco-interop + neighbor 1.1.1.1 password opensourcerouting + neighbor 2.2.2.2 password opensourcerouting + neighbor 3.3.3.3 password opensourcerouting + ! + address-family ipv4 + discovery transport-address 4.4.4.4 + label local advertise explicit-null + ! + interface eth2 + ! + interface eth1 + ! + ! + address-family ipv6 + discovery transport-address 4:4:4::4 + ttl-security disable + ! + interface eth2 + ! + interface eth1 + ! + ! +! +l2vpn ENG type vpls + bridge br0 + member interface eth0 + ! + member pseudowire mpw0 + neighbor lsr-id 1.1.1.1 + pw-id 100 + ! + member pseudowire mpw1 + neighbor lsr-id 3.3.3.3 + neighbor address 3:3:3::3 + pw-id 100 + ! +! +``` + +> NOTE: We have to disable ttl-security under the ipv6 address-family +in order to interoperate with the IOS-XR router. GTSM is mandatory for +LDPv6 but the IOS-XR implementation is not RFC compliant in this regard. + +8 - Run zebra, ospfd and ldpd. 
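Assuming the default install prefix (see the ldpd.8 man page later in this change for the default paths), step 8 could be, for example:

```
# /usr/local/sbin/zebra -d
# /usr/local/sbin/ospfd -d -f /etc/quagga/ospfd.conf
# /usr/local/sbin/ldpd -d -f /etc/quagga/ldpd.conf
```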
+ +#### OpenBSD +1 - Enable IPv4/v6 forwarding: +``` +# sysctl net.inet.ip.forwarding=1 +# sysctl net.inet6.ip6.forwarding=1 +``` + +2 - Enable MPLS forwarding: +``` +# ifconfig em2 10.0.2.3/24 mpls +# ifconfig em3 10.0.4.3/24 mpls +``` + +3 - Set up the interfaces: +``` +# ifconfig lo1 alias 3.3.3.3 netmask 255.255.255.255 +# ifconfig lo1 inet6 3:3:3::3/128 +# ifconfig em2 inet6 2001:db8:2::3/64 +# ifconfig em3 inet6 2001:db8:4::3/64 +``` + +4 - Set up the bridge and pseudowire interfaces: +``` +# ifconfig bridge0 create +# ifconfig bridge0 up +# ifconfig em1 up +# ifconfig bridge0 add em1 +# ifconfig mpw0 create +# ifconfig mpw0 up +# ifconfig bridge0 add mpw0 +# ifconfig mpw1 create +# ifconfig mpw1 up +# ifconfig bridge0 add mpw1 +``` + +5 - Add static IPv6 routes for the remote loopbacks: +``` +# route -n add 4:4:4::4/128 2001:db8:4::4 +# route -n add 2:2:2::2/128 2001:db8:2::2 +``` + +6 - Edit /etc/quagga/ospfd.conf: +``` +router ospf + network 10.0.2.3/24 area 0 + network 10.0.4.3/24 area 0 + network 3.3.3.3/32 area 0 +! +``` + +7 - Edit /etc/quagga/ldpd.conf: +``` +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 3.3.3.3 + dual-stack cisco-interop + neighbor 1.1.1.1 password opensourcerouting + neighbor 2.2.2.2 password opensourcerouting + neighbor 4.4.4.4 password opensourcerouting + ! + address-family ipv4 + discovery transport-address 3.3.3.3 + label local advertise explicit-null + ! + interface em3 + ! + interface em2 + ! + ! + address-family ipv6 + discovery transport-address 3:3:3::3 + ttl-security disable + ! + interface em3 + ! + interface em2 + ! + ! +! +l2vpn ENG type vpls + bridge br0 + member interface em1 + ! + member pseudowire mpw0 + neighbor lsr-id 1.1.1.1 + pw-id 100 + ! + member pseudowire mpw1 + neighbor lsr-id 4.4.4.4 + neighbor address 4:4:4::4 + pw-id 100 + ! +! +``` + +8 - Run zebra, ospfd and ldpd. + +#### Cisco routers +CE1 (IOS): +``` +interface FastEthernet0/0 + ip address 172.16.1.1 255.255.255.0 + ! +! +``` + +CE2 (IOS): +``` +interface FastEthernet0/0 + ip address 172.16.1.2 255.255.255.0 + ! +! +``` + +CE3 (IOS): +``` +interface FastEthernet0/0 + ip address 172.16.1.3 255.255.255.0 + ! +! +``` + +PE1 - IOS-XE (1): +``` +mpls ldp neighbor 2.2.2.2 password opensourcerouting +mpls ldp neighbor 3.3.3.3 password opensourcerouting +mpls ldp neighbor 4.4.4.4 password opensourcerouting +! +l2vpn vfi context VFI + vpn id 1 + member pseudowire2 + member pseudowire1 +! +bridge-domain 1 + member GigabitEthernet1 service-instance 1 + member vfi VFI +! +interface Loopback1 + ip address 1.1.1.1 255.255.255.255 +! +interface pseudowire1 + encapsulation mpls + neighbor 3.3.3.3 100 +! +interface pseudowire2 + encapsulation mpls + neighbor 4.4.4.4 100 +! +interface GigabitEthernet3 + ip address 10.0.1.1 255.255.255.0 + mpls ip +! +router ospf 1 + network 0.0.0.0 255.255.255.255 area 0 +! +``` + +P - IOS-XR (2): +``` +interface Loopback1 + ipv4 address 2.2.2.2 255.255.255.255 + ipv6 address 2:2:2::2/128 +! +interface GigabitEthernet0/0/0/0 + ipv4 address 10.0.1.2 255.255.255.0 +! +interface GigabitEthernet0/0/0/1 + ipv4 address 10.0.2.2 255.255.255.0 + ipv6 address 2001:db8:2::2/64 + ipv6 enable +! +interface GigabitEthernet0/0/0/2 + ipv4 address 10.0.3.2 255.255.255.0 + ipv6 address 2001:db8:3::2/64 + ipv6 enable +! +router static + address-family ipv6 unicast + 3:3:3::3/128 2001:db8:2::3 + 4:4:4::4/128 2001:db8:3::4 + ! +! 
+router ospf 1 + router-id 2.2.2.2 + address-family ipv4 unicast + area 0 + interface Loopback1 + ! + interface GigabitEthernet0/0/0/0 + ! + interface GigabitEthernet0/0/0/1 + ! + interface GigabitEthernet0/0/0/2 + ! + ! +! +mpls ldp + router-id 2.2.2.2 + neighbor + 1.1.1.1:0 password clear opensourcerouting + 3.3.3.3:0 password clear opensourcerouting + 4.4.4.4:0 password clear opensourcerouting + ! + address-family ipv4 + ! + address-family ipv6 + discovery transport-address 2:2:2::2 + ! + interface GigabitEthernet0/0/0/0 + address-family ipv4 + ! + ! + interface GigabitEthernet0/0/0/1 + address-family ipv4 + ! + address-family ipv6 + ! + ! + interface GigabitEthernet0/0/0/2 + address-family ipv4 + ! + address-family ipv6 + ! + ! +! +``` + +## Verification - Control Plane + +Using the CLI on the Linux box, the goal is to ensure that everything +is working as expected. + +First, verify that all the required adjacencies and neighborships sessions +were established: + +``` +linux# show mpls ldp discovery +Local LDP Identifier: 4.4.4.4:0 +Discovery Sources: + Interfaces: + eth1: xmit/recv + LDP Id: 3.3.3.3:0, Transport address: 3.3.3.3 + Hold time: 15 sec + LDP Id: 3.3.3.3:0, Transport address: 3:3:3::3 + Hold time: 15 sec + eth2: xmit/recv + LDP Id: 2.2.2.2:0, Transport address: 2.2.2.2 + Hold time: 15 sec + LDP Id: 2.2.2.2:0, Transport address: 2:2:2::2 + Hold time: 15 sec + Targeted Hellos: + 4.4.4.4 -> 1.1.1.1: xmit/recv + LDP Id: 1.1.1.1:0, Transport address: 1.1.1.1 + Hold time: 45 sec + 4:4:4::4 -> 3:3:3::3: xmit/recv + LDP Id: 3.3.3.3:0, Transport address: 3:3:3::3 + Hold time: 45 sec + +linux# show mpls ldp neighbor +Peer LDP Identifier: 1.1.1.1:0 + TCP connection: 4.4.4.4:40921 - 1.1.1.1:646 + Session Holdtime: 180 sec + State: OPERATIONAL; Downstream-Unsolicited + Up time: 00:06:02 + LDP Discovery Sources: + IPv4: + Targeted Hello: 1.1.1.1 + +Peer LDP Identifier: 2.2.2.2:0 + TCP connection: 4:4:4::4:52286 - 2:2:2::2:646 + Session Holdtime: 180 sec + State: OPERATIONAL; Downstream-Unsolicited + Up time: 00:06:02 + LDP Discovery Sources: + IPv4: + Interface: eth2 + IPv6: + Interface: eth2 + +Peer LDP Identifier: 3.3.3.3:0 + TCP connection: 4:4:4::4:60575 - 3:3:3::3:646 + Session Holdtime: 180 sec + State: OPERATIONAL; Downstream-Unsolicited + Up time: 00:05:57 + LDP Discovery Sources: + IPv4: + Interface: eth1 + IPv6: + Targeted Hello: 3:3:3::3 + Interface: eth1 +``` + +Note that the neighborships with the P and PE2 routers were established +over IPv6, since this is the default behavior for dual-stack LSRs, as +specified in RFC 7552. If desired, the **dual-stack transport-connection +prefer ipv4** command can be used to establish these sessions over IPv4 +(the command should be applied an all routers). 
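In ldpd.conf terms this is a one-line addition under the `mpls ldp` node; a sketch for the Linux PE is shown below (placement assumed by analogy with the `dual-stack cisco-interop` statement already configured above):

```
mpls ldp
 router-id 4.4.4.4
 dual-stack transport-connection prefer ipv4
 !
```

As noted, the equivalent statement has to be applied on all routers.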
+ +Now, verify that there's a remote label for each PE address: +``` +linux# show mpls ldp binding +1.1.1.1/32 + Local binding: label: 20 + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 imp-null + 2.2.2.2 24000 + 3.3.3.3 20 +2.2.2.2/32 + Local binding: label: 21 + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 18 + 2.2.2.2 imp-null + 3.3.3.3 21 +3.3.3.3/32 + Local binding: label: 22 + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 21 + 2.2.2.2 24003 + 3.3.3.3 imp-null +4.4.4.4/32 + Local binding: label: imp-null + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 22 + 2.2.2.2 24001 + 3.3.3.3 22 +10.0.1.0/24 + Local binding: label: 23 + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 imp-null + 2.2.2.2 imp-null + 3.3.3.3 23 +10.0.2.0/24 + Local binding: label: 24 + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 20 + 2.2.2.2 imp-null + 3.3.3.3 imp-null +10.0.3.0/24 + Local binding: label: imp-null + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 19 + 2.2.2.2 imp-null + 3.3.3.3 24 +10.0.4.0/24 + Local binding: label: imp-null + Remote bindings: + Peer Label + ----------------- --------- + 1.1.1.1 23 + 2.2.2.2 24002 + 3.3.3.3 imp-null +2:2:2::2/128 + Local binding: label: 18 + Remote bindings: + Peer Label + ----------------- --------- + 2.2.2.2 imp-null + 3.3.3.3 18 +3:3:3::3/128 + Local binding: label: 19 + Remote bindings: + Peer Label + ----------------- --------- + 2.2.2.2 24007 +4:4:4::4/128 + Local binding: label: imp-null + Remote bindings: + Peer Label + ----------------- --------- + 2.2.2.2 24006 + 3.3.3.3 19 +2001:db8:2::/64 + Local binding: label: - + Remote bindings: + Peer Label + ----------------- --------- + 2.2.2.2 imp-null + 3.3.3.3 imp-null +2001:db8:3::/64 + Local binding: label: imp-null + Remote bindings: + Peer Label + ----------------- --------- + 2.2.2.2 imp-null +2001:db8:4::/64 + Local binding: label: imp-null + Remote bindings: + Peer Label + ----------------- --------- + 3.3.3.3 imp-null +``` + +Check if the pseudowires are up: +``` +linux# show l2vpn atom vc +Interface Peer ID VC ID Name Status +--------- --------------- ---------- ---------------- ---------- +mpw1 3.3.3.3 100 ENG UP +mpw0 1.1.1.1 100 ENG UP +``` + +Check the label bindings of the pseudowires: +``` +linux# show l2vpn atom binding + Destination Address: 1.1.1.1, VC ID: 100 + Local Label: 25 + Cbit: 1, VC Type: Ethernet, GroupID: 0 + MTU: 1500 + Remote Label: 16 + Cbit: 1, VC Type: Ethernet, GroupID: 0 + MTU: 1500 + Destination Address: 3.3.3.3, VC ID: 100 + Local Label: 26 + Cbit: 1, VC Type: Ethernet, GroupID: 0 + MTU: 1500 + Remote Label: 26 + Cbit: 1, VC Type: Ethernet, GroupID: 0 + MTU: 1500 +``` + +## Verification - Data Plane + +Verify that all the exchanged label mappings were installed in zebra: +``` +linux# show mpls table + Inbound Outbound + Label Type Nexthop Label +-------- ------- --------------- -------- + 17 LDP 2001:db8:3::2 3 + 19 LDP 2001:db8:3::2 24005 + 20 LDP 10.0.3.2 24000 + 21 LDP 10.0.3.2 3 + 22 LDP 10.0.3.2 24001 + 23 LDP 10.0.3.2 3 + 24 LDP 10.0.3.2 3 + 25 LDP 10.0.3.2 3 + +linux# show ip route ldp +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, P - PIM, A - Babel, L - LDP, + > - selected route, * - FIB route + +L>* 1.1.1.1/32 [0/0] via 10.0.3.2, eth2 label 24000 +L>* 3.3.3.3/32 [0/0] via 10.0.3.2, eth2 label 24001 +``` + +Verify that all the exchanged label mappings were 
installed in the kernel: +``` +$ ip -M ro +17 via inet6 2001:db8:3::2 dev eth2 proto zebra +19 as to 24005 via inet6 2001:db8:3::2 dev eth2 proto zebra +20 as to 24000 via inet 10.0.3.2 dev eth2 proto zebra +21 via inet 10.0.3.2 dev eth2 proto zebra +22 as to 24001 via inet 10.0.3.2 dev eth2 proto zebra +23 via inet 10.0.3.2 dev eth2 proto zebra +24 via inet 10.0.3.2 dev eth2 proto zebra +25 via inet 10.0.3.2 dev eth2 proto zebra +$ +$ ip route | grep mpls +1.1.1.1 encap mpls 24000 via 10.0.3.2 dev eth2 proto zebra metric 20 +3.3.3.3 encap mpls 24001 via 10.0.3.2 dev eth2 proto zebra metric 20 +``` + +Now ping PE1's loopback using lo1's address as a source address: +``` +$ ping -c 5 -I 4.4.4.4 1.1.1.1 +PING 1.1.1.1 (1.1.1.1) from 4.4.4.4 : 56(84) bytes of data. +64 bytes from 1.1.1.1: icmp_seq=1 ttl=253 time=3.02 ms +64 bytes from 1.1.1.1: icmp_seq=2 ttl=253 time=3.13 ms +64 bytes from 1.1.1.1: icmp_seq=3 ttl=253 time=3.19 ms +64 bytes from 1.1.1.1: icmp_seq=4 ttl=253 time=3.07 ms +64 bytes from 1.1.1.1: icmp_seq=5 ttl=253 time=3.27 ms + +--- 1.1.1.1 ping statistics --- +5 packets transmitted, 5 received, 0% packet loss, time 4005ms +rtt min/avg/max/mdev = 3.022/3.140/3.278/0.096 ms +``` + +Verify that the ICMP echo request packets are leaving with the MPLS +label advertised by the P router. Also, verify that the ICMP echo reply +packets are arriving with an explicit-null MPLS label: +``` +# tcpdump -n -i eth2 mpls and icmp +tcpdump: verbose output suppressed, use -v or -vv for full protocol decode +listening on eth2, link-type EN10MB (Ethernet), capture size 262144 bytes +10:01:40.758771 MPLS (label 24000, exp 0, [S], ttl 64) IP 4.4.4.4 > 1.1.1.1: ICMP echo request, id 13370, seq 1, length 64 +10:01:40.761777 MPLS (label 0, exp 0, [S], ttl 254) IP 1.1.1.1 > 4.4.4.4: ICMP echo reply, id 13370, seq 1, length 64 +10:01:41.760343 MPLS (label 24000, exp 0, [S], ttl 64) IP 4.4.4.4 > 1.1.1.1: ICMP echo request, id 13370, seq 2, length 64 +10:01:41.763448 MPLS (label 0, exp 0, [S], ttl 254) IP 1.1.1.1 > 4.4.4.4: ICMP echo reply, id 13370, seq 2, length 64 +10:01:42.761758 MPLS (label 24000, exp 0, [S], ttl 64) IP 4.4.4.4 > 1.1.1.1: ICMP echo request, id 13370, seq 3, length 64 +10:01:42.764924 MPLS (label 0, exp 0, [S], ttl 254) IP 1.1.1.1 > 4.4.4.4: ICMP echo reply, id 13370, seq 3, length 64 +10:01:43.763193 MPLS (label 24000, exp 0, [S], ttl 64) IP 4.4.4.4 > 1.1.1.1: ICMP echo request, id 13370, seq 4, length 64 +10:01:43.766237 MPLS (label 0, exp 0, [S], ttl 254) IP 1.1.1.1 > 4.4.4.4: ICMP echo reply, id 13370, seq 4, length 64 +10:01:44.764552 MPLS (label 24000, exp 0, [S], ttl 64) IP 4.4.4.4 > 1.1.1.1: ICMP echo request, id 13370, seq 5, length 64 +10:01:44.767803 MPLS (label 0, exp 0, [S], ttl 254) IP 1.1.1.1 > 4.4.4.4: ICMP echo reply, id 13370, seq 5, length 64 +``` diff --git a/doc/ldpd.8 b/doc/ldpd.8 new file mode 100644 index 0000000000..092ff39d49 --- /dev/null +++ b/doc/ldpd.8 @@ -0,0 +1,109 @@ +.TH LDPD 8 "29 March 2016" "Quagga LDP daemon" "Version 1.0.20160309" +.SH NAME +ldpd \- an LDP engine for use with Quagga routing software. +.SH SYNOPSIS +.B ldpd +[ +.B \-dhv +] [ +.B \-f +.I config-file +] [ +.B \-i +.I pid-file +] [ +.B \-P +.I port-number +] [ +.B \-A +.I vty-address +] [ +.B \-u +.I user +] [ +.B \-g +.I group +] +.SH DESCRIPTION +.B ldpd +is a component that works with the +.B Quagga +routing engine. +.SH OPTIONS +Options available for the +.B ldpd +command: +.TP +\fB\-d\fR, \fB\-\-daemon\fR +Runs in daemon mode, forking and exiting from tty. 
+.TP +\fB\-f\fR, \fB\-\-config-file \fR\fIconfig-file\fR +Specifies the config file to use for startup. If not specified this +option will likely default to \fB\fI/usr/local/etc/ldpd.conf\fR. +.TP +\fB\-g\fR, \fB\-\-group \fR\fIgroup\fR +Specify the group to run as. Default is \fIquagga\fR. +.TP +\fB\-h\fR, \fB\-\-help\fR +A brief message. +.TP +\fB\-i\fR, \fB\-\-pid_file \fR\fIpid-file\fR +When ldpd starts its process identifier is written to +\fB\fIpid-file\fR. The init system uses the recorded PID to stop or +restart ldpd. The likely default is \fB\fI/var/run/ldpd.pid\fR. +.TP +\fB\-P\fR, \fB\-\-vty_port \fR\fIport-number\fR +Specify the port that the ldpd VTY will listen on. This defaults to +2612, as specified in \fB\fI/etc/services\fR. +.TP +\fB\-A\fR, \fB\-\-vty_addr \fR\fIvty-address\fR +Specify the address that the ldpd VTY will listen on. Default is all +interfaces. +.TP +\fB\-u\fR, \fB\-\-user \fR\fIuser\fR +Specify the user to run as. Default is \fIquagga\fR. +.TP +\fB\-v\fR, \fB\-\-version\fR +Print the version and exit. +.SH FILES +.TP +.BI /usr/local/sbin/ldpd +The default location of the +.B ldpd +binary. +.TP +.BI /usr/local/etc/ldpd.conf +The default location of the +.B ldpd +config file. +.TP +.BI $(PWD)/ldpd.log +If the +.B ldpd +process is config'd to output logs to a file, then you will find this +file in the directory where you started \fBldpd\fR. +.SH WARNING +This man page is intended to be a quick reference for command line +options. The definitive document is the Info file \fBQuagga\fR. +.SH DIAGNOSTICS +The ldpd process may log to standard output, to a VTY, to a log +file, or through syslog to the system logs. \fBldpd\fR supports many +debugging options, see the Info file, or the source for details. +.SH "SEE ALSO" +.BR bgpd (8), +.BR ripd (8), +.BR ripngd (8), +.BR ospfd (8), +.BR ospf6d (8), +.BR isisd (8), +.BR zebra (8), +.BR vtysh (1) +.SH BUGS +.B ldpd +eats bugs for breakfast. If you have food for the maintainers try +.BI http://bugzilla.quagga.net +.SH AUTHORS +See +.BI http://www.quagga.net +or the Info file for an accurate list of authors. + diff --git a/doc/main.texi b/doc/main.texi index 29ed17c82b..5302c9687d 100644 --- a/doc/main.texi +++ b/doc/main.texi @@ -407,8 +407,32 @@ routes that it may have picked up from the kernel. The existing interaction of zebra with the kernel remains unchanged -- that is, the kernel continues to receive FIB updates as before. -The format of the messages exchanged with the FPM is defined by the -file @file{fpm/fpm.h} in the quagga tree. +The encapsulation header for the messages exchanged with the FPM is +defined by the file @file{fpm/fpm.h} in the quagga tree. The routes +themselves are encoded in netlink or protobuf format, with netlink +being the default. + +Protobuf is one of a number of new serialization formats wherein the +message schema is expressed in a purpose-built language. Code for +encoding/decoding to/from the wire format is generated from the +schema. Protobuf messages can be extended easily while maintaining +backward-compatibility with older code. Protobuf has the following +advantages over netlink: + +@itemize +@item +Code for serialization/deserialization is generated +automatically. This reduces the likelihood of bugs, allows third-party +programs to be integrated quickly, and makes it easy to add fields. +@item +The message format is not tied to an OS (Linux), and can be evolved +independently. 
+@end itemize + +As mentioned before, zebra encodes routes sent to the FPM in netlink +format by default. The format can be controlled via the +@code{--fpm_format} command-line option to zebra, which currently +takes the values @code{netlink} and @code{protobuf}. The zebra FPM interface uses replace semantics. That is, if a 'route add' message for a prefix is followed by another 'route add' message, diff --git a/doc/ospfd.texi b/doc/ospfd.texi index 96dffe0000..7b9df8e78e 100644 --- a/doc/ospfd.texi +++ b/doc/ospfd.texi @@ -580,6 +580,10 @@ redistributed into OSPF (@pxref{OSPF redistribute}). @deffnx {OSPF Command} {no distance ospf} {} @end deffn +@deffn {Command} {router zebra} {} +@deffnx {Command} {no router zebra} {} +@end deffn + @node Showing OSPF information @section Showing OSPF information diff --git a/doc/quagga.texi b/doc/quagga.texi index 6831b30cdf..13b885b69c 100644 --- a/doc/quagga.texi +++ b/doc/quagga.texi @@ -1,5 +1,8 @@ \input texinfo @c -*- texinfo -*- +@c Set variables - sourced from defines.texi +@include defines.texi + @c %**start of header @setfilename quagga.info @c Set variables - sourced from defines.texi @@ -28,6 +31,7 @@ Permission is granted to copy and distribute translations of this manual into another language, under the above conditions for modified versions, except that this permission notice may be stated in a translation approved by Kunihiro Ishiguro. + @end quotation @end copying @@ -66,7 +70,7 @@ Version @value{VERSION}. @ifnottex @node Top -@top Quagga +@top Quagga -- With Virtual Network Control @uref{http://www.quagga.net,,Quagga} is an advanced routing software package that provides a suite of TCP/IP based routing protocols. This is the Manual @@ -88,6 +92,7 @@ for @value{PACKAGE_STRING}. @uref{http://www.quagga.net,,Quagga} is a fork of * ISIS:: * BGP:: * Configuring Quagga as a Route Server:: +* VNC and VNC-GW:: * VTY shell:: * Filtering:: * Route Map:: @@ -113,6 +118,7 @@ for @value{PACKAGE_STRING}. @uref{http://www.quagga.net,,Quagga} is a fork of @include isisd.texi @include bgpd.texi @include routeserver.texi +@include vnc.texi @include vtysh.texi @include filter.texi @include routemap.texi diff --git a/doc/routemap.texi b/doc/routemap.texi index b3ef7ca76f..33062a7f61 100644 --- a/doc/routemap.texi +++ b/doc/routemap.texi @@ -167,7 +167,7 @@ Set the BGP nexthop address. @end deffn @deffn {Route-map Command} {set local-preference @var{local_pref}} {} -Set the BGP local preference. +Set the BGP local preference to @var{local_pref}. @end deffn @deffn {Route-map Command} {set weight @var{weight}} {} diff --git a/doc/vnc.texi b/doc/vnc.texi new file mode 100644 index 0000000000..341cbfcce8 --- /dev/null +++ b/doc/vnc.texi @@ -0,0 +1,1584 @@ +@c -*-texinfo-*- +@c This is part of the Quagga Manual. +@c @value{COPYRIGHT_STR} +@c See file quagga.texi for copying conditions. + +@node VNC and VNC-GW +@chapter VNC and VNC-GW +This chapter describes how to use +Virtual Network Control (@acronym{VNC}) services, +including Network Virtualization Authority (@acronym{NVA}) and +VNC Gateway (@acronym{VNC-GW}) functions. +Background information on NVAs, +Network Virtualization Edges (@acronym{NVE}s), underlay networks (@acronym{UN}s), +and virtual networks (@acronym{VN}s) is available from the +@url{https://datatracker.ietf.org/wg/nvo3,IETF Network Virtualization Overlays (@acronym{NVO3}) Working Group}. 
+VNC Gateways (@acronym{VNC-GW}s) support the import/export of routing +information between VNC and customer edge routers (@acronym{CE}s) +operating within a VN. Both IP/Layer 3 (L3) VNs, and IP with +Ethernet/Layer 2 (L2) VNs are supported. + +BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VN +information between NVAs. BGP based IP VPN support is defined in +@cite{RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs)}, and +@cite{RFC4659, BGP-MPLS IP Virtual Private Network (VPN) Extension for +IPv6 VPN }. Both the Encapsulation Subsequent Address Family Identifier +(SAFI) and the Tunnel Encapsulation Attribute, @cite{RFC5512, The BGP +Encapsulation Subsequent Address Family Identifier (SAFI) and the BGP +Tunnel Encapsulation Attribute}, are supported. + +The protocol that is used to communicate routing and Ethernet / Layer 2 +(L2) forwarding information between NVAs and NVEs is referred to as the +Remote Forwarder Protocol (RFP). @code{OpenFlow} is an example +RFP. Specific RFP implementations may choose to implement either a +@code{hard-state} or @code{soft-state} prefix and address registration +model. To support a @code{soft-state} refresh model, a @var{lifetime} +in seconds is associated with all registrations and responses. + +The chapter also provides sample configurations for basic example scenarios. + +@menu +* Configuring VNC Services:: +* Manual Address Control:: +* Other VNC-Related Commands:: +* Example VNC and VNC-GW Configurations:: +* Release Notes:: +@end menu + +@node Configuring VNC Services +@section Configuring VNC + +Virtual Network Control (@acronym{VNC}) service configuration commands +appear in the @code{router bgp} section of the BGPD configuration file +(@pxref{BGP Configuration Examples}). The commands are broken down into +the following areas: + +@menu +* General VNC Configuration:: +* RFP Related Configuration:: +* VNC Defaults Configuration:: +* VNC NVE Group Configuration:: +* VNC L2 Group Configuration:: +* Configuring Redistribution of Routes from Other Routing Protocols:: +* Configuring Export of Routes to Other Routing Protocols:: +@end menu + +@code{General VNC} configuration applies to general VNC operation and is +primarily used to control the method used to advertise tunnel +information. + +@code{Remote Forwarder Protocol (RFP)} configuration relates to the +protocol used between NVAs and NVEs. + +@code{VNC Defaults} provides default parameters for registered NVEs. + +@code{VNC NVE Group} provides for configuration of a specific set of +registered NVEs and overrides default parameters. + +@code{Redistribution} and @code{Export} control VNC-GW operation, i.e., +the import/export of routing +information between VNC and customer edge routers (@acronym{CE}s) +operating within a VN. + +@node General VNC Configuration +@subsection General VNC Configuration + +@deffn {VNC} {vnc advertise-un-method encap-safi|encap-attr} {} +Advertise NVE underlay-network IP addresses using the encapsulation SAFI +(@code{encap-safi}) or the UN address sub-TLV of the Tunnel Encapsulation attribute +(@code{encap-attr}). When @code{encap-safi} is used, neighbors under +@code{address-family encap} and/or @code{address-family encapv6} must be +configured. The default is @code{encap-attr}. +@end deffn + +@node RFP Related Configuration +@subsection RFP Related Configuration + +The protocol that is used to communicate routing and Ethernet / L2 +forwarding information between NVAs and NVEs is referred to as the +Remote Forwarder Protocol (RFP). 
Currently, only a simple example RFP +is included in Quagga. Developers may use this example as a starting +point to integrate Quagga with an RFP of their choosing, e.g., +@code{OpenFlow}. The example code includes the following sample +configuration: + +@deffn {RFP} {rfp example-config-value @var{VALUE}} +This is a simple example configuration parameter included as part of the +RFP example code. @code{VALUE} must be in the range of 0 to 4294967295. +@end deffn + +@node VNC Defaults Configuration +@subsection VNC Defaults Configuration + +The VNC Defaults section allows the user to specify default values for +configuration parameters for all registered NVEs. +Default values are overridden by @ref{VNC NVE Group Configuration}. + +@deffn {VNC} {vnc defaults} {} +Enter VNC configuration mode for specifying VNC default behaviors. Use +@code{exit-vnc} to leave VNC configuration mode. @code{vnc +defaults} is optional. + +@example +vnc defaults + ... various VNC defaults +exit-vnc +@end example +@end deffn + +These are the statements that can appear between @code{vnc defaults} +and @code{exit-vnc}. + +@deffn {VNC} {rt import @var{rt-list}} {} +@deffnx {VNC} {rt export @var{rt-list}} {} +@deffnx {VNC} {rt both @var{rt-list}} {} + +Specify default route target import and export lists. @var{rt-list} is a +space-separated list of route targets, each element of which is +in one of the following forms: +@itemize +@item @var{IPv4-address}:@var{two-byte-integer} +@item @var{four-byte-autonomous-system-number}:@var{two-byte-integer} +@item @var{two-byte-autonomous-system-number}:@var{four-byte-integer} +@end itemize + +If no default import RT list is specified, then the default import RT +list is empty. +If no default export RT list is specified, then the default export RT +list is empty. + +A complete definition of these parameters is +given below (@pxref{VNC NVE Group Configuration}). + +@end deffn + +@deffn {VNC} {rd @var{route-distinguisher}} + +Specify the default route distinguisher (RD) for routes advertised via BGP +VPNs. The route distinguisher must be in one of four forms: +@itemize +@item @var{IPv4-address}:@var{two-byte-integer} +@item @var{four-byte-autonomous-system-number}:@var{two-byte-integer} +@item @var{two-byte-autonomous-system-number}:@var{four-byte-integer} +@item auto:vn:@var{two-byte-integer} +@end itemize + +If RD is specified in the defaults section, the default RD +value is @var{two-byte-autonomous-system-number=0}:@var{four-byte-integer=0}. + +A complete definition of this parameter is +given below (@pxref{VNC NVE Group Configuration}). +@end deffn + +@deffn {VNC} {l2rd @var{nve-id-value}} +Set the value used to distinguish NVEs connected to the same logical +Ethernet segment (i.e., L2VPN). + +A complete definition of this parameter is +given below (@pxref{VNC NVE Group Configuration}). +@end deffn + +@deffn {VNC} {response-lifetime @var{lifetime}|infinite} {} +Specify the default lifetime to be included in RFP +response messages sent to NVEs. + +A complete definition of this parameter is +given below (@pxref{VNC NVE Group Configuration}). + +@end deffn + +@deffn {VNC} {export bgp|zebra route-map MAP-NAME} +Specify that the named route-map should be applied to routes +being exported to bgp or zebra. +@end deffn + +@deffn {VNC} {export bgp|zebra no route-map} +Specify that no route-map should be applied to routes +being exported to bgp or zebra. 
+@end deffn + +@deffn {VNC} {export bgp|zebra ipv4|ipv6 prefix-list LIST-NAME} +Specify that the named prefix-list filter should be applied to +routes being exported to bgp or zebra. +Prefix-lists for ipv4 and ipv6 are independent of each other. +@end deffn + +@deffn {VNC} {export bgp|zebra no ipv4|ipv6 prefix-list} +Specify that no prefix-list filter should be applied to +routes being exported to bgp or zebra. +@end deffn + +@deffn {VNC} {exit-vnc} {} +Exit VNC configuration mode. +@end deffn + +@c The following example @code{vnc defaults} defines a route target import-export +@c list for the route targets 1000:1 and 1000:2; a default route +@c distinguisher, 4444:10; and a default response lifetime of 500 +@c seconds. +@c +@c @example +@c vnc defaults +@c rt both 1000:1 1000:2 +@c rd 4444:10 +@c response-lifetime 500 +@c exit-vnc +@c @end example + +@node VNC NVE Group Configuration +@subsection VNC NVE Group Configuration + +A NVE Group corresponds to a specific set of NVEs. A Client NVE is +assigned to an NVE Group based on whether there is a match for either +its virtual or underlay network address against the VN and/or UN address +prefixes specified in the NVE Group definition. When an NVE Group +definition specifies both VN and UN address prefixes, then an NVE must +match both prefixes in order to be assigned to the NVE Group. In the +event that multiple NVE Groups match based on VN and/or UN addresses, +the NVE is assigned to the first NVE Group listed in the configuration. +If an NVE is not assigned to an NVE Group, its messages will be ignored. + +Configuration values specified for an NVE group apply to all +member NVEs and override configuration values specified in the VNC +Defaults section. + +@strong{At least one @code{nve-group} is mandatory for useful VNC +operation.} + +@deffn {VNC} {vnc nve-group @var{name}} {} +Enter VNC configuration mode for defining the NVE group @var{name}. +Use @code{exit} or @code{exit-vnc} to exit group configuration mode. + +@example +vnc nve-group group1 + ... configuration commands +exit-vnc +@end example +@end deffn + +@deffn {VNC} {no vnc nve-group @var{name}} {} +Delete the NVE group named @var{name}. +@end deffn + +The following statements are valid in an NVE group definition: + +@deffn {VNC} {l2rd @var{nve-id-value}} +Set the value used to distinguish NVEs connected to the same physical +Ethernet segment (i.e., at the same location)@footnote{The nve-id is +carried in the route +distinguisher. It is the second octet of the eight-octet route +distinguisher generated for Ethernet / L2 advertisements. +The first octet is a constant 0xFF, and the third through eighth +octets are set to the L2 ethernet address being advertised.} + +The nve-id subfield may be specified as either a literal value +in the range 1-255, or it may be specified as @code{auto:vn}, which +means to use the least-significant octet of the originating +NVE's VN address. +@end deffn + +@deffn {VNC} {prefix vn|un A.B.C.D/M|X:X::X:X/M} {} +@anchor{prefix} +Specify the matching prefix for this NVE group by either virtual-network address +(@code{vn}) or underlay-network address (@code{un}). Either or both virtual-network +and underlay-network prefixes may be specified. Subsequent virtual-network or +underlay-network values within a @code{vnc nve-group} @code{exit-vnc} +block override their respective previous values. + +These prefixes are used only for determining assignments of NVEs +to NVE Groups. 
+@end deffn + +@deffn {VNC} {rd @var{route-distinguisher}} +Specify the route distinguisher for routes advertised via BGP +VPNs. The route distinguisher must be in one of these forms: +@itemize +@item @var{IPv4-address}:@var{two-byte-integer} +@item @var{four-byte-autonomous-system-number}:@var{two-byte-integer} +@item @var{two-byte-autonomous-system-number}:@var{four-byte-integer} +@item auto:vn:@var{two-byte-integer} +@end itemize + +Routes originated by NVEs in the NVE group will use +the group's specified @var{route-distinguisher} when they are +advertised via BGP. +If the @code{auto} form is specified, it means that a matching NVE has +its RD set to +@var{rd_type=IP=1}:@var{IPv4-address=VN-address}:@var{two-byte-integer}, +for IPv4 VN addresses and +@var{rd_type=IP=1}:@var{IPv4-address=Last-four-bytes-of-VN-address}:@var{two-byte-integer}, +for IPv6 VN addresses. + +If the NVE group definition does not specify a @var{route-distinguisher}, +then the default @var{route-distinguisher} is used. +If neither a group nor a default @var{route-distinguisher} is +configured, then the advertised RD is set to +@var{two-byte-autonomous-system-number=0}:@var{four-byte-integer=0}. +@end deffn + +@deffn {VNC} {response-lifetime @var{lifetime}|infinite} {} +Specify the response lifetime, in seconds, to be included in RFP +response messages sent to NVEs. If the value +``infinite'' is given, an infinite lifetime will be used. + +Note that this parameter is not the same as the lifetime supplied by +NVEs in RFP registration messages. This parameter does not affect +the lifetime value attached to routes sent by this server via BGP. + +If the NVE group definition does not specify a @var{response-lifetime}, +the default @var{response-lifetime} will be used. +If neither a group nor a default @var{response-lifetime} is configured, +the value 3600 will be used. The maximum response lifetime is 2147483647. +@end deffn + +@deffn {VNC} {rt export @var{rt-list}} {} +@deffnx {VNC} {rt import @var{rt-list}} {} +@deffnx {VNC} {rt both @var{rt-list}} {} +Specify route target import and export lists. @var{rt-list} is a +space-separated list of route targets, each element of which is +in one of the following forms: +@itemize +@item @var{IPv4-address}:@var{two-byte-integer} +@item @var{four-byte-autonomous-system-number}:@var{two-byte-integer} +@item @var{two-byte-autonomous-system-number}:@var{four-byte-integer} +@end itemize + +The first form, @code{rt export}, specifies an @var{export rt-list}. +The @var{export rt-list} will be attached to routes originated by +NVEs in the NVE group when they are advertised via BGP. +If the NVE group definition does not specify an @var{export rt-list}, +then the default @var{export rt-list} is used. +If neither a group nor a default @var{export rt-list} is configured, +then no RT list will be sent; in turn, these routes will probably +not be processed +by receiving NVAs. + +The second form, @code{rt import} specifies an @var{import rt-list}, +which is a filter for incoming routes. +In order to be made available to NVEs in the group, +incoming BGP VPN and @w{ENCAP} @w{SAFI} (when @code{vnc +advertise-un-method encap-safi} is set) routes must have +RT lists that have at least one route target in common with the +group's @var{import rt-list}. + +If the NVE group definition does not specify an import filter, +then the default @var{import rt-list} is used. 
+If neither a group nor a default @var{import rt-list} is configured,
+there can be no RT intersections when receiving BGP routes and
+therefore no incoming BGP routes will be processed for the group.
+
+The third, @code{rt both}, is a shorthand way of specifying both
+lists simultaneously, and is equivalent to @code{rt export @var{rt-list}}
+followed by @code{rt import @var{rt-list}}.
+@end deffn
+
+@deffn {VNC} {export bgp|zebra route-map MAP-NAME}
+Specify that the named route-map should be applied to routes
+being exported to bgp or zebra.
+This parameter is used in conjunction with
+@ref{Configuring Export of Routes to Other Routing Protocols}.
+This item is optional.
+@end deffn
+
+@deffn {VNC} {export bgp|zebra no route-map}
+Specify that no route-map should be applied to routes
+being exported to bgp or zebra.
+This parameter is used in conjunction with
+@ref{Configuring Export of Routes to Other Routing Protocols}.
+This item is optional.
+@end deffn
+
+@deffn {VNC} {export bgp|zebra ipv4|ipv6 prefix-list LIST-NAME}
+Specify that the named prefix-list filter should be applied to
+routes being exported to bgp or zebra.
+Prefix-lists for ipv4 and ipv6 are independent of each other.
+This parameter is used in conjunction with
+@ref{Configuring Export of Routes to Other Routing Protocols}.
+This item is optional.
+@end deffn
+
+@deffn {VNC} {export bgp|zebra no ipv4|ipv6 prefix-list}
+Specify that no prefix-list filter should be applied to
+routes being exported to bgp or zebra.
+This parameter is used in conjunction with
+@ref{Configuring Export of Routes to Other Routing Protocols}.
+This item is optional.
+@end deffn
+
+@c The following example shows two @code{vnc nve-group} definitions. The first one,
+@c ``group1'', applies to the IPV4 virtual-network route prefix 172.16/16. It
+@c sets the response lifetime to 200 seconds. It defines a route target
+@c import-export filter for the route targets 1000:1 and 1000:2
+@c
+@c The second @code{vnc nve-group} definition, ``group2'', applies to the IPV6
+@c underlay-network route prefix 10.0.2/24. It defines the same response
+@c lifetime and import-export filter as ``group1''.
+@c
+@c @example
+@c vnc nve-group group1
+@c prefix vn 172.16/16
+@c response-lifetime 200
+@c rt both 1000:1 1000:2
+@c exit-vnc
+@c
+@c vnc nve-group group2
+@c prefix un 10.0.2/24
+@c response-lifetime 200
+@c rt both 1000:1 1000:2
+@c exit-vnc
+@c @end example
+
+@node VNC L2 Group Configuration
+@subsection VNC L2 Group Configuration
+
+The route targets advertised with prefixes and addresses registered by
+an NVE are determined based on the NVE's associated VNC NVE Group
+Configuration, @pxref{VNC NVE Group Configuration}. Layer 2 (L2) Groups
+are used to override the route targets for an NVE's Ethernet
+registrations based on the Logical Network Identifier and label value.
+A Logical Network Identifier is used to uniquely identify a logical
+Ethernet segment and is conceptually similar to the Ethernet Segment
+Identifier defined in @cite{RFC7432, BGP MPLS-Based Ethernet VPN}. Both
+the Logical Network Identifier and Label are passed to VNC via RFP
+prefix and address registration.
+
+Note that a corresponding NVE group configuration must be present, and
+that other NVE-associated configuration information, notably RD, is
+not impacted by L2 Group Configuration.
+
+@deffn {VNC} {vnc l2-group @var{name}} {}
+Enter VNC configuration mode for defining the L2 group @var{name}.
+Use @code{exit} or @code{exit-vnc} to exit group configuration mode.
+ +@example +vnc l2-group group1 + ... configuration commands +exit-vnc +@end example +@end deffn + +@deffn {VNC} {no vnc l2-group @var{name}} {} +Delete the L2 group named @var{name}. +@end deffn + +The following statements are valid in a L2 group definition: + +@deffn {VNC} {logical-network-id @var{VALUE}} +Define the Logical Network Identifier with a value in the range of +0-4294967295 that identifies the logical Ethernet segment. +@end deffn + +@deffn {VNC} {labels @var{label-list}} +@deffnx {VNC} {no labels @var{label-list}} +Add or remove labels associated with the group. @var{label-list} is a +space separated list of label values in the range of 0-1048575. +@end deffn + +@deffn {VNC} {rt import @var{rt-target}} {} +@deffnx {VNC} {rt export @var{rt-target}} {} +@deffnx {VNC} {rt both @var{rt-target}} {} +Specify the route target import and export value associated with the +group. A complete definition of these parameters is given above, +@pxref{VNC NVE Group Configuration}. +@end deffn + + +@node Configuring Redistribution of Routes from Other Routing Protocols +@subsection Configuring Redistribution of Routes from Other Routing Protocols + +Routes from other protocols (including BGP) can be provided to VNC (both +for RFP and for redistribution via BGP) +from three sources: the zebra kernel routing process; +directly from the main (default) unicast BGP RIB; or directly +from a designated BGP unicast exterior routing RIB instance. + +The protocol named in the @code{vnc redistribute} command indicates +the route source: +@code{bgp-direct} routes come directly from the main (default) +unicast BGP RIB and are available for RFP and are redistributed via BGP; +@code{bgp-direct-to-nve-groups} routes come directly from a designated +BGP unicast routing RIB and are made available only to RFP; +and routes from other protocols come from the zebra kernel +routing process. +Note that the zebra process does not need to be active if +only @code{bgp-direct} or @code{bgp-direct-to-nve-groups} routes are used. + +@subsubsection @code{zebra} routes + +Routes originating from protocols other than BGP must be obtained +via the zebra routing process. +Redistribution of these routes into VNC does not support policy mechanisms +such as prefix-lists or route-maps. + +@subsubsection @code{bgp-direct} routes + +@code{bgp-direct} redistribution supports policy via +prefix lists and route-maps. This policy is applied to incoming +original unicast routes before the redistribution translations +(described below) are performed. + +Redistribution of @code{bgp-direct} routes is performed in one of three +possible modes: @code{plain}, @code{nve-group}, or @code{resolve-nve}. +The default mode is @code{plain}. +These modes indicate the kind of translations applied to routes before +they are added to the VNC RIB. + +In @code{plain} mode, the route's next hop is unchanged and the RD is set +based on the next hop. +For @code{bgp-direct} redistribution, the following translations are performed: +@itemize @bullet +@item +The VN address is set to the original unicast route's next hop address. +@item +The UN address is NOT set. 
(VN->UN mapping will occur via
+ENCAP route or attribute, based on @code{vnc advertise-un-method}
+setting, generated by the RFP registration of the actual NVE)
+@item
+The RD is set as if auto:vn:0 were specified (i.e.,
+@var{rd_type=IP=1}:@var{IPv4-address=VN-address}:@var{two-byte-integer=0})
+@item
+The RT list is included in the extended community list copied from the
+original unicast route (i.e., it must be set in the original unicast route).
+@end itemize
+
+
+
+In @code{nve-group} mode, routes are registered with VNC as
+if they came from an NVE in the nve-group designated in the
+@code{vnc redistribute nve-group} command. The following
+translations are performed:
+
+@itemize @bullet
+@item
+The next hop/VN address is set to the VN prefix configured for the
+redistribute nve-group.
+@item
+The UN address is set to the UN prefix configured for the
+redistribute nve-group.
+@item
+The RD is set to the RD configured for the redistribute nve-group.
+@item
+The RT list is set to the RT list configured for the redistribute nve-group.
+If @code{bgp-direct} routes are being redistributed,
+any extended communities present in the original unicast route
+will also be included.
+@end itemize
+
+
+In @code{resolve-nve} mode, the next hop of the original BGP route is
+typically the address of an NVE connected router (CE) connected by one or
+more NVEs.
+Each of the connected NVEs will register, via RFP, a VNC host route
+to the CE.
+This mode may be thought of as a mechanism to proxy RFP registrations
+of BGP unicast routes on behalf of registering NVEs.
+
+Multiple copies of the BGP route, one per matching NVE host route, will be
+added to VNC.
+In other words, for a given BGP unicast route, each instance of an
+RFP-registered host route to the unicast route's next hop will result
+in an instance of an imported VNC route.
+Each such imported VNC route will have a prefix equal to the original
+BGP unicast route's prefix, and a next hop equal to the next hop of the
+matching RFP-registered host route.
+If there is no RFP-registered host route to the next hop of the BGP unicast
+route, no corresponding VNC route will be imported.
+
+The following translations are applied:
+
+@itemize @bullet
+@item
+The Next Hop is set to the next hop of the NVE route (i.e., the
+VN address of the NVE).
+
+@item
+The extended community list in the new route is set to the
+union of:
+@itemize @minus
+@item
+Any extended communities in the original BGP route
+@item
+Any extended communities in the NVE route
+@item
+A route-origin extended community, added to the new route, carrying the
+next hop of the original BGP route.
+The value of the local administrator field defaults to 5226 but may
+be configured by the user via the @code{roo-ec-local-admin} parameter.
+@end itemize
+
+@item
+The Tunnel Encapsulation attribute is set to the value of the Tunnel
+Encapsulation attribute of the NVE route, if any.
+
+@end itemize
+
+@subsubsection @code{bgp-direct-to-nve-groups} routes
+
+Unicast routes from the main or a designated instance of BGP
+may be redistributed to VNC as bgp-direct-to-nve-groups routes. These
+routes are NOT announced via BGP,
+but they are made available for local RFP lookup in response to
+queries from NVEs.
+
+A non-main/default BGP instance is configured using the
+@code{bgp multiple-instance} and @code{router bgp AS view NAME}
+commands as described elsewhere in this document.
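+
+For illustration only, a sketch of such a configuration is shown below.
+The view name @code{RIB-A}, the external AS number, and the neighbor
+address are hypothetical placeholders rather than values taken from any
+shipped configuration; the command forms themselves are the ones
+described in this section and in the command syntax that follows.
+
+@example
+bgp multiple-instance
+!
+router bgp 64512 view RIB-A
+ neighbor 203.0.113.1 remote-as 64513
+exit
+!
+router bgp 64512
+ vnc redistribute mode plain
+ vnc redistribute ipv4 bgp-direct-to-nve-groups view RIB-A
+exit
+@end example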
+
+In order for a route in the unicast BGP RIB to be made
+available to a querying NVE, there must already be, available to
+that NVE, an (interior) VNC route matching the next hop address
+of the unicast route.
+When the unicast route is provided to the NVE, its next hop
+is replaced by the next hop of the corresponding
+NVE. If there are multiple longest-prefix-match VNC routes,
+the unicast route will be replicated for each.
+
+There is currently no policy (prefix-list or route-map) support
+for @code{bgp-direct-to-nve-groups} routes.
+
+@subsubsection Redistribution Command Syntax
+
+@deffn {VNC} {vnc redistribute ipv4|ipv6 bgp|bgp-direct|ipv6 bgp-direct-to-nve-groups|connected|kernel|ospf|rip|static} {}
+@deffnx {VNC} {vnc redistribute ipv4|ipv6 bgp-direct-to-nve-groups view @var{VIEWNAME}} {}
+@deffnx {VNC} {no vnc redistribute ipv4|ipv6 bgp|bgp-direct|bgp-direct-to-nve-groups|connected|kernel|ospf|rip|static} {}
+Import (or do not import) prefixes from other routing
+protocols. Specify both the address family to import (@code{ipv4} or
+@code{ipv6}) and the protocol (@code{bgp}, @code{bgp-direct},
+@code{bgp-direct-to-nve-groups}, @code{connected},
+@code{kernel}, @code{ospf}, @code{rip}, or @code{static}). Repeat
+this statement as needed for each combination of address family and
+routing protocol.
+Prefixes from protocol @code{bgp-direct} are imported from unicast BGP
+in the same bgpd process.
+Prefixes from all other protocols (including @code{bgp}) are imported
+via the @code{zebra} kernel routing process.
+@end deffn
+
+@deffn {VNC} {vnc redistribute mode plain|nve-group|resolve-nve}
+Redistribute routes from other protocols into VNC using the
+specified mode.
+Not all combinations of modes and protocols are supported.
+@end deffn
+
+@deffn {VNC} {vnc redistribute nve-group @var{group-name}} {}
+@deffnx {VNC} {no vnc redistribute nve-group @var{group-name}} {}
+When using @code{nve-group} mode,
+assign (or do not assign) the NVE group @var{group-name} to routes
+redistributed from another routing protocol. @var{group-name}
+must be configured using @code{vnc nve-group}.
+
+The VN and UN prefixes of the nve-group must both be configured,
+and each prefix must be specified as a full-length (/32 for IPv4,
+/128 for IPv6) prefix.
+@end deffn
+
+@deffn {VNC} {vnc redistribute lifetime @var{lifetime}|infinite} {}
+Assign a registration lifetime, either @var{lifetime} seconds or
+@code{infinite}, to prefixes redistributed from other routing
+protocols as if they had been received via RFP registration messages
+from an NVE. @var{lifetime} can be any integer between 1 and
+4294967295, inclusive.
+@end deffn
+
+@deffn {VNC} {vnc redistribute resolve-nve roo-ec-local-admin @var{0-65536}}
+Assign a value to the local-administrator subfield used in the
+Route Origin extended community that is assigned to routes exported
+under the @code{resolve-nve} mode. The default value is @var{5226}.
+@end deffn
+
+The following four @code{prefix-list} and @code{route-map} commands
+may be specified in the context of an nve-group or not.
+If they are specified in the context of an nve-group, they
+apply only if the redistribution mode is @code{nve-group},
+and then only for routes being redistributed from
+@code{bgp-direct}.
+If they are specified outside the context of an nve-group, then
+they apply only for redistribution modes @code{plain} and @code{resolve-nve},
+and then only for routes being redistributed from @code{bgp-direct}.
+ +@deffn {VNC} {vnc redistribute bgp-direct (ipv4|ipv6) prefix-list @var{LIST-NAME}} +When redistributing @code{bgp-direct} routes, +specifies that the named prefix-list should be applied. +@end deffn + +@deffn {VNC} {vnc redistribute bgp-direct no (ipv4|ipv6) prefix-list} +When redistributing @code{bgp-direct} routes, +specifies that no prefix-list should be applied. +@end deffn + +@deffn {VNC} {vnc redistribute bgp-direct route-map @var{MAP-NAME}} +When redistributing @code{bgp-direct} routes, +specifies that the named route-map should be applied. +@end deffn + +@deffn {VNC} {vnc redistribute bgp-direct no route-map} +When redistributing @code{bgp-direct} routes, +specifies that no route-map should be applied. +@end deffn + +@node Configuring Export of Routes to Other Routing Protocols +@subsection Configuring Export of Routes to Other Routing Protocols + +Routes from VNC (both for RFP and for redistribution via BGP) can be +provided to other protocols, either via zebra or directly to BGP. + +It is important to note that when exporting routes to other protocols, +the downstream protocol must also be configured to import the routes. +For example, when VNC routes are exported to unicast BGP, the BGP +configuration must include a corresponding @code{redistribute vpn} +statement. + +@deffn {VNC} {export bgp|zebra mode none|group-nve|registering-nve|ce} +Specify how routes should be exported to bgp or zebra. +If the mode is @code{none}, routes are not exported. +If the mode is @code{group-nve}, routes are exported according +to nve-group configuration (@pxref{VNC NVE Group Configuration}): if a group is configured to +allow export, then each prefix visible to the group is exported +with next hops set to the currently-registered NVEs. +If the mode is @code{registering-nve}, then all VNC routes are +exported with their original next hops. +If the mode is @code{ce}, only VNC routes that have an NVE connected CE Router +encoded in a Route Origin Extended Community are exported. +This extended community must have an administrative value that +matches the configured @code{roo-ec-local-admin} value. +The next hop of the exported route is set to the encoded +NVE connected CE Router. + +The default for both bgp and zebra is mode @code{none}. +@end deffn + +@deffn {VNC} {vnc export bgp|zebra group-nve group @var{group-name}} +@deffnx {VNC} {vnc export bgp|zebra group-nve no group @var{group-name}} +When export mode is @code{group-nve}, +export (or do not export) prefixes from the specified nve-group +to unicast BGP or to zebra. +Repeat this statement as needed for each nve-group to be exported. +Each VNC prefix that is exported will result in N exported routes to the +prefix, each with a next hop corresponding to one of the N NVEs currently +associated with the nve-group. +@end deffn + +@deffn {VNC} export bgp|zebra ipv4|ipv6 prefix-list LIST-NAME +When export mode is @code{ce} or @code{registering-nve}, +specifies that the named prefix-list should be applied to routes +being exported to bgp or zebra. +Prefix-lists for ipv4 and ipv6 are independent of each other. +@end deffn + +@deffn {VNC} export bgp|zebra no ipv4|ipv6 prefix-list +When export mode is @code{ce} or @code{registering-nve}, +specifies that no prefix-list should be applied to routes +being exported to bgp or zebra. +@end deffn + +@deffn {VNC} export bgp|zebra route-map MAP-NAME +When export mode is @code{ce} or @code{registering-nve}, +specifies that the named route-map should be applied to routes +being exported to bgp or zebra. 
+@end deffn
+
+@deffn {VNC} export bgp|zebra no route-map
+When export mode is @code{ce} or @code{registering-nve},
+specifies that no route-map should be applied to routes
+being exported to bgp or zebra.
+@end deffn
+
+When the export mode is @code{group-nve}, policy for exported
+routes is specified per-NVE-group inside an @code{nve-group} @var{RFG-NAME} block
+via the following commands (@pxref{VNC NVE Group Configuration}):
+
+@deffn {VNC} {export bgp|zebra route-map MAP-NAME}
+This command is valid inside an @code{nve-group} @var{RFG-NAME} block.
+It specifies that the named route-map should be applied to routes
+being exported to bgp or zebra.
+@end deffn
+
+@deffn {VNC} {export bgp|zebra no route-map}
+This command is valid inside an @code{nve-group} @var{RFG-NAME} block.
+It specifies that no route-map should be applied to routes
+being exported to bgp or zebra.
+@end deffn
+
+@deffn {VNC} {export bgp|zebra ipv4|ipv6 prefix-list LIST-NAME}
+This command is valid inside an @code{nve-group} @var{RFG-NAME} block.
+It specifies that the named prefix-list filter should be applied to
+routes being exported to bgp or zebra.
+Prefix-lists for ipv4 and ipv6 are independent of each other.
+@end deffn
+
+@deffn {VNC} {export bgp|zebra no ipv4|ipv6 prefix-list}
+This command is valid inside an @code{nve-group} @var{RFG-NAME} block.
+It specifies that no prefix-list filter should be applied to
+routes being exported to bgp or zebra.
+@end deffn
+
+@node Manual Address Control
+@section Manual Address Control
+
+The commands in this section can be used to augment normal dynamic VNC.
+The @code{add vnc} commands can be used to manually add IP prefix or
+Ethernet MAC address forwarding information. The @code{clear vnc}
+commands can be used to remove manually and dynamically added
+information.
+
+@deffn {Command} {add vnc prefix (A.B.C.D/M|X:X::X:X/M) vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X) [cost <0-255>] [lifetime (infinite|<1-4294967295>)] [local-next-hop (A.B.C.D|X:X::X:X) [local-cost <0-255>]]} {}
+Register an IP prefix on behalf of the NVE identified by the VN and UN
+addresses. The @code{cost} parameter provides the administrative
+preference of the forwarding information for remote advertisement. If
+omitted, it defaults to 255 (lowest preference). The @code{lifetime}
+parameter identifies the period, in seconds, that the information
+remains valid. If omitted, it defaults to @var{infinite}. The optional
+@code{local-next-hop} parameter is used to configure a nexthop to be
+used by an NVE to reach the prefix via a locally connected CE router.
+This information remains local to the NVA, i.e., not passed to other
+NVAs, and is only passed to registered NVEs. When specified, it is also
+possible to provide a @code{local-cost} parameter to provide a
+forwarding preference. If omitted, it defaults to 255 (lowest
+preference).
+@end deffn
+
+
+@deffn {Command} {add vnc mac xx:xx:xx:xx:xx:xx virtual-network-identifier <1-4294967295> vn (A.B.C.D|X:X::X:X) un (A.B.C.D|X:X::X:X) [prefix (A.B.C.D/M|X:X::X:X/M)] [cost <0-255>] [lifetime (infinite|<1-4294967295>)]} {}
+Register a MAC address for a logical Ethernet (L2VPN) on behalf of the
+NVE identified by the VN and UN addresses.
+The optional @code{prefix} parameter is used to enable IP address
+mediation for the given prefix. The @code{cost} parameter provides the administrative
+preference of the forwarding information. If omitted, it defaults to
+255. The @code{lifetime} parameter identifies the period, in seconds,
+that the information remains valid. If omitted, it defaults to
+@var{infinite}.
+@end deffn
+
+@deffn {Command} {clear vnc prefix (*|A.B.C.D/M|X:X::X:X/M) (*|[(vn|un) (A.B.C.D|X:X::X:X|*) [(un|vn) (A.B.C.D|X:X::X:X|*)] [mac xx:xx:xx:xx:xx:xx] [local-next-hop (A.B.C.D|X:X::X:X)])} {}
+Delete the information identified by prefix, VN address, and UN address.
+Any or all of these parameters may be wildcarded to (potentially) match
+more than one registration.
+The optional @code{mac} parameter specifies a layer-2 MAC address
+that must match the registration(s) to be deleted.
+The optional @code{local-next-hop} parameter is used to
+delete specific local nexthop information.
+@end deffn
+
+@deffn {Command} {clear vnc mac (*|xx:xx:xx:xx:xx:xx) virtual-network-identifier (*|<1-4294967295>) (*|[(vn|un) (A.B.C.D|X:X::X:X|*) [(un|vn) (A.B.C.D|X:X::X:X|*)] [prefix (*|A.B.C.D/M|X:X::X:X/M)])} {}
+Delete mac forwarding information.
+Any or all of these parameters may be wildcarded to (potentially) match
+more than one registration.
+The default value for the @code{prefix} parameter is the wildcard value @var{*}.
+@end deffn
+
+@deffn {Command} {clear vnc nve (*|((vn|un) (A.B.C.D|X:X::X:X) [(un|vn) (A.B.C.D|X:X::X:X)])) } {}
+Delete prefixes associated with the NVE specified by the given VN and UN
+addresses.
+It is permissible to specify only one of VN or UN, in which case
+any matching registration will be deleted.
+It is also permissible to specify @code{*} in lieu of any VN or UN
+address, in which case all registrations will match.
+@end deffn
+
+@node Other VNC-Related Commands
+@section Other VNC-Related Commands
+
+Note: VNC-related configuration can be obtained via the @code{show
+running-configuration} command when in @code{enable} mode.
+
+The following commands are used to clear and display
+Virtual Network Control related information:
+
+@deffn {Command} {clear vnc counters} {}
+Reset the counter values stored by the NVA. Counter
+values can be seen using the @code{show vnc} commands listed below. This
+command is only available in @code{enable} mode.
+@end deffn
+
+@deffn {Command} {show vnc summary} {}
+Print counter values and other general information
+about the NVA. Counter values can be reset
+using the @code{clear vnc counters} command listed above.
+@end deffn
+
+@deffn {Command} {show vnc nves} {}
+@deffnx {Command} {show vnc nves vn|un @var{address}} {}
+Display the NVA's current clients. Specifying @var{address}
+limits the output to the NVEs whose addresses match @var{address}.
+The time since the NVA last communicated with the NVE, per-NVE
+summary counters and each NVE's addresses will be displayed.
+@end deffn
+
+@deffn {Command} {show vnc queries} {}
+@deffnx {Command} {show vnc queries @var{prefix}} {}
+Display active Query information. Queries remain valid for the default
+Response Lifetime (@pxref{VNC Defaults Configuration}) or NVE-group
+Response Lifetime (@pxref{VNC NVE Group Configuration}). Specifying
+@var{prefix} limits the output to Query Targets that fall within
+@var{prefix}.
+
+Query information is provided for each querying NVE, and includes the
+Query Target and the time remaining before the information is removed.
+@end deffn
+
+@deffn {Command} {show vnc registrations [all|local|remote|holddown|imported]} {}
+@deffnx {Command} {show vnc registrations [all|local|remote|holddown|imported] @var{prefix}} {}
+Display local, remote, holddown, and/or imported registration information.
+Local registrations are routes received via RFP, which are present in the +NVA Registrations Cache. +Remote registrations are routes received via BGP (VPN SAFIs), which +are present in the NVE-group import tables. +Holddown registrations are local and remote routes that have been +withdrawn but whose holddown timeouts have not yet elapsed. +Imported information represents routes that are imported into NVA and +are made available to querying NVEs. Depending on configuration, +imported routes may also be advertised via BGP. +Specifying @var{prefix} limits the output to the registered prefixes that +fall within @var{prefix}. + +Registration information includes the registered prefix, the registering +NVE addresses, the registered administrative cost, the registration +lifetime and the time since the information was registered or, in the +case of Holddown registrations, the amount of time remaining before the +information is removed. +@end deffn + +@deffn {Command} {show vnc responses [active|removed]} {} +@deffnx {Command} {show vnc responses [active|removed] @var{prefix}} {} +Display all, active and/or removed response information which are +present in the NVA Responses Cache. Responses remain valid for the +default Response Lifetime (@pxref{VNC Defaults Configuration}) or +NVE-group Response Lifetime (@pxref{VNC NVE Group Configuration}.) +When Removal Responses are enabled (@pxref{General VNC Configuration}), +such responses are listed for the Response Lifetime. Specifying +@var{prefix} limits the output to the addresses that fall within +@var{prefix}. + +Response information is provided for each querying NVE, and includes +the response prefix, the prefix-associated registering NVE addresses, +the administrative cost, the provided response lifetime and the time +remaining before the information is to be removed or will become inactive. +@end deffn + +@deffn {Command} {show memory vnc} {} +Print the number of memory items allocated by the NVA. +@end deffn + +@node Example VNC and VNC-GW Configurations +@section Example VNC and VNC-GW Configurations + +@menu +* Mesh NVA Configuration:: +* Mesh NVA and VNC-GW Configuration:: +* VNC with Quagga Route Reflector Configuration:: +* VNC with Commercial Route Reflector Configuration:: +* VNC with Redundant Route Reflectors Configuration:: +@c * Interfacing VNC to an IGP:: +@end menu + +@node Mesh NVA Configuration +@subsection Mesh NVA Configuration + +This example includes three NVAs, nine NVEs, and two NVE groups. Note +that while not shown, a single physical device may support multiple +logical NVEs. @ref{fig:fig-vnc-mesh} shows @code{NVA 1} +(192.168.1.100), @code{NVA 2} (192.168.1.101), and @code{NVA 3} +(192.168.1.102), which are connected in a full mesh. Each is a +member of the autonomous system 64512. Each NVA provides VNC +services to three NVE clients in the 172.16.0.0/16 virtual-network +address range. The 172.16.0.0/16 address range is partitioned into +two NVE groups, @code{group1} (172.16.0.0/17) and @code{group2} +(172.16.128.0/17). + +Each NVE belongs to either NVE group @code{group1} or NVE group +@code{group2}. The NVEs @code{NVE 1}, @code{NVE 2}, @code{NVE +4}, @code{NVE 7}, and @code{NVE 8} are members of the NVE group +@code{group1}. The NVEs @code{NVE 3}, @code{NVE 5}, @code{NVE +6}, and @code{NVE 9} are members of the NVE group @code{group2}. + +Each NVA advertises NVE underlay-network IP addresses using the +Tunnel Encapsulation Attribute. 
+
+@float Figure,fig:fig-vnc-mesh
+@center @image{fig-vnc-mesh,400pt,,Three-way Mesh}
+@caption{A three-way full mesh with three NVEs per NVA}
+@end float
+
+@file{bgpd.conf} for @code{NVA 1} (192.168.1.100):
+@verbatim
+router bgp 64512
+
+ bgp router-id 192.168.1.100
+
+ neighbor 192.168.1.101 remote-as 64512
+ neighbor 192.168.1.102 remote-as 64512
+
+ address-family vpnv4
+  neighbor 192.168.1.101 activate
+  neighbor 192.168.1.102 activate
+ exit-address-family
+
+ vnc defaults
+  rd 64512:1
+  response-lifetime 200
+  rt both 1000:1 1000:2
+ exit-vnc
+
+ vnc nve-group group1
+  prefix vn 172.16.0.0/17
+  rt both 1000:1
+ exit-vnc
+
+ vnc nve-group group2
+  prefix vn 172.16.128.0/17
+  rt both 1000:2
+ exit-vnc
+
+exit
+@end verbatim
+
+@file{bgpd.conf} for @code{NVA 2} (192.168.1.101):
+@verbatim
+router bgp 64512
+
+ bgp router-id 192.168.1.101
+
+ neighbor 192.168.1.100 remote-as 64512
+ neighbor 192.168.1.102 remote-as 64512
+
+ address-family vpnv4
+  neighbor 192.168.1.100 activate
+  neighbor 192.168.1.102 activate
+ exit-address-family
+
+ vnc nve-group group1
+  prefix vn 172.16.0.0/17
+  rd 64512:1
+  response-lifetime 200
+  rt both 1000:1 1000:2
+ exit-vnc
+exit
+@end verbatim
+
+@file{bgpd.conf} for @code{NVA 3} (192.168.1.102):
+@verbatim
+router bgp 64512
+
+ bgp router-id 192.168.1.102
+
+ neighbor 192.168.1.100 remote-as 64512
+ neighbor 192.168.1.101 remote-as 64512
+
+ address-family vpnv4
+  neighbor 192.168.1.100 activate
+  neighbor 192.168.1.101 activate
+ exit-address-family
+
+ vnc defaults
+  rd 64512:1
+  response-lifetime 200
+  rt both 1000:1 1000:2
+ exit-vnc
+
+ vnc nve-group group1
+  prefix vn 172.16.128.0/17
+ exit-vnc
+exit
+@end verbatim
+
+@node Mesh NVA and VNC-GW Configuration
+@subsection Mesh NVA and VNC-GW Configuration
+
+This example includes two NVAs, each with two associated NVEs, and two
+VNC-GWs, each supporting two CE routers physically attached to the four
+NVEs. Note that this example is showing a more complex configuration
+where VNC-GW is separated from normal NVA functions; it is equally
+possible to simplify the configuration and combine NVA and VNC-GW
+functions in a single quagga instance.
+
+@float Figure,fig:fig-vnc-gw
+@center @image{fig-vnc-gw,400pt,,Quagga VNC Gateway}
+@caption{Meshed NVEs and VNC-GWs}
+@end float
+
+As shown in @ref{fig:fig-vnc-gw}, NVAs and VNC-GWs are connected in a
+full iBGP mesh. The VNC-GWs each have two CEs configured as
+route-reflector clients. Each client provides BGP updates with unicast
+routes that the VNC-GW reflects to the other client. The VNC-GW also
+imports these unicast routes into VPN routes to be shared with the other
+VNC-GW and the two NVAs. This route importation is controlled with the
+@code{vnc redistribute} statements shown in the configuration.
+Similarly, registrations sent by NVEs via RFP to the NVAs are exported
+by the VNC-GWs to the route-reflector clients as unicast routes. RFP
+registrations exported this way have a next-hop address of the CE behind
+the connected (registering) NVE. Exporting VNC routes as IPv4 unicast
+is enabled with the @code{vnc export} command below.
+
+The configuration for @code{VNC-GW 1} is shown below.
+@verbatim +router bgp 64512 + bgp router-id 192.168.1.101 + bgp cluster-id 1.2.3.4 + redistribute vpn + neighbor 192.168.1.102 remote-as 64512 + no neighbor 192.168.1.102 activate + neighbor 192.168.1.103 remote-as 64512 + no neighbor 192.168.1.103 activate + neighbor 192.168.1.104 remote-as 64512 + no neighbor 192.168.1.104 activate + neighbor 172.16.1.2 remote-as 64512 + neighbor 172.16.1.2 route-reflector-client + neighbor 172.16.2.2 remote-as 64512 + neighbor 172.16.2.2 route-reflector-client +! + address-family vpnv4 unicast + neighbor 192.168.1.102 activate + neighbor 192.168.1.103 activate + neighbor 192.168.1.104 activate + exit-address-family + vnc export bgp mode ce + vnc redistribute mode resolve-nve + vnc redistribute ipv4 bgp-direct + exit +@end verbatim + +Note that in the VNC-GW configuration, the neighboring VNC-GW and +NVAs each have a statement disabling the IPv4 unicast address family. +IPv4 unicast is on by default and this prevents the other VNC-GW and +NVAs from learning unicast routes advertised by the route-reflector clients. + +Configuration for @code{NVA 2}: +@verbatim +router bgp 64512 + bgp router-id 192.168.1.104 + neighbor 192.168.1.101 remote-as 64512 + no neighbor 192.168.1.101 activate + neighbor 192.168.1.102 remote-as 64512 + no neighbor 192.168.1.102 activate + neighbor 192.168.1.103 remote-as 64512 + no neighbor 192.168.1.103 activate + address-family vpnv4 unicast + neighbor 192.168.1.101 activate + neighbor 192.168.1.102 activate + neighbor 192.168.1.103 activate + exit-address-family + vnc defaults + response-lifetime 3600 + exit-vnc + vnc nve-group nve1 + prefix vn 172.16.1.1/32 + response-lifetime 3600 + rt both 1000:1 1000:2 + exit-vnc + vnc nve-group nve2 + prefix vn 172.16.2.1/32 + response-lifetime 3600 + rt both 1000:1 1000:2 + exit-vnc + exit +@end verbatim + +@c TBD make this its own example: +@c +@c @float Figure,fig:fig-vnc-gw-rr +@c @center @image{fig-vnc-gw-rr,400pt,,Quagga VNC Gateway with RR} +@c @end float +@c An NVA can also import unicast routes from BGP without advertising the +@c imported routes as VPN routes. Such imported routes, while not +@c distributed to other NVAs or VNC-GWs, are are available to NVEs via +@c RFP query messages sent to the NVA. @ref{fig:fig-vnc-gw-rr} +@c shows an example topology where unicast routes are imported into NVAs +@c from a Route Reflector. (@pxref{Route Reflector} for route reflector +@c configuration details.) The following three lines can be added to the +@c @code{NVA 1} and @code{NVA 2} configurations to import routes into VNC +@c for local VNC use: +@c +@c @verbatim +@c neighbor 192.168.1.105 remote-as 64512 +@c vnc redistribute mode plain +@c vnc redistribute ipv4 bgp-direct-to-nve-groups +@c @end verbatim + +@node VNC with Quagga Route Reflector Configuration +@subsection VNC with Quagga Route Reflector Configuration +A route reflector eliminates the need for a fully meshed NVA +network by acting as the hub between NVAs. +@ref{fig:fig-vnc-quagga-route-reflector} shows BGP route reflector +@code{BGP Route Reflector 1} (192.168.1.100) as a route reflector for +NVAs @code{NVA 2}(192.168.1.101) and @code{NVA 3} +(192.168.1.102). + +@float Figure,fig:fig-vnc-quagga-route-reflector +@center @image{fig-vnc-quagga-route-reflector,400pt,,Quagga Route Reflector} +@caption{Two NVAs and a BGP Route Reflector} +@end float + +@code{NVA 2} and @code{NVA 3} +advertise NVE underlay-network IP addresses using the Tunnel Encapsulation Attribute. 
+@code{BGP Route Reflector 1} ``reflects'' advertisements from
+@code{NVA 2} to @code{NVA 3} and vice versa.
+
+As in the example of @ref{Mesh NVA Configuration}, there are two NVE groups.
+The 172.16.0.0/16 address range is partitioned into two NVE groups,
+@code{group1} (172.16.0.0/17) and @code{group2} (172.16.128.0/17).
+The NVEs @code{NVE 4}, @code{NVE 7}, and @code{NVE 8} are
+members of the NVE group @code{group1}. The NVEs @code{NVE 5},
+@code{NVE 6}, and @code{NVE 9} are members of the NVE group
+@code{group2}.
+
+@file{bgpd.conf} for @code{BGP Route Reflector 1} on 192.168.1.100:
+@verbatim
+router bgp 64512
+
+ bgp router-id 192.168.1.100
+
+ neighbor 192.168.1.101 remote-as 64512
+ neighbor 192.168.1.101 port 7179
+ neighbor 192.168.1.101 description iBGP-client-192-168-1-101
+ neighbor 192.168.1.101 route-reflector-client
+
+ neighbor 192.168.1.102 remote-as 64512
+ neighbor 192.168.1.102 port 7179
+ neighbor 192.168.1.102 description iBGP-client-192-168-1-102
+ neighbor 192.168.1.102 route-reflector-client
+
+ address-family vpnv4
+  neighbor 192.168.1.101 activate
+  neighbor 192.168.1.102 activate
+
+  neighbor 192.168.1.101 route-reflector-client
+  neighbor 192.168.1.102 route-reflector-client
+ exit-address-family
+
+exit
+@end verbatim
+
+@file{bgpd.conf} for @code{NVA 2} on 192.168.1.101:
+@verbatim
+router bgp 64512
+
+ bgp router-id 192.168.1.101
+
+ neighbor 192.168.1.100 remote-as 64512
+
+ address-family vpnv4
+  neighbor 192.168.1.100 activate
+ exit-address-family
+
+ vnc nve-group group1
+  prefix vn 172.16.0.0/17
+  rd 64512:1
+  response-lifetime 200
+  rt both 1000:1 1000:2
+ exit-vnc
+exit
+@end verbatim
+
+@file{bgpd.conf} for @code{NVA 3} on 192.168.1.102:
+@verbatim
+router bgp 64512
+
+ bgp router-id 192.168.1.102
+
+ neighbor 192.168.1.100 remote-as 64512
+
+ address-family vpnv4
+  neighbor 192.168.1.100 activate
+ exit-address-family
+
+ vnc defaults
+  rd 64512:1
+  response-lifetime 200
+  rt both 1000:1 1000:2
+ exit-vnc
+
+ vnc nve-group group1
+  prefix vn 172.16.128.0/17
+ exit-vnc
+exit
+@end verbatim
+
+While not shown, an NVA can also be configured as a route reflector.
+
+@node VNC with Commercial Route Reflector Configuration
+@subsection VNC with Commercial Route Reflector Configuration
+This example is identical to @ref{VNC with Quagga Route Reflector
+Configuration} with the exception that the route reflector is a
+commercial router. Only the
+VNC-relevant configuration is provided.
+ +@float Figure,fig:fig-vnc-commercial-route-reflector +@center @image{fig-vnc-commercial-route-reflector,400pt,,Commercial Route Reflector} +@caption{Two NVAs with a commercial route reflector} +@end float + +@file{bgpd.conf} for BGP route reflector @code{Commercial Router} on 192.168.1.104: +@verbatim +version 8.5R1.13; +routing-options { + rib inet.0 { + static { + route 172.16.0.0/16 next-hop 192.168.1.104; + } + } + autonomous-system 64512; + resolution { + rib inet.3 { + resolution-ribs inet.0; + } + rib bgp.l3vpn.0 { + resolution-ribs inet.0; + } + } +} +protocols { + bgp { + advertise-inactive; + family inet { + labeled-unicast; + } + group 1 { + type internal; + advertise-inactive; + advertise-peer-as; + import h; + family inet { + unicast; + } + family inet-vpn { + unicast; + } + cluster 192.168.1.104; + neighbor 192.168.1.101; + neighbor 192.168.1.102; + } + } +} +policy-options { + policy-statement h { + from protocol bgp; + then { + as-path-prepend 64512; + accept; + } + } +} +@end verbatim + +@file{bgpd.conf} for @code{NVA 2} on 192.168.1.101: +@verbatim +router bgp 64512 + + bgp router-id 192.168.1.101 + + neighbor 192.168.1.100 remote-as 64512 + + address-family vpnv4 + neighbor 192.168.1.100 activate + exit-address-family + + vnc nve-group group1 + prefix vn 172.16.0.0/17 + rd 64512:1 + response-lifetime 200 + rt both 1000:1 1000:2 + exit-vnc +exit +@end verbatim + +@file{bgpd.conf} for @code{NVA 3} on 192.168.1.102: +@verbatim +router bgp 64512 + + bgp router-id 192.168.1.102 + + neighbor 192.168.1.100 remote-as 64512 + + address-family vpnv4 + neighbor 192.168.1.100 activate + exit-address-family + + vnc defaults + rd 64512:1 + response-lifetime 200 + rt both 1000:1 1000:2 + exit-vnc + + vnc nve-group group1 + prefix vn 172.16.128.0/17 + exit-vnc +exit +@end verbatim + +@node VNC with Redundant Route Reflectors Configuration +@subsection VNC with Redundant Route Reflectors Configuration +This example combines the previous two (@ref{VNC with Quagga Route +Reflector Configuration} and @ref{VNC with Commercial Route Reflector +Configuration}) into a redundant route reflector configuration. BGP +route reflectors @code{BGP Route Reflector 1} and @code{Commercial Router} +are the route reflectors for NVAs @code{NVA 2} and +@code{NVA 3}. The two NVAs have connections to both +route reflectors. 
+ +@float Figure,fig:fig-vnc-redundant-route-reflectors +@center @image{fig-vnc-redundant-route-reflectors,400pt,,Redundant Route Reflectors} +@caption{Quagga-based NVA with redundant route reflectors} +@end float + +@file{bgpd.conf} for @code{Bgpd Route Reflector 1} on 192.168.1.100: +@verbatim +router bgp 64512 + + bgp router-id 192.168.1.100 + bgp cluster-id 192.168.1.100 + + neighbor 192.168.1.104 remote-as 64512 + + neighbor 192.168.1.101 remote-as 64512 + neighbor 192.168.1.101 description iBGP-client-192-168-1-101 + neighbor 192.168.1.101 route-reflector-client + + neighbor 192.168.1.102 remote-as 64512 + neighbor 192.168.1.102 description iBGP-client-192-168-1-102 + neighbor 192.168.1.102 route-reflector-client + + address-family vpnv4 + neighbor 192.168.1.101 activate + neighbor 192.168.1.102 activate + neighbor 192.168.1.104 activate + + neighbor 192.168.1.101 route-reflector-client + neighbor 192.168.1.102 route-reflector-client + exit-address-family +exit +@end verbatim + +@file{bgpd.conf} for @code{NVA 2} on 192.168.1.101: +@verbatim +router bgp 64512 + + bgp router-id 192.168.1.101 + + neighbor 192.168.1.100 remote-as 64512 + neighbor 192.168.1.104 remote-as 64512 + + address-family vpnv4 + neighbor 192.168.1.100 activate + neighbor 192.168.1.104 activate + exit-address-family + + vnc nve-group group1 + prefix vn 172.16.0.0/17 + rd 64512:1 + response-lifetime 200 + rt both 1000:1 1000:2 + exit-vnc +exit +@end verbatim + +@file{bgpd.conf} for @code{NVA 3} on 192.168.1.102: +@verbatim +router bgp 64512 + + bgp router-id 192.168.1.102 + + neighbor 192.168.1.100 remote-as 64512 + neighbor 192.168.1.104 remote-as 64512 + + address-family vpnv4 + neighbor 192.168.1.100 activate + neighbor 192.168.1.104 activate + exit-address-family + + vnc defaults + rd 64512:1 + response-lifetime 200 + rt both 1000:1 1000:2 + exit-vnc + + vnc nve-group group1 + prefix vn 172.16.128.0/17 + exit-vnc +exit +@end verbatim + +@file{bgpd.conf} for the Commercial Router route reflector on +192.168.1.104: +@verbatim +routing-options { + rib inet.0 { + static { + route 172.16.0.0/16 next-hop 192.168.1.104; + } + } + autonomous-system 64512; + resolution { + rib inet.3 { + resolution-ribs inet.0; + } + rib bgp.l3vpn.0 { + resolution-ribs inet.0; + } + } +} +protocols { + bgp { + advertise-inactive; + family inet { + labeled-unicast; + } + group 1 { + type internal; + advertise-inactive; + advertise-peer-as; + import h; + family inet { + unicast; + } + family inet-vpn { + unicast; + } + cluster 192.168.1.104; + neighbor 192.168.1.101; + neighbor 192.168.1.102; + } + + group 2 { + type internal; + advertise-inactive; + advertise-peer-as; + import h; + family inet { + unicast; + } + family inet-vpn { + unicast; + } + neighbor 192.168.1.100; + } + + } +} +policy-options { + policy-statement h { + from protocol bgp; + then { + as-path-prepend 64512; + accept; + } + } +} +@end verbatim + +@node Release Notes +@section Release Notes + +@c A paragraph that introduces our release notes. 
+ +@c outer list, one item per VNC release, items preceded by bullet +@itemize @bullet +@item + +@c @item +@end itemize + +@evenheading @thispage@|@|@thistitle +@oddheading @thischapter@|@|@thispage +@everyfooting + diff --git a/fpm/.gitignore b/fpm/.gitignore new file mode 100644 index 0000000000..b133c52a42 --- /dev/null +++ b/fpm/.gitignore @@ -0,0 +1,15 @@ +Makefile +Makefile.in +*.o +tags +TAGS +.deps +.nfs* +*.lo +*.la +*.a +*.libs +.arch-inventory +.arch-ids +*~ +*.loT diff --git a/fpm/Makefile.am b/fpm/Makefile.am new file mode 100644 index 0000000000..83ab31ce3f --- /dev/null +++ b/fpm/Makefile.am @@ -0,0 +1,29 @@ +include ../common.am + +AM_CPPFLAGS = -I.. -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib $(Q_PROTOBUF_C_CLIENT_INCLUDES) + +PROTOBUF_INCLUDES=-I$(top_srcdir) +PROTOBUF_PACKAGE = fpm + +lib_LTLIBRARIES = libfpm_pb.la +libfpm_pb_la_LDFLAGS = -version-info 0:0:0 + +if HAVE_PROTOBUF +protobuf_srcs = + +protobuf_srcs_nodist = \ + fpm.pb-c.c +endif + +libfpm_pb_la_SOURCES = \ + fpm.h \ + fpm_pb.h \ + fpm_pb.c \ + $(protobuf_srcs) + +nodist_libfpm_pb_la_SOURCES = $(protobuf_srcs_nodist) + +CLEANFILES = $(Q_CLEANFILES) + +BUILT_SOURCES = $(Q_PROTOBUF_SRCS) +EXTRA_DIST = fpm.proto diff --git a/fpm/fpm.h b/fpm/fpm.h index 96f05f4865..85285996ca 100644 --- a/fpm/fpm.h +++ b/fpm/fpm.h @@ -87,6 +87,14 @@ * table(s) when it reconnects. */ +/* + * Local host as a default server for fpm connection + */ +#define FPM_DEFAULT_IP (htonl (INADDR_LOOPBACK)) + +/* + * default port for fpm connections + */ #define FPM_DEFAULT_PORT 2620 /* @@ -94,6 +102,10 @@ */ #define FPM_MAX_MSG_LEN 4096 +#ifdef __SUNPRO_C +#pragma pack(1) +#endif + /* * Header that precedes each fpm message to/from the FPM. */ @@ -112,13 +124,13 @@ typedef struct fpm_msg_hdr_t_ /* * Length of entire message, including the header, in network byte * order. - * - * Note that msg_len is rounded up to make sure that message is at - * the desired alignment. This means that some payloads may need - * padding at the end. */ uint16_t msg_len; -} fpm_msg_hdr_t; +} __attribute__ ((packed)) fpm_msg_hdr_t; + +#ifdef __SUNPRO_C +#pragma pack() +#endif /* * The current version of the FPM protocol is 1. @@ -131,8 +143,14 @@ typedef enum fpm_msg_type_e_ { /* * Indicates that the payload is a completely formed netlink * message. + * + * XXX Netlink cares about the alignment of messages. When any + * FPM_MSG_TYPE_NETLINK messages are sent over a channel, then all + * messages should be sized such that netlink alignment is + * maintained. */ FPM_MSG_TYPE_NETLINK = 1, + FPM_MSG_TYPE_PROTOBUF = 2, } fpm_msg_type_e; /* @@ -146,6 +164,8 @@ typedef enum fpm_msg_type_e_ { * fpm_msg_align * * Round up the given length to the desired alignment. + * + * **NB**: Alignment is required only when netlink messages are used. */ static inline size_t fpm_msg_align (size_t len) @@ -157,7 +177,13 @@ fpm_msg_align (size_t len) * The (rounded up) size of the FPM message header. This ensures that * the message payload always starts at an aligned address. 
*/ -#define FPM_MSG_HDR_LEN (fpm_msg_align (sizeof (fpm_msg_hdr_t))) +#define FPM_MSG_HDR_LEN (sizeof (fpm_msg_hdr_t)) + +#ifndef COMPILE_ASSERT +#define COMPILE_ASSERT(x) extern int __dummy[2 * !!(x) - 1] +#endif + +COMPILE_ASSERT(FPM_MSG_ALIGNTO == FPM_MSG_HDR_LEN); /* * fpm_data_len_to_msg_len @@ -168,7 +194,7 @@ fpm_msg_align (size_t len) static inline size_t fpm_data_len_to_msg_len (size_t data_len) { - return fpm_msg_align (data_len) + FPM_MSG_HDR_LEN; + return data_len + FPM_MSG_HDR_LEN; } /* @@ -242,7 +268,11 @@ fpm_msg_hdr_ok (const fpm_msg_hdr_t *hdr) if (msg_len < FPM_MSG_HDR_LEN || msg_len > FPM_MAX_MSG_LEN) return 0; - if (fpm_msg_align (msg_len) != msg_len) + /* + * Netlink messages must be aligned properly. + */ + if (hdr->msg_type == FPM_MSG_TYPE_NETLINK && + fpm_msg_align (msg_len) != msg_len) return 0; return 1; @@ -270,4 +300,10 @@ fpm_msg_ok (const fpm_msg_hdr_t *hdr, size_t len) return 1; } +// tcp maximum range +#define TCP_MAX_PORT 65535 + +// tcp minimum range +#define TCP_MIN_PORT 1 + #endif /* _FPM_H */ diff --git a/fpm/fpm.proto b/fpm/fpm.proto new file mode 100644 index 0000000000..318e80a92e --- /dev/null +++ b/fpm/fpm.proto @@ -0,0 +1,88 @@ +// +// fpm.proto +// +// @copyright Copyright (C) 2016 Sproute Networks, Inc. +// +// @author Avneesh Sachdev +// +// Permission to use, copy, modify, and/or distribute this software +// for any purpose with or without fee is hereby granted, provided +// that the above copyright notice and this permission notice appear +// in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL +// WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE +// AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR +// CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +// OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +// NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +// + +// +// Protobuf definitions pertaining to the Forwarding Plane Manager component. +// + +package fpm; + +import "qpb/qpb.proto"; + +// +// A Nexthop for a route. It indicates how packets to a given prefix +// should be forwarded (for instance, send them out of a specified +// interface to a specified address). +// +message Nexthop { + optional qpb.IfIdentifier if_id = 2; + optional qpb.L3Address address = 3; +} + +message RouteKey { + optional qpb.L3Prefix prefix = 1; +} + +message DeleteRoute { + required uint32 vrf_id = 1; + required qpb.AddressFamily address_family = 2; + required qpb.SubAddressFamily sub_address_family = 3; + required RouteKey key = 4; +} + +enum RouteType { + UNKNOWN = 0; + NORMAL = 1; + UNREACHABLE = 2; + BLACKHOLE = 3; +} + +message AddRoute { + required uint32 vrf_id = 1; + required qpb.AddressFamily address_family = 2; + required qpb.SubAddressFamily sub_address_family = 3; + required RouteKey key = 4; + + optional RouteType route_type = 5; + + required qpb.Protocol protocol = 6; + + required int32 metric = 8; + + repeated Nexthop nexthops = 9; +} + +// +// Any message from the FPM. 
+// +message Message { + enum Type { + UNKNOWN_MSG = 0; + ADD_ROUTE = 1; + DELETE_ROUTE = 2; + }; + + optional Type type = 1; + + optional AddRoute add_route = 2; + optional DeleteRoute delete_route = 3; +} diff --git a/fpm/fpm_pb.c b/fpm/fpm_pb.c new file mode 100644 index 0000000000..ba18627ea1 --- /dev/null +++ b/fpm/fpm_pb.c @@ -0,0 +1,28 @@ +/* + * fpm_pb.c + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +/* + * Main file for the fpm_pb library. + */ diff --git a/fpm/fpm_pb.h b/fpm/fpm_pb.h new file mode 100644 index 0000000000..8f74ac06eb --- /dev/null +++ b/fpm/fpm_pb.h @@ -0,0 +1,63 @@ +/* + * fpm_pb.h + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +/* + * Public header file for fpm protobuf definitions. + */ + +#ifndef _FPM_PB_H +#define _FPM_PB_H + +#include "route_types.h" +#include "qpb/qpb.h" + +#include "fpm/fpm.pb-c.h" + +/* + * fpm__route_key__create + */ +#define fpm_route_key_create fpm__route_key__create +static inline Fpm__RouteKey * +fpm__route_key__create (qpb_allocator_t *allocator, struct prefix *prefix) +{ + Fpm__RouteKey *key; + + key = QPB_ALLOC (allocator, typeof (*key)); + if (!key) + { + return NULL; + } + fpm__route_key__init (key); + + key->prefix = qpb__l3_prefix__create (allocator, prefix); + if (!key->prefix) + { + return NULL; + } + + return key; +} + +#endif diff --git a/isisd/Makefile.am b/isisd/Makefile.am index c14351ca3a..69624dced3 100644 --- a/isisd/Makefile.am +++ b/isisd/Makefile.am @@ -1,7 +1,7 @@ ## Process this file with automake to produce Makefile.in. AM_CPPFLAGS = -I.. 
-I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib \ - @ISIS_TOPOLOGY_INCLUDES@ + @ISIS_TOPOLOGY_INCLUDES@ -DVTY_DEPRECATE_INDEX DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" INSTALL_SDATA=@INSTALL@ -m 600 LIBS = @LIBS@ diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c index c7ab83ba0b..8afabede4e 100644 --- a/isisd/isis_adjacency.c +++ b/isisd/isis_adjacency.c @@ -507,6 +507,11 @@ isis_adj_build_up_list (struct list *adjdb, struct list *list) struct isis_adjacency *adj; struct listnode *node; + if (adjdb == NULL) { + zlog_warn ("isis_adj_build_up_list(): adjacency DB is empty"); + return; + } + if (!list) { zlog_warn ("isis_adj_build_up_list(): NULL list"); diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index 4f22a5e558..c66fa48263 100644 --- a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -41,6 +41,7 @@ #include "hash.h" #include "prefix.h" #include "stream.h" +#include "qobj.h" #include "isisd/dict.h" #include "isisd/include-netbsd/iso.h" @@ -61,6 +62,8 @@ #include "isisd/isis_events.h" #include "isisd/isis_te.h" +DEFINE_QOBJ_TYPE(isis_circuit) + /* * Prototypes. */ @@ -100,6 +103,8 @@ isis_circuit_new () circuit->mtc = mpls_te_circuit_new(); + QOBJ_REG (circuit, isis_circuit); + return circuit; } @@ -109,6 +114,8 @@ isis_circuit_del (struct isis_circuit *circuit) if (!circuit) return; + QOBJ_UNREG (circuit); + isis_circuit_if_unbind (circuit, circuit->interface); /* and lastly the circuit itself */ diff --git a/isisd/isis_circuit.h b/isisd/isis_circuit.h index 9ada1e26a3..efe153f32e 100644 --- a/isisd/isis_circuit.h +++ b/isisd/isis_circuit.h @@ -25,6 +25,7 @@ #include "vty.h" #include "if.h" +#include "qobj.h" #include "isis_constants.h" #include "isis_common.h" @@ -140,7 +141,10 @@ struct isis_circuit u_int32_t ctrl_pdus_txed; /* controlPDUsSent */ u_int32_t desig_changes[2]; /* lanLxDesignatedIntermediateSystemChanges */ u_int32_t rej_adjacencies; /* rejectedAdjacencies */ + + QOBJ_FIELDS }; +DECLARE_QOBJ_TYPE(isis_circuit) void isis_circuit_init (void); struct isis_circuit *isis_circuit_new (void); diff --git a/isisd/isis_events.c b/isisd/isis_events.c index 460b1d25ba..abc4471cad 100644 --- a/isisd/isis_events.c +++ b/isisd/isis_events.c @@ -137,8 +137,10 @@ circuit_resign_level (struct isis_circuit *circuit, int level) THREAD_TIMER_OFF (circuit->u.bc.t_refresh_pseudo_lsp[idx]); circuit->lsp_regenerate_pending[idx] = 0; circuit->u.bc.run_dr_elect[idx] = 0; - list_delete (circuit->u.bc.lan_neighs[idx]); - circuit->u.bc.lan_neighs[idx] = NULL; + if (circuit->u.bc.lan_neighs[idx] != NULL) { + list_delete (circuit->u.bc.lan_neighs[idx]); + circuit->u.bc.lan_neighs[idx] = NULL; + } } return; diff --git a/isisd/isis_lsp.c b/isisd/isis_lsp.c index 0177a9423a..55887f901d 100644 --- a/isisd/isis_lsp.c +++ b/isisd/isis_lsp.c @@ -138,14 +138,16 @@ lsp_destroy (struct isis_lsp *lsp) if (!lsp) return; - for (ALL_LIST_ELEMENTS_RO (lsp->area->circuit_list, cnode, circuit)) - { - if (circuit->lsp_queue == NULL) - continue; - for (ALL_LIST_ELEMENTS (circuit->lsp_queue, lnode, lnnode, lsp_in_list)) - if (lsp_in_list == lsp) - list_delete_node(circuit->lsp_queue, lnode); - } + if (lsp->area->circuit_list) { + for (ALL_LIST_ELEMENTS_RO (lsp->area->circuit_list, cnode, circuit)) + { + if (circuit->lsp_queue == NULL) + continue; + for (ALL_LIST_ELEMENTS (circuit->lsp_queue, lnode, lnnode, lsp_in_list)) + if (lsp_in_list == lsp) + list_delete_node(circuit->lsp_queue, lnode); + } + } ISIS_FLAGS_CLEAR_ALL (lsp->SSNflags); ISIS_FLAGS_CLEAR_ALL (lsp->SRMflags); diff --git 
a/isisd/isis_main.c b/isisd/isis_main.c index 163a4e051c..44c7840022 100644 --- a/isisd/isis_main.c +++ b/isisd/isis_main.c @@ -38,6 +38,7 @@ #include "plist.h" #include "zclient.h" #include "vrf.h" +#include "qobj.h" #include "isisd/dict.h" #include "include-netbsd/iso.h" @@ -341,6 +342,7 @@ main (int argc, char **argv, char **envp) */ signal_init (master, array_size (isisd_signals), isisd_signals); cmd_init (1); + vty_config_lockless (); vty_init (master); memory_init (); access_list_init(); diff --git a/isisd/isis_redist.c b/isisd/isis_redist.c index 384eb3572b..045c7daa32 100644 --- a/isisd/isis_redist.c +++ b/isisd/isis_redist.c @@ -52,6 +52,7 @@ redist_protocol(int family) return 1; assert(!"Unsupported address family!"); + return 0; } static int @@ -555,7 +556,7 @@ DEFUN (isis_redistribute, int idx_protocol = 2; int idx_level = 3; int idx_metric_rmap = 4; - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); int family; int afi; int type; @@ -621,7 +622,7 @@ DEFUN (no_isis_redistribute, int idx_afi = 2; int idx_protocol = 3; int idx_level = 4; - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); int type; int level; int family; @@ -663,7 +664,7 @@ DEFUN (isis_default_originate, int idx_afi = 2; int idx_level = 3; int idx_metric_rmap = 4; - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); int family; int originate_type = DEFAULT_ORIGINATE; int level; @@ -715,8 +716,7 @@ DEFUN (no_isis_default_originate, { int idx_afi = 3; int idx_level = 4; - struct isis_area *area = vty->index; - + VTY_DECLVAR_CONTEXT (isis_area, area); int family; int level; diff --git a/isisd/isis_route.c b/isisd/isis_route.c index 67d45c8f10..cc3ecba0d3 100644 --- a/isisd/isis_route.c +++ b/isisd/isis_route.c @@ -643,6 +643,9 @@ isis_route_validate (struct isis_area *area) isis_route_validate_merge (area, AF_INET6); #endif + if (!area->circuit_list) { + return; + } /* walk all circuits and reset any spf specific flags */ for (ALL_LIST_ELEMENTS_RO (area->circuit_list, node, circuit)) UNSET_FLAG(circuit->flags, ISIS_CIRCUIT_FLAPPED_AFTER_SPF); diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c index fd5af4a2b8..041f2ed3f6 100644 --- a/isisd/isis_spf.c +++ b/isisd/isis_spf.c @@ -1680,8 +1680,4 @@ isis_spf_cmds_init () install_element (VIEW_NODE, &show_isis_topology_cmd); install_element (VIEW_NODE, &show_isis_topology_l1_cmd); install_element (VIEW_NODE, &show_isis_topology_l2_cmd); - - install_element (ENABLE_NODE, &show_isis_topology_cmd); - install_element (ENABLE_NODE, &show_isis_topology_l1_cmd); - install_element (ENABLE_NODE, &show_isis_topology_l2_cmd); } diff --git a/isisd/isis_te.c b/isisd/isis_te.c index 95abf22310..d3605f448c 100644 --- a/isisd/isis_te.c +++ b/isisd/isis_te.c @@ -1358,8 +1358,6 @@ isis_mpls_te_init (void) /* Register new VTY commands */ install_element (VIEW_NODE, &show_isis_mpls_te_router_cmd); install_element (VIEW_NODE, &show_isis_mpls_te_interface_cmd); - install_element (ENABLE_NODE, &show_isis_mpls_te_router_cmd); - install_element (ENABLE_NODE, &show_isis_mpls_te_interface_cmd); install_element (ISIS_NODE, &isis_mpls_te_on_cmd); install_element (ISIS_NODE, &no_isis_mpls_te_on_cmd); diff --git a/isisd/isis_vty.c b/isisd/isis_vty.c index 7fe65e6ca7..492572bddf 100644 --- a/isisd/isis_vty.c +++ b/isisd/isis_vty.c @@ -32,10 +32,9 @@ static struct isis_circuit * isis_circuit_lookup (struct vty *vty) { - struct interface *ifp; + struct interface *ifp = VTY_GET_CONTEXT(interface); struct isis_circuit 
*circuit; - ifp = (struct interface *) vty->index; if (!ifp) { vty_out (vty, "Invalid interface %s", VTY_NEWLINE); @@ -63,15 +62,12 @@ DEFUN (ip_router_isis, { int idx_afi = 0; int idx_word = 3; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct isis_circuit *circuit; struct isis_area *area; const char *af = argv[idx_afi]->arg; const char *area_tag = argv[idx_word]->arg; - ifp = (struct interface *) vty->index; - assert (ifp); - /* Prevent more than one area per circuit */ circuit = circuit_scan_by_ifp (ifp); if (circuit && circuit->area) @@ -130,19 +126,12 @@ DEFUN (no_ip_router_isis, { int idx_afi = 1; int idx_word = 4; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct isis_area *area; struct isis_circuit *circuit; const char *af = argv[idx_afi]->arg; const char *area_tag = argv[idx_word]->arg; - ifp = (struct interface *) vty->index; - if (!ifp) - { - vty_out (vty, "Invalid interface %s", VTY_NEWLINE); - return CMD_ERR_NO_MATCH; - } - area = isis_area_lookup (area_tag); if (!area) { @@ -1327,11 +1316,9 @@ DEFUN (metric_style, "Use new style of TLVs to carry wider metric\n") { int idx_metric_style = 1; - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); int ret; - assert(area); - if (strncmp (argv[idx_metric_style]->arg, "w", 1) == 0) { isis_area_metricstyle_set(area, false, true); @@ -1357,10 +1344,9 @@ DEFUN (no_metric_style, NO_STR "Use old-style (ISO 10589) or new-style packet formats\n") { - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); int ret; - assert (area); ret = validate_metric_style_narrow (vty, area); if (ret != CMD_SUCCESS) return ret; @@ -1375,8 +1361,7 @@ DEFUN (set_overload_bit, "Set overload bit to avoid any transit traffic\n" "Set overload bit\n") { - struct isis_area *area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); isis_area_overload_bit_set(area, true); return CMD_SUCCESS; @@ -1388,8 +1373,7 @@ DEFUN (no_set_overload_bit, "Reset overload bit to accept transit traffic\n" "Reset overload bit\n") { - struct isis_area *area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); isis_area_overload_bit_set(area, false); return CMD_SUCCESS; @@ -1401,8 +1385,7 @@ DEFUN (set_attached_bit, "Set attached bit to identify as L1/L2 router for inter-area traffic\n" "Set attached bit\n") { - struct isis_area *area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); isis_area_attached_bit_set(area, true); return CMD_SUCCESS; @@ -1413,8 +1396,7 @@ DEFUN (no_set_attached_bit, "no set-attached-bit", "Reset attached bit\n") { - struct isis_area *area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); isis_area_attached_bit_set(area, false); return CMD_SUCCESS; @@ -1426,8 +1408,7 @@ DEFUN (dynamic_hostname, "Dynamic hostname for IS-IS\n" "Dynamic hostname\n") { - struct isis_area *area = vty->index; - assert(area); + VTY_DECLVAR_CONTEXT (isis_area, area); isis_area_dynhostname_set(area, true); return CMD_SUCCESS; @@ -1440,8 +1421,7 @@ DEFUN (no_dynamic_hostname, "Dynamic hostname for IS-IS\n" "Dynamic hostname\n") { - struct isis_area *area = vty->index; - assert(area); + VTY_DECLVAR_CONTEXT (isis_area, area); isis_area_dynhostname_set(area, false); return CMD_SUCCESS; @@ -1449,16 +1429,10 @@ DEFUN (no_dynamic_hostname, static int area_lsp_mtu_set(struct vty *vty, unsigned int lsp_mtu) { - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); struct listnode *node; struct 
isis_circuit *circuit; - if (!area) - { - vty_out (vty, "Can't find ISIS instance %s", VTY_NEWLINE); - return CMD_ERR_NO_MATCH; - } - for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) { if(circuit->state != C_STATE_INIT && circuit->state != C_STATE_UP) @@ -1511,17 +1485,9 @@ DEFUN (is_type, "Act as an area router only\n") { int idx_level = 1; - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); int type; - area = vty->index; - - if (!area) - { - vty_out (vty, "Can't find IS-IS instance%s", VTY_NEWLINE); - return CMD_ERR_NO_MATCH; - } - type = string2circuit_t (argv[idx_level]->arg); if (!type) { @@ -1543,12 +1509,9 @@ DEFUN (no_is_type, "Act as both a station router and an area router\n" "Act as an area router only\n") { - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); int type; - area = vty->index; - assert (area); - /* * Put the is-type back to defaults: * - level-1-2 on first area @@ -1601,11 +1564,10 @@ DEFUN (lsp_gen_interval, "Minimum interval in seconds\n") { int idx_number = 1; - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); uint16_t interval; int level; - area = vty->index; interval = atoi (argv[idx_number]->arg); level = IS_LEVEL_1 | IS_LEVEL_2; return set_lsp_gen_interval (vty, area, interval, level); @@ -1618,11 +1580,10 @@ DEFUN (no_lsp_gen_interval, "Minimum interval between regenerating same LSP\n" "Minimum interval in seconds\n") { - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); uint16_t interval; int level; - area = vty->index; interval = DEFAULT_MIN_LSP_GEN_INTERVAL; level = IS_LEVEL_1 | IS_LEVEL_2; return set_lsp_gen_interval (vty, area, interval, level); @@ -1637,11 +1598,10 @@ DEFUN (lsp_gen_interval_l1, "Minimum interval in seconds\n") { int idx_number = 2; - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); uint16_t interval; int level; - area = vty->index; interval = atoi (argv[idx_number]->arg); level = IS_LEVEL_1; return set_lsp_gen_interval (vty, area, interval, level); @@ -1654,11 +1614,10 @@ DEFUN (no_lsp_gen_interval_l1, "Minimum interval between regenerating same LSP\n" "Set interval for level 1 only\n") { - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); uint16_t interval; int level; - area = vty->index; interval = DEFAULT_MIN_LSP_GEN_INTERVAL; level = IS_LEVEL_1; return set_lsp_gen_interval (vty, area, interval, level); @@ -1672,12 +1631,11 @@ DEFUN (lsp_gen_interval_l2, "Set interval for level 2 only\n" "Minimum interval in seconds\n") { + VTY_DECLVAR_CONTEXT (isis_area, area); int idx_number = 2; - struct isis_area *area; uint16_t interval; int level; - area = vty->index; interval = atoi (argv[idx_number]->arg); level = IS_LEVEL_2; return set_lsp_gen_interval (vty, area, interval, level); @@ -1691,11 +1649,10 @@ DEFUN (no_lsp_gen_interval_l2, "Set interval for level 2 only\n" "Minimum interval in seconds\n") { - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); uint16_t interval; int level; - area = vty->index; interval = DEFAULT_MIN_LSP_GEN_INTERVAL; level = IS_LEVEL_2; return set_lsp_gen_interval (vty, area, interval, level); @@ -1709,10 +1666,9 @@ DEFUN (spf_interval, "Minimum interval between consecutive SPFs in seconds\n") { int idx_number = 1; - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); u_int16_t interval; - area = vty->index; interval = atoi (argv[idx_number]->arg); area->min_spf_interval[0] = interval; area->min_spf_interval[1] = interval; @@ -1730,9 +1686,7 @@ DEFUN (no_spf_interval, "Set interval 
for level 2 only\n" "Minimum interval between consecutive SPFs in seconds\n") { - struct isis_area *area; - - area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); area->min_spf_interval[0] = MINIMUM_SPF_INTERVAL; area->min_spf_interval[1] = MINIMUM_SPF_INTERVAL; @@ -1749,10 +1703,9 @@ DEFUN (spf_interval_l1, "Minimum interval between consecutive SPFs in seconds\n") { int idx_number = 2; - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); u_int16_t interval; - area = vty->index; interval = atoi (argv[idx_number]->arg); area->min_spf_interval[0] = interval; @@ -1766,9 +1719,7 @@ DEFUN (no_spf_interval_l1, "Minimum interval between SPF calculations\n" "Set interval for level 1 only\n") { - struct isis_area *area; - - area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); area->min_spf_interval[0] = MINIMUM_SPF_INTERVAL; @@ -1784,10 +1735,9 @@ DEFUN (spf_interval_l2, "Minimum interval between consecutive SPFs in seconds\n") { int idx_number = 2; - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); u_int16_t interval; - area = vty->index; interval = atoi (argv[idx_number]->arg); area->min_spf_interval[1] = interval; @@ -1801,9 +1751,7 @@ DEFUN (no_spf_interval_l2, "Minimum interval between SPF calculations\n" "Set interval for level 2 only\n") { - struct isis_area *area; - - area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); area->min_spf_interval[1] = MINIMUM_SPF_INTERVAL; @@ -1815,17 +1763,11 @@ static int area_max_lsp_lifetime_set(struct vty *vty, int level, uint16_t interval) { - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); int lvl; uint16_t refresh_interval = interval - 300; int set_refresh_interval[ISIS_LEVELS] = {0, 0}; - if (!area) - { - vty_out (vty, "Can't find ISIS instance %s", VTY_NEWLINE); - return CMD_ERR_NO_MATCH; - } - for (lvl = IS_LEVEL_1; lvl <= IS_LEVEL_2; lvl++) { if (!(lvl & level)) @@ -1931,15 +1873,9 @@ DEFUN (no_max_lsp_lifetime_l2, static int area_lsp_refresh_interval_set(struct vty *vty, int level, uint16_t interval) { - struct isis_area *area = vty->index; + VTY_DECLVAR_CONTEXT (isis_area, area); int lvl; - if (!area) - { - vty_out (vty, "Can't find ISIS instance %s", VTY_NEWLINE); - return CMD_ERR_NO_MATCH; - } - for (lvl = IS_LEVEL_1; lvl <= IS_LEVEL_2; ++lvl) { if (!(lvl & level)) @@ -2047,13 +1983,7 @@ area_passwd_set(struct vty *vty, int level, const char *passwd, u_char snp_auth), const char *passwd, u_char snp_auth) { - struct isis_area *area = vty->index; - - if (!area) - { - vty_out (vty, "Can't find IS-IS instance%s", VTY_NEWLINE); - return CMD_ERR_NO_MATCH; - } + VTY_DECLVAR_CONTEXT (isis_area, area); if (passwd && strlen(passwd) > 254) { @@ -2159,13 +2089,7 @@ DEFUN (no_area_passwd, { int idx_password = 1; int level = strmatch (argv[idx_password]->text, "domain-password") ? 
IS_LEVEL_2 : IS_LEVEL_1; - struct isis_area *area = vty->index; - - if (!area) - { - vty_out (vty, "Can't find IS-IS instance%s", VTY_NEWLINE); - return CMD_ERR_NO_MATCH; - } + VTY_DECLVAR_CONTEXT (isis_area, area); return isis_area_passwd_unset (area, level); } diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c index 569ff70d8d..646c5fa88e 100644 --- a/isisd/isis_zebra.c +++ b/isisd/isis_zebra.c @@ -257,7 +257,8 @@ static void isis_zebra_route_add_ipv4 (struct prefix *prefix, struct isis_route_info *route_info) { - u_char message, flags; + u_char message; + u_int32_t flags; int psize; struct stream *stream; struct isis_nexthop *nexthop; @@ -285,7 +286,7 @@ isis_zebra_route_add_ipv4 (struct prefix *prefix, /* instance */ stream_putw (stream, 0); /* flags */ - stream_putc (stream, flags); + stream_putl (stream, flags); /* message */ stream_putc (stream, message); /* SAFI */ @@ -566,11 +567,11 @@ isis_zebra_read_ipv4 (int command, struct zclient *zclient, api.type = stream_getc (stream); api.instance = stream_getw (stream); - api.flags = stream_getc (stream); + api.flags = stream_getl (stream); api.message = stream_getc (stream); p.family = AF_INET; - p.prefixlen = stream_getc (stream); + p.prefixlen = MIN(IPV4_MAX_PREFIXLEN, stream_getc (stream)); stream_get (&p.prefix, stream, PSIZE (p.prefixlen)); if (CHECK_FLAG (api.message, ZAPI_MESSAGE_NEXTHOP)) @@ -623,7 +624,7 @@ isis_zebra_read_ipv6 (int command, struct zclient *zclient, ifindex = 0; api.type = stream_getc(stream); - api.flags = stream_getc(stream); + api.flags = stream_getl(stream); api.message = stream_getc(stream); p.family = AF_INET6; @@ -706,13 +707,9 @@ isis_zebra_init (struct thread_master *master) zclient->interface_address_add = isis_zebra_if_address_add; zclient->interface_address_delete = isis_zebra_if_address_del; zclient->interface_link_params = isis_zebra_link_params; - zclient->ipv4_route_add = isis_zebra_read_ipv4; - zclient->ipv4_route_delete = isis_zebra_read_ipv4; zclient->redistribute_route_ipv4_add = isis_zebra_read_ipv4; zclient->redistribute_route_ipv4_del = isis_zebra_read_ipv4; #ifdef HAVE_IPV6 - zclient->ipv6_route_add = isis_zebra_read_ipv6; - zclient->ipv6_route_delete = isis_zebra_read_ipv6; zclient->redistribute_route_ipv6_add = isis_zebra_read_ipv6; zclient->redistribute_route_ipv6_del = isis_zebra_read_ipv6; #endif /* HAVE_IPV6 */ diff --git a/isisd/isisd.c b/isisd/isisd.c index ffe17b3643..143e380016 100644 --- a/isisd/isisd.c +++ b/isisd/isisd.c @@ -34,6 +34,7 @@ #include "stream.h" #include "prefix.h" #include "table.h" +#include "qobj.h" #include "isisd/dict.h" #include "isisd/include-netbsd/iso.h" @@ -63,6 +64,9 @@ u_char DEFAULT_TOPOLOGY_BASEIS[6] = { 0xFE, 0xED, 0xFE, 0xED, 0x00, 0x00 }; struct isis *isis = NULL; +DEFINE_QOBJ_TYPE(isis) +DEFINE_QOBJ_TYPE(isis_area) + /* * Prototypes. 
*/ @@ -100,6 +104,7 @@ isis_new (unsigned long process_id) */ /* isis->debugs = 0xFFFF; */ isisMplsTE.status = disable; /* Only support TE metric */ + QOBJ_REG (isis, isis); } struct isis_area * @@ -169,6 +174,8 @@ isis_area_create (const char *area_tag) listnode_add (isis->area_list, area); area->isis = isis; + QOBJ_REG (area, isis_area); + return area; } @@ -196,8 +203,7 @@ isis_area_get (struct vty *vty, const char *area_tag) if (area) { - vty->node = ISIS_NODE; - vty->index = area; + VTY_PUSH_CONTEXT (ISIS_NODE, area); return CMD_SUCCESS; } @@ -206,8 +212,7 @@ isis_area_get (struct vty *vty, const char *area_tag) if (isis->debugs & DEBUG_EVENTS) zlog_debug ("New IS-IS area instance %s", area->area_tag); - vty->node = ISIS_NODE; - vty->index = area; + VTY_PUSH_CONTEXT (ISIS_NODE, area); return CMD_SUCCESS; } @@ -228,6 +233,8 @@ isis_area_destroy (struct vty *vty, const char *area_tag) return CMD_ERR_NO_MATCH; } + QOBJ_UNREG (area); + if (area->circuit_list) { for (ALL_LIST_ELEMENTS (area->circuit_list, node, nnode, circuit)) @@ -315,13 +322,12 @@ isis_area_destroy (struct vty *vty, const char *area_tag) int area_net_title (struct vty *vty, const char *net_title) { - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); struct area_addr *addr; struct area_addr *addrp; struct listnode *node; u_char buff[255]; - area = vty->index; if (!area) { @@ -418,12 +424,11 @@ area_net_title (struct vty *vty, const char *net_title) int area_clear_net_title (struct vty *vty, const char *net_title) { - struct isis_area *area; + VTY_DECLVAR_CONTEXT (isis_area, area); struct area_addr addr, *addrp = NULL; struct listnode *node; u_char buff[255]; - area = vty->index; if (!area) { vty_out (vty, "Can't find ISIS instance %s", VTY_NEWLINE); @@ -1877,10 +1882,7 @@ DEFUN (log_adj_changes, "log-adjacency-changes", "Log changes in adjacency state\n") { - struct isis_area *area; - - area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); area->log_adj_changes = 1; @@ -1892,10 +1894,7 @@ DEFUN (no_log_adj_changes, "no log-adjacency-changes", "Stop logging changes in adjacency state\n") { - struct isis_area *area; - - area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); area->log_adj_changes = 0; @@ -1918,10 +1917,7 @@ DEFUN (topology_generate_grid, "Optional param 3\n" "Topology\n") { - struct isis_area *area; - - area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); if (!spgrid_check_params (vty, argc, argv)) { @@ -1977,11 +1973,8 @@ DEFUN (topology_baseis, "XXXX.XXXX.XXXX Network entity title (NET)\n") { int idx_word = 2; - struct isis_area *area; u_char buff[ISIS_SYS_ID_LEN]; - - area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); if (sysid2buff (buff, argv[idx_word]->arg)) sysid2buff (area->topology_baseis, argv[idx_word]->arg); @@ -1997,10 +1990,7 @@ DEFUN (no_topology_baseis, "A Network IS Base for this topology\n" "XXXX.XXXX.XXXX Network entity title (NET)\n") { - struct isis_area *area; - - area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); memcpy (area->topology_baseis, DEFAULT_TOPOLOGY_BASEIS, ISIS_SYS_ID_LEN); return CMD_SUCCESS; @@ -2015,10 +2005,7 @@ DEFUN (topology_basedynh, "Dynamic hostname base\n") { int idx_word = 2; - struct isis_area *area; - - area = vty->index; - assert (area); + VTY_DECLVAR_CONTEXT (isis_area, area); /* I hope that it's enough. 
*/ area->topology_basedynh = strndup (argv[idx_word]->arg, 16); @@ -2328,24 +2315,6 @@ isis_init () install_element (VIEW_NODE, &show_database_detail_cmd); install_element (VIEW_NODE, &show_database_detail_arg_cmd); - install_element (ENABLE_NODE, &show_isis_summary_cmd); - - install_element (ENABLE_NODE, &show_isis_interface_cmd); - install_element (ENABLE_NODE, &show_isis_interface_detail_cmd); - install_element (ENABLE_NODE, &show_isis_interface_arg_cmd); - - install_element (ENABLE_NODE, &show_isis_neighbor_cmd); - install_element (ENABLE_NODE, &show_isis_neighbor_detail_cmd); - install_element (ENABLE_NODE, &show_isis_neighbor_arg_cmd); - install_element (ENABLE_NODE, &clear_isis_neighbor_cmd); - install_element (ENABLE_NODE, &clear_isis_neighbor_arg_cmd); - - install_element (ENABLE_NODE, &show_hostname_cmd); - install_element (ENABLE_NODE, &show_database_cmd); - install_element (ENABLE_NODE, &show_database_arg_cmd); - install_element (ENABLE_NODE, &show_database_arg_detail_cmd); - install_element (ENABLE_NODE, &show_database_detail_cmd); - install_element (ENABLE_NODE, &show_database_detail_arg_cmd); install_element (ENABLE_NODE, &show_debugging_isis_cmd); install_node (&debug_node, config_write_debug); @@ -2425,6 +2394,5 @@ isis_init () install_element (ISIS_NODE, &topology_basedynh_cmd); install_element (ISIS_NODE, &no_topology_baseis_cmd); install_element (VIEW_NODE, &show_isis_generated_topology_cmd); - install_element (ENABLE_NODE, &show_isis_generated_topology_cmd); #endif /* TOPOLOGY_GENERATE */ } diff --git a/isisd/isisd.h b/isisd/isisd.h index ca3e570063..2c303d9304 100644 --- a/isisd/isisd.h +++ b/isisd/isisd.h @@ -33,6 +33,7 @@ #include "isis_flags.h" #include "dict.h" #include "isis_memory.h" +#include "qobj.h" /* uncomment if you are a developer in bug hunt */ /* #define EXTREME_DEBUG */ @@ -57,9 +58,12 @@ struct isis struct thread *t_dync_clean; /* dynamic hostname cache cleanup thread */ struct route_table *ext_info[REDIST_PROTOCOL_COUNT]; + + QOBJ_FIELDS }; extern struct isis *isis; +DECLARE_QOBJ_TYPE(isis_area) struct isis_area { @@ -135,7 +139,10 @@ struct isis_area char *topology_basedynh; /* Dynamic hostname base. */ char top_params[200]; /* FIXME: what is reasonable? */ #endif /* TOPOLOGY_GENERATE */ + + QOBJ_FIELDS }; +DECLARE_QOBJ_TYPE(isis_area) void isis_init (void); void isis_new(unsigned long); diff --git a/ldpd/.gitignore b/ldpd/.gitignore new file mode 100644 index 0000000000..be90d42119 --- /dev/null +++ b/ldpd/.gitignore @@ -0,0 +1,18 @@ +Makefile +Makefile.in +*.o +ldpd +ldpd.conf +tags +TAGS +.deps +.nfs* +*.lo +*.la +*.a +*.libs +.arch-inventory +.arch-ids +*~ +*.loT + diff --git a/ldpd/Makefile.am b/ldpd/Makefile.am new file mode 100644 index 0000000000..1f4d910192 --- /dev/null +++ b/ldpd/Makefile.am @@ -0,0 +1,27 @@ +## Process this file with automake to produce Makefile.in. + +AM_CPPFLAGS = -I.. 
-I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib \ + -DVTY_DEPRECATE_INDEX +DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" +INSTALL_SDATA=@INSTALL@ -m 600 + +AM_CFLAGS = $(WERROR) + +noinst_LIBRARIES = libldp.a +sbin_PROGRAMS = ldpd + +libldp_a_SOURCES = \ + accept.c address.c adjacency.c control.c hello.c init.c interface.c \ + keepalive.c l2vpn.c labelmapping.c lde.c lde_lib.c ldpd.c \ + ldpe.c log.c neighbor.c notification.c packet.c pfkey.c \ + socket.c util.c ldp_vty_cmds.c ldp_vty_conf.c ldp_vty_exec.c \ + ldp_debug.c ldp_zebra.c + +noinst_HEADERS = \ + control.h lde.h ldpd.h ldpe.h ldp.h log.h ldp_debug.h ldp_vty.h + +ldpd_SOURCES = ldpd.c +ldpd_LDADD = libldp.a ../lib/libzebra.la @LIBCAP@ + +examplesdir = $(exampledir) +dist_examples_DATA = ldpd.conf.sample diff --git a/ldpd/accept.c b/ldpd/accept.c new file mode 100644 index 0000000000..4cb461b908 --- /dev/null +++ b/ldpd/accept.c @@ -0,0 +1,136 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2012 Claudio Jeker + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" + +struct accept_ev { + LIST_ENTRY(accept_ev) entry; + struct thread *ev; + int (*accept_cb)(struct thread *); + void *arg; + int fd; +}; + +struct { + LIST_HEAD(, accept_ev) queue; + struct thread *evt; +} accept_queue; + +static void accept_arm(void); +static void accept_unarm(void); +static int accept_cb(struct thread *); +static int accept_timeout(struct thread *); + +void +accept_init(void) +{ + LIST_INIT(&accept_queue.queue); +} + +int +accept_add(int fd, int (*cb)(struct thread *), void *arg) +{ + struct accept_ev *av; + + if ((av = calloc(1, sizeof(*av))) == NULL) + return (-1); + av->fd = fd; + av->accept_cb = cb; + av->arg = arg; + LIST_INSERT_HEAD(&accept_queue.queue, av, entry); + + av->ev = thread_add_read(master, accept_cb, av, av->fd); + + log_debug("%s: accepting on fd %d", __func__, fd); + + return (0); +} + +void +accept_del(int fd) +{ + struct accept_ev *av; + + LIST_FOREACH(av, &accept_queue.queue, entry) + if (av->fd == fd) { + log_debug("%s: %d removed from queue", __func__, fd); + THREAD_READ_OFF(av->ev); + LIST_REMOVE(av, entry); + free(av); + return; + } +} + +void +accept_pause(void) +{ + log_debug(__func__); + accept_unarm(); + accept_queue.evt = thread_add_timer(master, accept_timeout, NULL, 1); +} + +void +accept_unpause(void) +{ + if (accept_queue.evt != NULL) { + log_debug(__func__); + THREAD_TIMER_OFF(accept_queue.evt); + accept_arm(); + } +} + +static void +accept_arm(void) +{ + struct accept_ev *av; + LIST_FOREACH(av, &accept_queue.queue, entry) + av->ev = thread_add_read(master, accept_cb, av, av->fd); +} + +static void +accept_unarm(void) +{ + struct accept_ev *av; + LIST_FOREACH(av, &accept_queue.queue, entry) + THREAD_READ_OFF(av->ev); +} + +static int +accept_cb(struct thread 
*thread) +{ + struct accept_ev *av = THREAD_ARG(thread); + av->ev = thread_add_read(master, accept_cb, av, av->fd); + av->accept_cb(thread); + + return (0); +} + +static int +accept_timeout(struct thread *thread) +{ + accept_queue.evt = NULL; + + log_debug(__func__); + accept_arm(); + + return (0); +} diff --git a/ldpd/address.c b/ldpd/address.c new file mode 100644 index 0000000000..1c4c116f21 --- /dev/null +++ b/ldpd/address.c @@ -0,0 +1,294 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2009 Michele Marchetto + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" +#include "ldp_debug.h" + +static void send_address(struct nbr *, int, struct if_addr_head *, + unsigned int, int); +static int gen_address_list_tlv(struct ibuf *, uint16_t, int, + struct if_addr_head *, unsigned int); +static void address_list_add(struct if_addr_head *, struct if_addr *); +static void address_list_clr(struct if_addr_head *); + +static void +send_address(struct nbr *nbr, int af, struct if_addr_head *addr_list, + unsigned int addr_count, int withdraw) +{ + struct ibuf *buf; + uint16_t msg_type; + uint8_t addr_size; + struct if_addr *if_addr; + uint16_t size; + unsigned int tlv_addr_count = 0; + int err = 0; + + /* nothing to send */ + if (LIST_EMPTY(addr_list)) + return; + + if (!withdraw) + msg_type = MSG_TYPE_ADDR; + else + msg_type = MSG_TYPE_ADDRWITHDRAW; + + switch (af) { + case AF_INET: + addr_size = sizeof(struct in_addr); + break; + case AF_INET6: + addr_size = sizeof(struct in6_addr); + break; + default: + fatalx("send_address: unknown af"); + } + + while ((if_addr = LIST_FIRST(addr_list)) != NULL) { + /* + * Send as many addresses as possible - respect the session's + * negotiated maximum pdu length. 
+ */ + size = LDP_HDR_SIZE + LDP_MSG_SIZE + ADDR_LIST_SIZE; + if (size + addr_count * addr_size <= nbr->max_pdu_len) + tlv_addr_count = addr_count; + else + tlv_addr_count = (nbr->max_pdu_len - size) / addr_size; + size += tlv_addr_count * addr_size; + addr_count -= tlv_addr_count; + + if ((buf = ibuf_open(size)) == NULL) + fatal(__func__); + + err |= gen_ldp_hdr(buf, size); + size -= LDP_HDR_SIZE; + err |= gen_msg_hdr(buf, msg_type, size); + size -= LDP_MSG_SIZE; + err |= gen_address_list_tlv(buf, size, af, addr_list, + tlv_addr_count); + if (err) { + address_list_clr(addr_list); + ibuf_free(buf); + return; + } + + while ((if_addr = LIST_FIRST(addr_list)) != NULL) { + debug_msg_send("%s: lsr-id %s address %s", + msg_name(msg_type), inet_ntoa(nbr->id), + log_addr(af, &if_addr->addr)); + + LIST_REMOVE(if_addr, entry); + free(if_addr); + if (--tlv_addr_count == 0) + break; + } + + evbuf_enqueue(&nbr->tcp->wbuf, buf); + } + + nbr_fsm(nbr, NBR_EVT_PDU_SENT); +} + +void +send_address_single(struct nbr *nbr, struct if_addr *if_addr, int withdraw) +{ + struct if_addr_head addr_list; + + LIST_INIT(&addr_list); + address_list_add(&addr_list, if_addr); + send_address(nbr, if_addr->af, &addr_list, 1, withdraw); +} + +void +send_address_all(struct nbr *nbr, int af) +{ + struct if_addr_head addr_list; + struct if_addr *if_addr; + unsigned int addr_count = 0; + + LIST_INIT(&addr_list); + LIST_FOREACH(if_addr, &global.addr_list, entry) { + if (if_addr->af != af) + continue; + + address_list_add(&addr_list, if_addr); + addr_count++; + } + + send_address(nbr, af, &addr_list, addr_count, 0); +} + +int +recv_address(struct nbr *nbr, char *buf, uint16_t len) +{ + struct ldp_msg msg; + uint16_t msg_type; + struct address_list_tlv alt; + enum imsg_type type; + struct lde_addr lde_addr; + + memcpy(&msg, buf, sizeof(msg)); + buf += LDP_MSG_SIZE; + len -= LDP_MSG_SIZE; + + /* Address List TLV */ + if (len < ADDR_LIST_SIZE) { + session_shutdown(nbr, S_BAD_MSG_LEN, msg.id, msg.type); + return (-1); + } + + memcpy(&alt, buf, sizeof(alt)); + if (ntohs(alt.length) != len - TLV_HDR_SIZE) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + if (ntohs(alt.type) != TLV_TYPE_ADDRLIST) { + session_shutdown(nbr, S_UNKNOWN_TLV, msg.id, msg.type); + return (-1); + } + switch (ntohs(alt.family)) { + case AF_IPV4: + if (!nbr->v4_enabled) + /* just ignore the message */ + return (0); + break; + case AF_IPV6: + if (!nbr->v6_enabled) + /* just ignore the message */ + return (0); + break; + default: + send_notification_nbr(nbr, S_UNSUP_ADDR, msg.id, msg.type); + return (-1); + } + buf += sizeof(alt); + len -= sizeof(alt); + + msg_type = ntohs(msg.type); + if (msg_type == MSG_TYPE_ADDR) + type = IMSG_ADDRESS_ADD; + else + type = IMSG_ADDRESS_DEL; + + while (len > 0) { + switch (ntohs(alt.family)) { + case AF_IPV4: + if (len < sizeof(struct in_addr)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, + msg.type); + return (-1); + } + + memset(&lde_addr, 0, sizeof(lde_addr)); + lde_addr.af = AF_INET; + memcpy(&lde_addr.addr, buf, sizeof(struct in_addr)); + + buf += sizeof(struct in_addr); + len -= sizeof(struct in_addr); + break; + case AF_IPV6: + if (len < sizeof(struct in6_addr)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, + msg.type); + return (-1); + } + + memset(&lde_addr, 0, sizeof(lde_addr)); + lde_addr.af = AF_INET6; + memcpy(&lde_addr.addr, buf, sizeof(struct in6_addr)); + + buf += sizeof(struct in6_addr); + len -= sizeof(struct in6_addr); + break; + default: + fatalx("recv_address: unknown af"); 
+ } + + debug_msg_recv("%s: lsr-id %s address %s", msg_name(msg_type), + inet_ntoa(nbr->id), log_addr(lde_addr.af, &lde_addr.addr)); + + ldpe_imsg_compose_lde(type, nbr->peerid, 0, &lde_addr, + sizeof(lde_addr)); + } + + return (0); +} + +static int +gen_address_list_tlv(struct ibuf *buf, uint16_t size, int af, + struct if_addr_head *addr_list, unsigned int tlv_addr_count) +{ + struct address_list_tlv alt; + uint16_t addr_size; + struct if_addr *if_addr; + int err = 0; + + memset(&alt, 0, sizeof(alt)); + alt.type = TLV_TYPE_ADDRLIST; + alt.length = htons(size - TLV_HDR_SIZE); + + switch (af) { + case AF_INET: + alt.family = htons(AF_IPV4); + addr_size = sizeof(struct in_addr); + break; + case AF_INET6: + alt.family = htons(AF_IPV6); + addr_size = sizeof(struct in6_addr); + break; + default: + fatalx("gen_address_list_tlv: unknown af"); + } + + err |= ibuf_add(buf, &alt, sizeof(alt)); + LIST_FOREACH(if_addr, addr_list, entry) { + err |= ibuf_add(buf, &if_addr->addr, addr_size); + if (--tlv_addr_count == 0) + break; + } + + return (err); +} + +static void +address_list_add(struct if_addr_head *addr_list, struct if_addr *if_addr) +{ + struct if_addr *new; + + new = malloc(sizeof(*new)); + if (new == NULL) + fatal(__func__); + *new = *if_addr; + + LIST_INSERT_HEAD(addr_list, new, entry); +} + +static void +address_list_clr(struct if_addr_head *addr_list) +{ + struct if_addr *if_addr; + + while ((if_addr = LIST_FIRST(addr_list)) != NULL) { + LIST_REMOVE(if_addr, entry); + free(if_addr); + } +} diff --git a/ldpd/adjacency.c b/ldpd/adjacency.c new file mode 100644 index 0000000000..3607ee96b3 --- /dev/null +++ b/ldpd/adjacency.c @@ -0,0 +1,352 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2015 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2005 Claudio Jeker + * Copyright (c) 2004, 2005, 2008 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" + +static int adj_itimer(struct thread *); +static void tnbr_del(struct tnbr *); +static int tnbr_hello_timer(struct thread *); +static void tnbr_start_hello_timer(struct tnbr *); +static void tnbr_stop_hello_timer(struct tnbr *); + +struct adj * +adj_new(struct in_addr lsr_id, struct hello_source *source, + union ldpd_addr *addr) +{ + struct adj *adj; + + log_debug("%s: lsr-id %s, %s", __func__, inet_ntoa(lsr_id), + log_hello_src(source)); + + if ((adj = calloc(1, sizeof(*adj))) == NULL) + fatal(__func__); + + adj->lsr_id = lsr_id; + adj->nbr = NULL; + adj->source = *source; + adj->trans_addr = *addr; + + LIST_INSERT_HEAD(&global.adj_list, adj, global_entry); + + switch (source->type) { + case HELLO_LINK: + LIST_INSERT_HEAD(&source->link.ia->adj_list, adj, ia_entry); + break; + case HELLO_TARGETED: + source->target->adj = adj; + break; + } + + return (adj); +} + +static void +adj_del_single(struct adj *adj) +{ + log_debug("%s: lsr-id %s, %s (%s)", __func__, inet_ntoa(adj->lsr_id), + log_hello_src(&adj->source), af_name(adj_get_af(adj))); + + adj_stop_itimer(adj); + + LIST_REMOVE(adj, global_entry); + if (adj->nbr) + LIST_REMOVE(adj, nbr_entry); + switch (adj->source.type) { + case HELLO_LINK: + LIST_REMOVE(adj, ia_entry); + break; + case HELLO_TARGETED: + adj->source.target->adj = NULL; + break; + } + + free(adj); +} + +void +adj_del(struct adj *adj, uint32_t notif_status) +{ + struct nbr *nbr = adj->nbr; + struct adj *atmp; + + adj_del_single(adj); + + /* + * If the neighbor still exists but none of its remaining + * adjacencies (if any) are from the preferred address-family, + * then delete it. + */ + if (nbr && nbr_adj_count(nbr, nbr->af) == 0) { + LIST_FOREACH_SAFE(adj, &nbr->adj_list, nbr_entry, atmp) + adj_del_single(adj); + session_shutdown(nbr, notif_status, 0, 0); + nbr_del(nbr); + } +} + +struct adj * +adj_find(struct hello_source *source) +{ + struct adj *adj; + + LIST_FOREACH(adj, &global.adj_list, global_entry) { + if (adj->source.type != source->type) + continue; + + switch (source->type) { + case HELLO_LINK: + if (ldp_addrcmp(source->link.ia->af, + &adj->source.link.src_addr, + &source->link.src_addr) == 0) + return (adj); + break; + case HELLO_TARGETED: + if (adj->source.target == source->target) + return (adj); + break; + } + } + + return (NULL); +} + +int +adj_get_af(struct adj *adj) +{ + switch (adj->source.type) { + case HELLO_LINK: + return (adj->source.link.ia->af); + case HELLO_TARGETED: + return (adj->source.target->af); + default: + fatalx("adj_get_af: unknown hello type"); + } +} + +/* adjacency timers */ + +/* ARGSUSED */ +static int +adj_itimer(struct thread *thread) +{ + struct adj *adj = THREAD_ARG(thread); + + adj->inactivity_timer = NULL; + + log_debug("%s: lsr-id %s", __func__, inet_ntoa(adj->lsr_id)); + + if (adj->source.type == HELLO_TARGETED) { + if (!(adj->source.target->flags & F_TNBR_CONFIGURED) && + adj->source.target->pw_count == 0) { + /* remove dynamic targeted neighbor */ + tnbr_del(adj->source.target); + return (0); + } + adj->source.target->adj = NULL; + } + + adj_del(adj, S_HOLDTIME_EXP); + + return (0); +} + +void +adj_start_itimer(struct adj *adj) +{ + THREAD_TIMER_OFF(adj->inactivity_timer); + adj->inactivity_timer = thread_add_timer(master, adj_itimer, adj, + adj->holdtime); +} + +void +adj_stop_itimer(struct adj *adj) +{ + THREAD_TIMER_OFF(adj->inactivity_timer); +} + +/* targeted neighbors */ + +struct tnbr * +tnbr_new(int af, union ldpd_addr *addr) +{ + struct 
tnbr *tnbr; + + if ((tnbr = calloc(1, sizeof(*tnbr))) == NULL) + fatal(__func__); + + tnbr->af = af; + tnbr->addr = *addr; + tnbr->state = TNBR_STA_DOWN; + + return (tnbr); +} + +static void +tnbr_del(struct tnbr *tnbr) +{ + tnbr_stop_hello_timer(tnbr); + if (tnbr->adj) + adj_del(tnbr->adj, S_SHUTDOWN); + LIST_REMOVE(tnbr, entry); + free(tnbr); +} + +struct tnbr * +tnbr_find(struct ldpd_conf *xconf, int af, union ldpd_addr *addr) +{ + struct tnbr *tnbr; + + LIST_FOREACH(tnbr, &xconf->tnbr_list, entry) + if (af == tnbr->af && + ldp_addrcmp(af, addr, &tnbr->addr) == 0) + return (tnbr); + + return (NULL); +} + +struct tnbr * +tnbr_check(struct tnbr *tnbr) +{ + if (!(tnbr->flags & (F_TNBR_CONFIGURED|F_TNBR_DYNAMIC)) && + tnbr->pw_count == 0) { + tnbr_del(tnbr); + return (NULL); + } + + return (tnbr); +} + +void +tnbr_update(struct tnbr *tnbr) +{ + int socket_ok, rtr_id_ok; + + if ((ldp_af_global_get(&global, tnbr->af))->ldp_edisc_socket != -1) + socket_ok = 1; + else + socket_ok = 0; + + if (ldp_rtr_id_get(leconf) != INADDR_ANY) + rtr_id_ok = 1; + else + rtr_id_ok = 0; + + if (tnbr->state == TNBR_STA_DOWN) { + if (!socket_ok || !rtr_id_ok) + return; + + tnbr->state = TNBR_STA_ACTIVE; + send_hello(HELLO_TARGETED, NULL, tnbr); + + tnbr_start_hello_timer(tnbr); + } else if (tnbr->state == TNBR_STA_ACTIVE) { + if (socket_ok && rtr_id_ok) + return; + + tnbr->state = TNBR_STA_DOWN; + tnbr_stop_hello_timer(tnbr); + } +} + +void +tnbr_update_all(int af) +{ + struct tnbr *tnbr; + + /* update targeted neighbors */ + LIST_FOREACH(tnbr, &leconf->tnbr_list, entry) + if (tnbr->af == af || af == AF_UNSPEC) + tnbr_update(tnbr); +} + +uint16_t +tnbr_get_hello_holdtime(struct tnbr *tnbr) +{ + if ((ldp_af_conf_get(leconf, tnbr->af))->thello_holdtime != 0) + return ((ldp_af_conf_get(leconf, tnbr->af))->thello_holdtime); + + return (leconf->thello_holdtime); +} + +uint16_t +tnbr_get_hello_interval(struct tnbr *tnbr) +{ + if ((ldp_af_conf_get(leconf, tnbr->af))->thello_interval != 0) + return ((ldp_af_conf_get(leconf, tnbr->af))->thello_interval); + + return (leconf->thello_interval); +} + +/* target neighbors timers */ + +/* ARGSUSED */ +static int +tnbr_hello_timer(struct thread *thread) +{ + struct tnbr *tnbr = THREAD_ARG(thread); + + tnbr->hello_timer = NULL; + send_hello(HELLO_TARGETED, NULL, tnbr); + tnbr_start_hello_timer(tnbr); + + return (0); +} + +static void +tnbr_start_hello_timer(struct tnbr *tnbr) +{ + THREAD_TIMER_OFF(tnbr->hello_timer); + tnbr->hello_timer = thread_add_timer(master, tnbr_hello_timer, tnbr, + tnbr_get_hello_interval(tnbr)); +} + +static void +tnbr_stop_hello_timer(struct tnbr *tnbr) +{ + THREAD_TIMER_OFF(tnbr->hello_timer); +} + +struct ctl_adj * +adj_to_ctl(struct adj *adj) +{ + static struct ctl_adj actl; + + actl.af = adj_get_af(adj); + actl.id = adj->lsr_id; + actl.type = adj->source.type; + switch (adj->source.type) { + case HELLO_LINK: + memcpy(actl.ifname, adj->source.link.ia->iface->name, + sizeof(actl.ifname)); + break; + case HELLO_TARGETED: + actl.src_addr = adj->source.target->addr; + break; + } + actl.holdtime = adj->holdtime; + actl.trans_addr = adj->trans_addr; + + return (&actl); +} diff --git a/ldpd/control.c b/ldpd/control.c new file mode 100644 index 0000000000..ba303cc12c --- /dev/null +++ b/ldpd/control.c @@ -0,0 +1,286 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and 
this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" +#include "control.h" + +#define CONTROL_BACKLOG 5 + +static int control_accept(struct thread *); +static struct ctl_conn *control_connbyfd(int); +static struct ctl_conn *control_connbypid(pid_t); +static void control_close(int); +static int control_dispatch_imsg(struct thread *); + +struct ctl_conns ctl_conns; + +static int control_fd; + +int +control_init(void) +{ + struct sockaddr_un s_un; + int fd; + mode_t old_umask; + + if ((fd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { + log_warn("%s: socket", __func__); + return (-1); + } + sock_set_nonblock(fd); + + memset(&s_un, 0, sizeof(s_un)); + s_un.sun_family = AF_UNIX; + strlcpy(s_un.sun_path, LDPD_SOCKET, sizeof(s_un.sun_path)); + + if (unlink(LDPD_SOCKET) == -1) + if (errno != ENOENT) { + log_warn("%s: unlink %s", __func__, LDPD_SOCKET); + close(fd); + return (-1); + } + + old_umask = umask(S_IXUSR|S_IXGRP|S_IWOTH|S_IROTH|S_IXOTH); + if (bind(fd, (struct sockaddr *)&s_un, sizeof(s_un)) == -1) { + log_warn("%s: bind: %s", __func__, LDPD_SOCKET); + close(fd); + umask(old_umask); + return (-1); + } + umask(old_umask); + + if (chmod(LDPD_SOCKET, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP) == -1) { + log_warn("%s: chmod", __func__); + close(fd); + (void)unlink(LDPD_SOCKET); + return (-1); + } + + control_fd = fd; + + return (0); +} + +int +control_listen(void) +{ + if (listen(control_fd, CONTROL_BACKLOG) == -1) { + log_warn("%s: listen", __func__); + return (-1); + } + + return (accept_add(control_fd, control_accept, NULL)); +} + +void +control_cleanup(void) +{ + accept_del(control_fd); + close(control_fd); + unlink(LDPD_SOCKET); +} + +/* ARGSUSED */ +static int +control_accept(struct thread *thread) +{ + int connfd; + socklen_t len; + struct sockaddr_un s_un; + struct ctl_conn *c; + + len = sizeof(s_un); + if ((connfd = accept(THREAD_FD(thread), (struct sockaddr *)&s_un, + &len)) == -1) { + /* + * Pause accept if we are out of file descriptors, or + * libevent will haunt us here too. 
+ */ + if (errno == ENFILE || errno == EMFILE) + accept_pause(); + else if (errno != EWOULDBLOCK && errno != EINTR && + errno != ECONNABORTED) + log_warn("%s: accept", __func__); + return (0); + } + sock_set_nonblock(connfd); + + if ((c = calloc(1, sizeof(struct ctl_conn))) == NULL) { + log_warn(__func__); + close(connfd); + return (0); + } + + imsg_init(&c->iev.ibuf, connfd); + c->iev.handler_read = control_dispatch_imsg; + c->iev.ev_read = thread_add_read(master, c->iev.handler_read, + &c->iev, c->iev.ibuf.fd); + c->iev.handler_write = ldp_write_handler; + c->iev.ev_write = NULL; + + TAILQ_INSERT_TAIL(&ctl_conns, c, entry); + + return (0); +} + +static struct ctl_conn * +control_connbyfd(int fd) +{ + struct ctl_conn *c; + + for (c = TAILQ_FIRST(&ctl_conns); c != NULL && c->iev.ibuf.fd != fd; + c = TAILQ_NEXT(c, entry)) + ; /* nothing */ + + return (c); +} + +static struct ctl_conn * +control_connbypid(pid_t pid) +{ + struct ctl_conn *c; + + for (c = TAILQ_FIRST(&ctl_conns); c != NULL && c->iev.ibuf.pid != pid; + c = TAILQ_NEXT(c, entry)) + ; /* nothing */ + + return (c); +} + +static void +control_close(int fd) +{ + struct ctl_conn *c; + + if ((c = control_connbyfd(fd)) == NULL) { + log_warnx("%s: fd %d: not found", __func__, fd); + return; + } + + msgbuf_clear(&c->iev.ibuf.w); + TAILQ_REMOVE(&ctl_conns, c, entry); + + THREAD_READ_OFF(c->iev.ev_read); + THREAD_WRITE_OFF(c->iev.ev_write); + close(c->iev.ibuf.fd); + accept_unpause(); + free(c); +} + +/* ARGSUSED */ +static int +control_dispatch_imsg(struct thread *thread) +{ + int fd = THREAD_FD(thread); + struct ctl_conn *c; + struct imsg imsg; + ssize_t n; + unsigned int ifidx; + + if ((c = control_connbyfd(fd)) == NULL) { + log_warnx("%s: fd %d: not found", __func__, fd); + return (0); + } + + c->iev.ev_read = NULL; + + if (((n = imsg_read(&c->iev.ibuf)) == -1 && errno != EAGAIN) || + n == 0) { + control_close(fd); + return (0); + } + + for (;;) { + if ((n = imsg_get(&c->iev.ibuf, &imsg)) == -1) { + control_close(fd); + return (0); + } + + if (n == 0) + break; + + switch (imsg.hdr.type) { + case IMSG_CTL_FIB_COUPLE: + case IMSG_CTL_FIB_DECOUPLE: + case IMSG_CTL_RELOAD: + case IMSG_CTL_KROUTE: + case IMSG_CTL_KROUTE_ADDR: + case IMSG_CTL_IFINFO: + /* ignore */ + break; + case IMSG_CTL_SHOW_INTERFACE: + if (imsg.hdr.len == IMSG_HEADER_SIZE + + sizeof(ifidx)) { + memcpy(&ifidx, imsg.data, sizeof(ifidx)); + ldpe_iface_ctl(c, ifidx); + imsg_compose_event(&c->iev, IMSG_CTL_END, 0, + 0, -1, NULL, 0); + } + break; + case IMSG_CTL_SHOW_DISCOVERY: + ldpe_adj_ctl(c); + break; + case IMSG_CTL_SHOW_LIB: + case IMSG_CTL_SHOW_L2VPN_PW: + case IMSG_CTL_SHOW_L2VPN_BINDING: + c->iev.ibuf.pid = imsg.hdr.pid; + ldpe_imsg_compose_lde(imsg.hdr.type, 0, imsg.hdr.pid, + imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE); + break; + case IMSG_CTL_SHOW_NBR: + ldpe_nbr_ctl(c); + break; + case IMSG_CTL_CLEAR_NBR: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct ctl_nbr)) + break; + + nbr_clear_ctl(imsg.data); + break; + case IMSG_CTL_LOG_VERBOSE: + /* ignore */ + break; + default: + log_debug("%s: error handling imsg %d", __func__, + imsg.hdr.type); + break; + } + imsg_free(&imsg); + } + + imsg_event_add(&c->iev); + + return (0); +} + +int +control_imsg_relay(struct imsg *imsg) +{ + struct ctl_conn *c; + + if ((c = control_connbypid(imsg->hdr.pid)) == NULL) + return (0); + + return (imsg_compose_event(&c->iev, imsg->hdr.type, 0, imsg->hdr.pid, + -1, imsg->data, imsg->hdr.len - IMSG_HEADER_SIZE)); +} diff --git a/ldpd/control.h b/ldpd/control.h new file mode 
100644 index 0000000000..32c49fdf87 --- /dev/null +++ b/ldpd/control.h @@ -0,0 +1,37 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CONTROL_H_ +#define _CONTROL_H_ + +#include "openbsd-queue.h" + +struct ctl_conn { + TAILQ_ENTRY(ctl_conn) entry; + struct imsgev iev; +}; +TAILQ_HEAD(ctl_conns, ctl_conn); + +extern struct ctl_conns ctl_conns; + +int control_init(void); +int control_listen(void); +void control_cleanup(void); +int control_imsg_relay(struct imsg *); + +#endif /* _CONTROL_H_ */ diff --git a/ldpd/hello.c b/ldpd/hello.c new file mode 100644 index 0000000000..755b25aa85 --- /dev/null +++ b/ldpd/hello.c @@ -0,0 +1,591 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" +#include "ldp_debug.h" + +static int gen_hello_prms_tlv(struct ibuf *buf, uint16_t, uint16_t); +static int gen_opt4_hello_prms_tlv(struct ibuf *, uint16_t, uint32_t); +static int gen_opt16_hello_prms_tlv(struct ibuf *, uint16_t, uint8_t *); +static int gen_ds_hello_prms_tlv(struct ibuf *, uint32_t); +static int tlv_decode_hello_prms(char *, uint16_t, uint16_t *, uint16_t *); +static int tlv_decode_opt_hello_prms(char *, uint16_t, int *, int, + union ldpd_addr *, uint32_t *, uint16_t *); + +int +send_hello(enum hello_type type, struct iface_af *ia, struct tnbr *tnbr) +{ + int af; + union ldpd_addr dst; + uint16_t size, holdtime = 0, flags = 0; + int fd = 0; + struct ibuf *buf; + int err = 0; + + switch (type) { + case HELLO_LINK: + af = ia->af; + holdtime = if_get_hello_holdtime(ia); + flags = 0; + fd = (ldp_af_global_get(&global, af))->ldp_disc_socket; + + /* multicast destination address */ + switch (af) { + case AF_INET: + if (!(leconf->ipv4.flags & F_LDPD_AF_NO_GTSM)) + flags |= F_HELLO_GTSM; + dst.v4 = global.mcast_addr_v4; + break; + case AF_INET6: + dst.v6 = global.mcast_addr_v6; + break; + default: + fatalx("send_hello: unknown af"); + } + break; + case HELLO_TARGETED: + af = tnbr->af; + holdtime = tnbr_get_hello_holdtime(tnbr); + flags = F_HELLO_TARGETED; + if ((tnbr->flags & F_TNBR_CONFIGURED) || tnbr->pw_count) + flags |= F_HELLO_REQ_TARG; + fd = (ldp_af_global_get(&global, af))->ldp_edisc_socket; + + /* unicast destination address */ + dst = tnbr->addr; + break; + default: + fatalx("send_hello: unknown hello type"); + } + + /* calculate message size */ + size = LDP_HDR_SIZE + LDP_MSG_SIZE + sizeof(struct hello_prms_tlv); + switch (af) { + case AF_INET: + size += sizeof(struct hello_prms_opt4_tlv); + break; + case AF_INET6: + size += sizeof(struct hello_prms_opt16_tlv); + break; + default: + fatalx("send_hello: unknown af"); + } + size += sizeof(struct hello_prms_opt4_tlv); + if (ldp_is_dual_stack(leconf)) + size += sizeof(struct hello_prms_opt4_tlv); + + /* generate message */ + if ((buf = ibuf_open(size)) == NULL) + fatal(__func__); + + err |= gen_ldp_hdr(buf, size); + size -= LDP_HDR_SIZE; + err |= gen_msg_hdr(buf, MSG_TYPE_HELLO, size); + err |= gen_hello_prms_tlv(buf, holdtime, flags); + + /* + * RFC 7552 - Section 6.1: + * "An LSR MUST include only the transport address whose address + * family is the same as that of the IP packet carrying the Hello + * message". + */ + switch (af) { + case AF_INET: + err |= gen_opt4_hello_prms_tlv(buf, TLV_TYPE_IPV4TRANSADDR, + leconf->ipv4.trans_addr.v4.s_addr); + break; + case AF_INET6: + err |= gen_opt16_hello_prms_tlv(buf, TLV_TYPE_IPV6TRANSADDR, + leconf->ipv6.trans_addr.v6.s6_addr); + break; + default: + fatalx("send_hello: unknown af"); + } + + err |= gen_opt4_hello_prms_tlv(buf, TLV_TYPE_CONFIG, + htonl(global.conf_seqnum)); + + /* + * RFC 7552 - Section 6.1.1: + * "A Dual-stack LSR (i.e., an LSR supporting Dual-stack LDP for a peer) + * MUST include the Dual-Stack capability TLV in all of its LDP Hellos". 
+ */ + if (ldp_is_dual_stack(leconf)) + err |= gen_ds_hello_prms_tlv(buf, leconf->trans_pref); + + if (err) { + ibuf_free(buf); + return (-1); + } + + switch (type) { + case HELLO_LINK: + debug_hello_send("iface %s (%s) holdtime %u", ia->iface->name, + af_name(ia->af), holdtime); + break; + case HELLO_TARGETED: + debug_hello_send("targeted-neighbor %s (%s) holdtime %u", + log_addr(tnbr->af, &tnbr->addr), af_name(tnbr->af), + holdtime); + break; + default: + fatalx("send_hello: unknown hello type"); + } + + send_packet(fd, af, &dst, ia, buf->buf, buf->wpos); + ibuf_free(buf); + + return (0); +} + +void +recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af, + union ldpd_addr *src, struct iface *iface, int multicast, char *buf, + uint16_t len) +{ + struct adj *adj = NULL; + struct nbr *nbr, *nbrt; + uint16_t holdtime = 0, flags = 0; + int tlvs_rcvd; + int ds_tlv; + union ldpd_addr trans_addr; + uint32_t scope_id = 0; + uint32_t conf_seqnum; + uint16_t trans_pref; + int r; + struct hello_source source; + struct iface_af *ia = NULL; + struct tnbr *tnbr = NULL; + + r = tlv_decode_hello_prms(buf, len, &holdtime, &flags); + if (r == -1) { + log_debug("%s: lsr-id %s: failed to decode params", __func__, + inet_ntoa(lsr_id)); + return; + } + /* safety checks */ + if (holdtime != 0 && holdtime < MIN_HOLDTIME) { + log_debug("%s: lsr-id %s: invalid hello holdtime (%u)", + __func__, inet_ntoa(lsr_id), holdtime); + return; + } + if (multicast && (flags & F_HELLO_TARGETED)) { + log_debug("%s: lsr-id %s: multicast targeted hello", __func__, + inet_ntoa(lsr_id)); + return; + } + if (!multicast && !((flags & F_HELLO_TARGETED))) { + log_debug("%s: lsr-id %s: unicast link hello", __func__, + inet_ntoa(lsr_id)); + return; + } + buf += r; + len -= r; + + r = tlv_decode_opt_hello_prms(buf, len, &tlvs_rcvd, af, &trans_addr, + &conf_seqnum, &trans_pref); + if (r == -1) { + log_debug("%s: lsr-id %s: failed to decode optional params", + __func__, inet_ntoa(lsr_id)); + return; + } + if (r != len) { + log_debug("%s: lsr-id %s: unexpected data in message", + __func__, inet_ntoa(lsr_id)); + return; + } + + /* implicit transport address */ + if (!(tlvs_rcvd & F_HELLO_TLV_RCVD_ADDR)) + trans_addr = *src; + if (bad_addr(af, &trans_addr)) { + log_debug("%s: lsr-id %s: invalid transport address %s", + __func__, inet_ntoa(lsr_id), log_addr(af, &trans_addr)); + return; + } + if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&trans_addr.v6)) { + /* + * RFC 7552 - Section 6.1: + * "An LSR MUST use a global unicast IPv6 address in an IPv6 + * Transport Address optional object of outgoing targeted + * Hellos and check for the same in incoming targeted Hellos + * (i.e., MUST discard the targeted Hello if it failed the + * check)". + */ + if (flags & F_HELLO_TARGETED) { + log_debug("%s: lsr-id %s: invalid targeted hello " + "transport address %s", __func__, inet_ntoa(lsr_id), + log_addr(af, &trans_addr)); + return; + } + scope_id = iface->ifindex; + } + + memset(&source, 0, sizeof(source)); + if (flags & F_HELLO_TARGETED) { + /* + * RFC 7552 - Section 5.2: + * "The link-local IPv6 addresses MUST NOT be used as the + * targeted LDP Hello packet's source or destination addresses". 
+ */ + if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&src->v6)) { + log_debug("%s: lsr-id %s: targeted hello with " + "link-local source address", __func__, + inet_ntoa(lsr_id)); + return; + } + + tnbr = tnbr_find(leconf, af, src); + + /* remove the dynamic tnbr if the 'R' bit was cleared */ + if (tnbr && (tnbr->flags & F_TNBR_DYNAMIC) && + !((flags & F_HELLO_REQ_TARG))) { + tnbr->flags &= ~F_TNBR_DYNAMIC; + tnbr = tnbr_check(tnbr); + } + + if (!tnbr) { + if (!((flags & F_HELLO_REQ_TARG) && + ((ldp_af_conf_get(leconf, af))->flags & + F_LDPD_AF_THELLO_ACCEPT))) + return; + + tnbr = tnbr_new(af, src); + tnbr->flags |= F_TNBR_DYNAMIC; + tnbr_update(tnbr); + LIST_INSERT_HEAD(&leconf->tnbr_list, tnbr, entry); + } + + source.type = HELLO_TARGETED; + source.target = tnbr; + } else { + ia = iface_af_get(iface, af); + source.type = HELLO_LINK; + source.link.ia = ia; + source.link.src_addr = *src; + } + + adj = adj_find(&source); + nbr = nbr_find_ldpid(lsr_id.s_addr); + + /* check dual-stack tlv */ + ds_tlv = (tlvs_rcvd & F_HELLO_TLV_RCVD_DS) ? 1 : 0; + if (ds_tlv && trans_pref != leconf->trans_pref) { + /* + * RFC 7552 - Section 6.1.1: + * "If the Dual-Stack capability TLV is present and the remote + * preference does not match the local preference (or does not + * get recognized), then the LSR MUST discard the Hello message + * and log an error. + * If an LDP session was already in place, then the LSR MUST + * send a fatal Notification message with status code of + * 'Transport Connection Mismatch' and reset the session". + */ + log_debug("%s: lsr-id %s: remote transport preference does not " + "match the local preference", __func__, inet_ntoa(lsr_id)); + if (nbr) + session_shutdown(nbr, S_TRANS_MISMTCH, msg->id, + msg->type); + if (adj) + adj_del(adj, S_SHUTDOWN); + return; + } + + /* + * Check for noncompliant dual-stack neighbor according to + * RFC 7552 section 6.1.1. + */ + if (nbr && !ds_tlv) { + switch (af) { + case AF_INET: + if (nbr_adj_count(nbr, AF_INET6) > 0) { + session_shutdown(nbr, S_DS_NONCMPLNCE, + msg->id, msg->type); + return; + } + break; + case AF_INET6: + if (nbr_adj_count(nbr, AF_INET) > 0) { + session_shutdown(nbr, S_DS_NONCMPLNCE, + msg->id, msg->type); + return; + } + break; + default: + fatalx("recv_hello: unknown af"); + } + } + + /* + * Protections against misconfigured networks and buggy implementations. + */ + if (nbr && nbr->af == af && + (ldp_addrcmp(af, &nbr->raddr, &trans_addr) || + nbr->raddr_scope != scope_id)) { + log_warnx("%s: lsr-id %s: hello packet advertising a different " + "transport address", __func__, inet_ntoa(lsr_id)); + if (adj) + adj_del(adj, S_SHUTDOWN); + return; + } + if (nbr == NULL) { + nbrt = nbr_find_addr(af, &trans_addr); + if (nbrt) { + log_debug("%s: transport address %s is already being " + "used by lsr-id %s", __func__, log_addr(af, + &trans_addr), inet_ntoa(nbrt->id)); + if (adj) + adj_del(adj, S_SHUTDOWN); + return; + } + } + + if (adj == NULL) { + adj = adj_new(lsr_id, &source, &trans_addr); + if (nbr) { + adj->nbr = nbr; + LIST_INSERT_HEAD(&nbr->adj_list, adj, nbr_entry); + } + } + + /* + * If the hello adjacency's address-family doesn't match the local + * preference, then an adjacency is still created but we don't attempt + * to start an LDP session. 
+ */ + if (nbr == NULL && (!ds_tlv || + ((trans_pref == DUAL_STACK_LDPOV4 && af == AF_INET) || + (trans_pref == DUAL_STACK_LDPOV6 && af == AF_INET6)))) + nbr = nbr_new(lsr_id, af, ds_tlv, &trans_addr, scope_id); + + /* dynamic LDPv4 GTSM negotiation as per RFC 6720 */ + if (nbr) { + if (flags & F_HELLO_GTSM) + nbr->flags |= F_NBR_GTSM_NEGOTIATED; + else + nbr->flags &= ~F_NBR_GTSM_NEGOTIATED; + } + + /* update neighbor's configuration sequence number */ + if (nbr && (tlvs_rcvd & F_HELLO_TLV_RCVD_CONF)) { + if (conf_seqnum > nbr->conf_seqnum && + nbr_pending_idtimer(nbr)) + nbr_stop_idtimer(nbr); + nbr->conf_seqnum = conf_seqnum; + } + + /* always update the holdtime to properly handle runtime changes */ + switch (source.type) { + case HELLO_LINK: + if (holdtime == 0) + holdtime = LINK_DFLT_HOLDTIME; + + adj->holdtime = min(if_get_hello_holdtime(ia), holdtime); + break; + case HELLO_TARGETED: + if (holdtime == 0) + holdtime = TARGETED_DFLT_HOLDTIME; + + adj->holdtime = min(tnbr_get_hello_holdtime(tnbr), holdtime); + } + if (adj->holdtime != INFINITE_HOLDTIME) + adj_start_itimer(adj); + else + adj_stop_itimer(adj); + + debug_hello_recv("%s lsr-id %s transport-address %s holdtime %u%s", + log_hello_src(&source), inet_ntoa(lsr_id), log_addr(af, &trans_addr), + holdtime, (ds_tlv) ? " (dual stack TLV present)" : ""); + + if (nbr && nbr->state == NBR_STA_PRESENT && !nbr_pending_idtimer(nbr) && + nbr_session_active_role(nbr) && !nbr_pending_connect(nbr)) + nbr_establish_connection(nbr); +} + +static int +gen_hello_prms_tlv(struct ibuf *buf, uint16_t holdtime, uint16_t flags) +{ + struct hello_prms_tlv parms; + + memset(&parms, 0, sizeof(parms)); + parms.type = htons(TLV_TYPE_COMMONHELLO); + parms.length = htons(sizeof(parms.holdtime) + sizeof(parms.flags)); + parms.holdtime = htons(holdtime); + parms.flags = htons(flags); + + return (ibuf_add(buf, &parms, sizeof(parms))); +} + +static int +gen_opt4_hello_prms_tlv(struct ibuf *buf, uint16_t type, uint32_t value) +{ + struct hello_prms_opt4_tlv parms; + + memset(&parms, 0, sizeof(parms)); + parms.type = htons(type); + parms.length = htons(sizeof(parms.value)); + parms.value = value; + + return (ibuf_add(buf, &parms, sizeof(parms))); +} + +static int +gen_opt16_hello_prms_tlv(struct ibuf *buf, uint16_t type, uint8_t *value) +{ + struct hello_prms_opt16_tlv parms; + + memset(&parms, 0, sizeof(parms)); + parms.type = htons(type); + parms.length = htons(sizeof(parms.value)); + memcpy(&parms.value, value, sizeof(parms.value)); + + return (ibuf_add(buf, &parms, sizeof(parms))); +} + +static int +gen_ds_hello_prms_tlv(struct ibuf *buf, uint32_t value) +{ + if (leconf->flags & F_LDPD_DS_CISCO_INTEROP) + value = htonl(value); + else + value = htonl(value << 28); + + return (gen_opt4_hello_prms_tlv(buf, TLV_TYPE_DUALSTACK, value)); +} + +static int +tlv_decode_hello_prms(char *buf, uint16_t len, uint16_t *holdtime, + uint16_t *flags) +{ + struct hello_prms_tlv tlv; + + if (len < sizeof(tlv)) + return (-1); + memcpy(&tlv, buf, sizeof(tlv)); + + if (tlv.type != htons(TLV_TYPE_COMMONHELLO)) + return (-1); + if (ntohs(tlv.length) != sizeof(tlv) - TLV_HDR_SIZE) + return (-1); + + *holdtime = ntohs(tlv.holdtime); + *flags = ntohs(tlv.flags); + + return (sizeof(tlv)); +} + +static int +tlv_decode_opt_hello_prms(char *buf, uint16_t len, int *tlvs_rcvd, int af, + union ldpd_addr *addr, uint32_t *conf_number, uint16_t *trans_pref) +{ + struct tlv tlv; + uint16_t tlv_len; + int total = 0; + + *tlvs_rcvd = 0; + memset(addr, 0, sizeof(*addr)); + *conf_number = 0; + 
*trans_pref = 0; + + /* + * RFC 7552 - Section 6.1: + * "An LSR SHOULD accept the Hello message that contains both IPv4 and + * IPv6 Transport Address optional objects but MUST use only the + * transport address whose address family is the same as that of the + * IP packet carrying the Hello message. An LSR SHOULD accept only + * the first Transport Address optional object for a given address + * family in the received Hello message and ignore the rest if the + * LSR receives more than one Transport Address optional object for a + * given address family". + */ + while (len >= sizeof(tlv)) { + memcpy(&tlv, buf, TLV_HDR_SIZE); + tlv_len = ntohs(tlv.length); + if (tlv_len + TLV_HDR_SIZE > len) + return (-1); + buf += TLV_HDR_SIZE; + len -= TLV_HDR_SIZE; + total += TLV_HDR_SIZE; + + switch (ntohs(tlv.type)) { + case TLV_TYPE_IPV4TRANSADDR: + if (tlv_len != sizeof(addr->v4)) + return (-1); + if (af != AF_INET) + return (-1); + if (*tlvs_rcvd & F_HELLO_TLV_RCVD_ADDR) + break; + memcpy(&addr->v4, buf, sizeof(addr->v4)); + *tlvs_rcvd |= F_HELLO_TLV_RCVD_ADDR; + break; + case TLV_TYPE_IPV6TRANSADDR: + if (tlv_len != sizeof(addr->v6)) + return (-1); + if (af != AF_INET6) + return (-1); + if (*tlvs_rcvd & F_HELLO_TLV_RCVD_ADDR) + break; + memcpy(&addr->v6, buf, sizeof(addr->v6)); + *tlvs_rcvd |= F_HELLO_TLV_RCVD_ADDR; + break; + case TLV_TYPE_CONFIG: + if (tlv_len != sizeof(uint32_t)) + return (-1); + memcpy(conf_number, buf, sizeof(uint32_t)); + *tlvs_rcvd |= F_HELLO_TLV_RCVD_CONF; + break; + case TLV_TYPE_DUALSTACK: + if (tlv_len != sizeof(uint32_t)) + return (-1); + /* + * RFC 7552 - Section 6.1: + * "A Single-stack LSR does not need to use the + * Dual-Stack capability in Hello messages and SHOULD + * ignore this capability if received". + */ + if (!ldp_is_dual_stack(leconf)) + break; + /* Shame on you, Cisco! */ + if (leconf->flags & F_LDPD_DS_CISCO_INTEROP) { + memcpy(trans_pref, buf + sizeof(uint16_t), + sizeof(uint16_t)); + *trans_pref = ntohs(*trans_pref); + } else { + memcpy(trans_pref, buf , sizeof(uint16_t)); + *trans_pref = ntohs(*trans_pref) >> 12; + } + *tlvs_rcvd |= F_HELLO_TLV_RCVD_DS; + break; + default: + /* if unknown flag set, ignore TLV */ + if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) + return (-1); + break; + } + buf += tlv_len; + len -= tlv_len; + total += tlv_len; + } + + return (total); +} diff --git a/ldpd/init.c b/ldpd/init.c new file mode 100644 index 0000000000..ed6b53c02d --- /dev/null +++ b/ldpd/init.c @@ -0,0 +1,165 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2009 Michele Marchetto + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" +#include "ldp_debug.h" + +static int gen_init_prms_tlv(struct ibuf *, struct nbr *); + +void +send_init(struct nbr *nbr) +{ + struct ibuf *buf; + uint16_t size; + int err = 0; + + debug_msg_send("initialization: lsr-id %s", inet_ntoa(nbr->id)); + + size = LDP_HDR_SIZE + LDP_MSG_SIZE + SESS_PRMS_SIZE; + if ((buf = ibuf_open(size)) == NULL) + fatal(__func__); + + err |= gen_ldp_hdr(buf, size); + size -= LDP_HDR_SIZE; + err |= gen_msg_hdr(buf, MSG_TYPE_INIT, size); + size -= LDP_MSG_SIZE; + err |= gen_init_prms_tlv(buf, nbr); + if (err) { + ibuf_free(buf); + return; + } + + evbuf_enqueue(&nbr->tcp->wbuf, buf); +} + +int +recv_init(struct nbr *nbr, char *buf, uint16_t len) +{ + struct ldp_msg msg; + struct sess_prms_tlv sess; + uint16_t max_pdu_len; + + debug_msg_recv("initialization: lsr-id %s", inet_ntoa(nbr->id)); + + memcpy(&msg, buf, sizeof(msg)); + buf += LDP_MSG_SIZE; + len -= LDP_MSG_SIZE; + + if (len < SESS_PRMS_SIZE) { + session_shutdown(nbr, S_BAD_MSG_LEN, msg.id, msg.type); + return (-1); + } + memcpy(&sess, buf, sizeof(sess)); + if (ntohs(sess.length) != SESS_PRMS_LEN) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + if (ntohs(sess.proto_version) != LDP_VERSION) { + session_shutdown(nbr, S_BAD_PROTO_VER, msg.id, msg.type); + return (-1); + } + if (ntohs(sess.keepalive_time) < MIN_KEEPALIVE) { + session_shutdown(nbr, S_KEEPALIVE_BAD, msg.id, msg.type); + return (-1); + } + if (sess.lsr_id != ldp_rtr_id_get(leconf) || + ntohs(sess.lspace_id) != 0) { + session_shutdown(nbr, S_NO_HELLO, msg.id, msg.type); + return (-1); + } + + buf += SESS_PRMS_SIZE; + len -= SESS_PRMS_SIZE; + + /* Optional Parameters */ + while (len > 0) { + struct tlv tlv; + uint16_t tlv_len; + + if (len < sizeof(tlv)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + + memcpy(&tlv, buf, TLV_HDR_SIZE); + tlv_len = ntohs(tlv.length); + if (tlv_len + TLV_HDR_SIZE > len) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + buf += TLV_HDR_SIZE; + len -= TLV_HDR_SIZE; + + switch (ntohs(tlv.type)) { + case TLV_TYPE_ATMSESSIONPAR: + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); + return (-1); + case TLV_TYPE_FRSESSION: + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); + return (-1); + default: + if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) + send_notification_nbr(nbr, S_UNKNOWN_TLV, + msg.id, msg.type); + /* ignore unknown tlv */ + break; + } + buf += tlv_len; + len -= tlv_len; + } + + nbr->keepalive = min(nbr_get_keepalive(nbr->af, nbr->id), + ntohs(sess.keepalive_time)); + + max_pdu_len = ntohs(sess.max_pdu_len); + /* + * RFC 5036 - Section 3.5.3: + * "A value of 255 or less specifies the default maximum length of + * 4096 octets". 
+ */ + if (max_pdu_len <= 255) + max_pdu_len = LDP_MAX_LEN; + nbr->max_pdu_len = min(max_pdu_len, LDP_MAX_LEN); + + nbr_fsm(nbr, NBR_EVT_INIT_RCVD); + + return (0); +} + +static int +gen_init_prms_tlv(struct ibuf *buf, struct nbr *nbr) +{ + struct sess_prms_tlv parms; + + memset(&parms, 0, sizeof(parms)); + parms.type = htons(TLV_TYPE_COMMONSESSION); + parms.length = htons(SESS_PRMS_LEN); + parms.proto_version = htons(LDP_VERSION); + parms.keepalive_time = htons(nbr_get_keepalive(nbr->af, nbr->id)); + parms.reserved = 0; + parms.pvlim = 0; + parms.max_pdu_len = 0; + parms.lsr_id = nbr->id.s_addr; + parms.lspace_id = 0; + + return (ibuf_add(buf, &parms, SESS_PRMS_SIZE)); +} diff --git a/ldpd/interface.c b/ldpd/interface.c new file mode 100644 index 0000000000..b6472fe5e8 --- /dev/null +++ b/ldpd/interface.c @@ -0,0 +1,561 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2005 Claudio Jeker + * Copyright (c) 2004, 2005, 2008 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" + +#include "sockopt.h" + +static struct if_addr *if_addr_new(struct kaddr *); +static struct if_addr *if_addr_lookup(struct if_addr_head *, struct kaddr *); +static int if_start(struct iface *, int); +static int if_reset(struct iface *, int); +static void if_update_af(struct iface_af *, int); +static int if_hello_timer(struct thread *); +static void if_start_hello_timer(struct iface_af *); +static void if_stop_hello_timer(struct iface_af *); +static int if_join_ipv4_group(struct iface *, struct in_addr *); +static int if_leave_ipv4_group(struct iface *, struct in_addr *); +static int if_join_ipv6_group(struct iface *, struct in6_addr *); +static int if_leave_ipv6_group(struct iface *, struct in6_addr *); + +struct iface * +if_new(struct kif *kif) +{ + struct iface *iface; + + if ((iface = calloc(1, sizeof(*iface))) == NULL) + fatal("if_new: calloc"); + + strlcpy(iface->name, kif->ifname, sizeof(iface->name)); + LIST_INIT(&iface->addr_list); + if (kif->ifindex) + if_update_info(iface, kif); + + /* ipv4 */ + iface->ipv4.af = AF_INET; + iface->ipv4.iface = iface; + iface->ipv4.enabled = 0; + iface->ipv4.state = IF_STA_DOWN; + LIST_INIT(&iface->ipv4.adj_list); + + /* ipv6 */ + iface->ipv6.af = AF_INET6; + iface->ipv6.iface = iface; + iface->ipv6.enabled = 0; + iface->ipv6.state = IF_STA_DOWN; + LIST_INIT(&iface->ipv6.adj_list); + + return (iface); +} + +struct iface * +if_lookup(struct ldpd_conf *xconf, unsigned short ifindex) +{ + struct iface *iface; + + LIST_FOREACH(iface, &xconf->iface_list, entry) + if (iface->ifindex == ifindex) + return (iface); + + return (NULL); +} + +void +if_exit(struct iface *iface) +{ + struct if_addr *if_addr; + + log_debug("%s: interface %s", __func__, iface->name); + + if (iface->ipv4.state 
== IF_STA_ACTIVE) + if_reset(iface, AF_INET); + if (iface->ipv6.state == IF_STA_ACTIVE) + if_reset(iface, AF_INET6); + + while ((if_addr = LIST_FIRST(&iface->addr_list)) != NULL) { + LIST_REMOVE(if_addr, entry); + free(if_addr); + } +} + +struct iface * +if_lookup_name(struct ldpd_conf *xconf, const char *ifname) +{ + struct iface *iface; + + LIST_FOREACH(iface, &xconf->iface_list, entry) + if (strcmp(iface->name, ifname) == 0) + return (iface); + + return (NULL); +} + +void +if_update_info(struct iface *iface, struct kif *kif) +{ + /* get type */ + if (kif->flags & IFF_POINTOPOINT) + iface->type = IF_TYPE_POINTOPOINT; + if (kif->flags & IFF_BROADCAST && + kif->flags & IFF_MULTICAST) + iface->type = IF_TYPE_BROADCAST; + + /* get index and flags */ + iface->ifindex = kif->ifindex; + iface->flags = kif->flags; +} + +struct iface_af * +iface_af_get(struct iface *iface, int af) +{ + switch (af) { + case AF_INET: + return (&iface->ipv4); + case AF_INET6: + return (&iface->ipv6); + default: + fatalx("iface_af_get: unknown af"); + } +} + +static struct if_addr * +if_addr_new(struct kaddr *ka) +{ + struct if_addr *if_addr; + + if ((if_addr = calloc(1, sizeof(*if_addr))) == NULL) + fatal(__func__); + + if_addr->af = ka->af; + if_addr->addr = ka->addr; + if_addr->prefixlen = ka->prefixlen; + if_addr->dstbrd = ka->dstbrd; + + return (if_addr); +} + +static struct if_addr * +if_addr_lookup(struct if_addr_head *addr_list, struct kaddr *ka) +{ + struct if_addr *if_addr; + int af = ka->af; + + LIST_FOREACH(if_addr, addr_list, entry) + if (!ldp_addrcmp(af, &if_addr->addr, &ka->addr) && + if_addr->prefixlen == ka->prefixlen && + !ldp_addrcmp(af, &if_addr->dstbrd, &ka->dstbrd)) + return (if_addr); + + return (NULL); +} + +void +if_addr_add(struct kaddr *ka) +{ + struct iface *iface; + struct if_addr *if_addr; + struct nbr *nbr; + + if (if_addr_lookup(&global.addr_list, ka) == NULL) { + if_addr = if_addr_new(ka); + + LIST_INSERT_HEAD(&global.addr_list, if_addr, entry); + RB_FOREACH(nbr, nbr_id_head, &nbrs_by_id) { + if (nbr->state != NBR_STA_OPER) + continue; + if (if_addr->af == AF_INET && !nbr->v4_enabled) + continue; + if (if_addr->af == AF_INET6 && !nbr->v6_enabled) + continue; + + send_address_single(nbr, if_addr, 0); + } + } + + iface = if_lookup(leconf, ka->ifindex); + if (iface) { + if (ka->af == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&ka->addr.v6)) + iface->linklocal = ka->addr.v6; + + if (if_addr_lookup(&iface->addr_list, ka) == NULL) { + if_addr = if_addr_new(ka); + LIST_INSERT_HEAD(&iface->addr_list, if_addr, entry); + if_update(iface, if_addr->af); + } + } +} + +void +if_addr_del(struct kaddr *ka) +{ + struct iface *iface; + struct if_addr *if_addr; + struct nbr *nbr; + + iface = if_lookup(leconf, ka->ifindex); + if (iface) { + if (ka->af == AF_INET6 && + IN6_ARE_ADDR_EQUAL(&iface->linklocal, &ka->addr.v6)) + memset(&iface->linklocal, 0, sizeof(iface->linklocal)); + + if_addr = if_addr_lookup(&iface->addr_list, ka); + if (if_addr) { + LIST_REMOVE(if_addr, entry); + if_update(iface, if_addr->af); + free(if_addr); + } + } + + if_addr = if_addr_lookup(&global.addr_list, ka); + if (if_addr) { + RB_FOREACH(nbr, nbr_id_head, &nbrs_by_id) { + if (nbr->state != NBR_STA_OPER) + continue; + if (if_addr->af == AF_INET && !nbr->v4_enabled) + continue; + if (if_addr->af == AF_INET6 && !nbr->v6_enabled) + continue; + send_address_single(nbr, if_addr, 1); + } + LIST_REMOVE(if_addr, entry); + free(if_addr); + } +} + +static int +if_start(struct iface *iface, int af) +{ + struct iface_af *ia; + struct timeval now; + 
+ log_debug("%s: %s address-family %s", __func__, iface->name, + af_name(af)); + + ia = iface_af_get(iface, af); + + gettimeofday(&now, NULL); + ia->uptime = now.tv_sec; + + switch (af) { + case AF_INET: + if (if_join_ipv4_group(iface, &global.mcast_addr_v4)) + return (-1); + break; + case AF_INET6: + if (if_join_ipv6_group(iface, &global.mcast_addr_v6)) + return (-1); + break; + default: + fatalx("if_start: unknown af"); + } + + send_hello(HELLO_LINK, ia, NULL); + + if_start_hello_timer(ia); + return (0); +} + +static int +if_reset(struct iface *iface, int af) +{ + struct iface_af *ia; + struct adj *adj; + + log_debug("%s: %s address-family %s", __func__, iface->name, + af_name(af)); + + ia = iface_af_get(iface, af); + if_stop_hello_timer(ia); + + while ((adj = LIST_FIRST(&ia->adj_list)) != NULL) + adj_del(adj, S_SHUTDOWN); + + /* try to cleanup */ + switch (af) { + case AF_INET: + if (global.ipv4.ldp_disc_socket != -1) + if_leave_ipv4_group(iface, &global.mcast_addr_v4); + break; + case AF_INET6: + if (global.ipv6.ldp_disc_socket != -1) + if_leave_ipv6_group(iface, &global.mcast_addr_v6); + break; + default: + fatalx("if_start: unknown af"); + } + + return (0); +} + +static void +if_update_af(struct iface_af *ia, int link_ok) +{ + int addr_ok = 0, socket_ok, rtr_id_ok; + struct if_addr *if_addr; + + switch (ia->af) { + case AF_INET: + /* + * NOTE: for LDPv4, each interface should have at least one + * valid IP address otherwise they can not be enabled. + */ + LIST_FOREACH(if_addr, &ia->iface->addr_list, entry) { + if (if_addr->af == AF_INET) { + addr_ok = 1; + break; + } + } + break; + case AF_INET6: + /* for IPv6 the link-local address is enough. */ + if (IN6_IS_ADDR_LINKLOCAL(&ia->iface->linklocal)) + addr_ok = 1; + break; + default: + fatalx("if_update_af: unknown af"); + } + + if ((ldp_af_global_get(&global, ia->af))->ldp_disc_socket != -1) + socket_ok = 1; + else + socket_ok = 0; + + if (ldp_rtr_id_get(leconf) != INADDR_ANY) + rtr_id_ok = 1; + else + rtr_id_ok = 0; + + if (ia->state == IF_STA_DOWN) { + if (!ia->enabled || !link_ok || !addr_ok || !socket_ok || + !rtr_id_ok) + return; + + ia->state = IF_STA_ACTIVE; + if_start(ia->iface, ia->af); + } else if (ia->state == IF_STA_ACTIVE) { + if (ia->enabled && link_ok && addr_ok && socket_ok && rtr_id_ok) + return; + + ia->state = IF_STA_DOWN; + if_reset(ia->iface, ia->af); + } +} + +void +if_update(struct iface *iface, int af) +{ + int link_ok; + + link_ok = (iface->flags & IFF_UP) && (iface->flags & IFF_RUNNING); + + if (af == AF_INET || af == AF_UNSPEC) + if_update_af(&iface->ipv4, link_ok); + if (af == AF_INET6 || af == AF_UNSPEC) + if_update_af(&iface->ipv6, link_ok); +} + +void +if_update_all(int af) +{ + struct iface *iface; + + LIST_FOREACH(iface, &leconf->iface_list, entry) + if_update(iface, af); +} + +uint16_t +if_get_hello_holdtime(struct iface_af *ia) +{ + if (ia->hello_holdtime != 0) + return (ia->hello_holdtime); + + if ((ldp_af_conf_get(leconf, ia->af))->lhello_holdtime != 0) + return ((ldp_af_conf_get(leconf, ia->af))->lhello_holdtime); + + return (leconf->lhello_holdtime); +} + +uint16_t +if_get_hello_interval(struct iface_af *ia) +{ + if (ia->hello_interval != 0) + return (ia->hello_interval); + + if ((ldp_af_conf_get(leconf, ia->af))->lhello_interval != 0) + return ((ldp_af_conf_get(leconf, ia->af))->lhello_interval); + + return (leconf->lhello_interval); +} + +/* timers */ +/* ARGSUSED */ +static int +if_hello_timer(struct thread *thread) +{ + struct iface_af *ia = THREAD_ARG(thread); + + ia->hello_timer = NULL; + 
send_hello(HELLO_LINK, ia, NULL); + if_start_hello_timer(ia); + + return (0); +} + +static void +if_start_hello_timer(struct iface_af *ia) +{ + THREAD_TIMER_OFF(ia->hello_timer); + ia->hello_timer = thread_add_timer(master, if_hello_timer, ia, + if_get_hello_interval(ia)); +} + +static void +if_stop_hello_timer(struct iface_af *ia) +{ + THREAD_TIMER_OFF(ia->hello_timer); +} + +struct ctl_iface * +if_to_ctl(struct iface_af *ia) +{ + static struct ctl_iface ictl; + struct timeval now; + struct adj *adj; + + ictl.af = ia->af; + memcpy(ictl.name, ia->iface->name, sizeof(ictl.name)); + ictl.ifindex = ia->iface->ifindex; + ictl.state = ia->state; + ictl.flags = ia->iface->flags; + ictl.type = ia->iface->type; + ictl.hello_holdtime = if_get_hello_holdtime(ia); + ictl.hello_interval = if_get_hello_interval(ia); + + gettimeofday(&now, NULL); + if (ia->state != IF_STA_DOWN && + ia->uptime != 0) { + ictl.uptime = now.tv_sec - ia->uptime; + } else + ictl.uptime = 0; + + ictl.adj_cnt = 0; + LIST_FOREACH(adj, &ia->adj_list, ia_entry) + ictl.adj_cnt++; + + return (&ictl); +} + +/* multicast membership sockopts */ +in_addr_t +if_get_ipv4_addr(struct iface *iface) +{ + struct if_addr *if_addr; + + LIST_FOREACH(if_addr, &iface->addr_list, entry) + if (if_addr->af == AF_INET) + return (if_addr->addr.v4.s_addr); + + return (INADDR_ANY); +} + +static int +if_join_ipv4_group(struct iface *iface, struct in_addr *addr) +{ + struct in_addr if_addr; + + log_debug("%s: interface %s addr %s", __func__, iface->name, + inet_ntoa(*addr)); + + if_addr.s_addr = if_get_ipv4_addr(iface); + + if (setsockopt_ipv4_multicast(global.ipv4.ldp_disc_socket, + IP_ADD_MEMBERSHIP, if_addr, addr->s_addr, iface->ifindex) < 0) { + log_warn("%s: error IP_ADD_MEMBERSHIP, interface %s address %s", + __func__, iface->name, inet_ntoa(*addr)); + return (-1); + } + return (0); +} + +static int +if_leave_ipv4_group(struct iface *iface, struct in_addr *addr) +{ + struct in_addr if_addr; + + log_debug("%s: interface %s addr %s", __func__, iface->name, + inet_ntoa(*addr)); + + if_addr.s_addr = if_get_ipv4_addr(iface); + + if (setsockopt_ipv4_multicast(global.ipv4.ldp_disc_socket, + IP_DROP_MEMBERSHIP, if_addr, addr->s_addr, iface->ifindex) < 0) { + log_warn("%s: error IP_DROP_MEMBERSHIP, interface %s " + "address %s", __func__, iface->name, inet_ntoa(*addr)); + return (-1); + } + + return (0); +} + +static int +if_join_ipv6_group(struct iface *iface, struct in6_addr *addr) +{ + struct ipv6_mreq mreq; + + log_debug("%s: interface %s addr %s", __func__, iface->name, + log_in6addr(addr)); + + mreq.ipv6mr_multiaddr = *addr; + mreq.ipv6mr_interface = iface->ifindex; + + if (setsockopt(global.ipv6.ldp_disc_socket, IPPROTO_IPV6, + IPV6_JOIN_GROUP, &mreq, sizeof(mreq)) < 0) { + log_warn("%s: error IPV6_JOIN_GROUP, interface %s address %s", + __func__, iface->name, log_in6addr(addr)); + return (-1); + } + + return (0); +} + +static int +if_leave_ipv6_group(struct iface *iface, struct in6_addr *addr) +{ + struct ipv6_mreq mreq; + + log_debug("%s: interface %s addr %s", __func__, iface->name, + log_in6addr(addr)); + + mreq.ipv6mr_multiaddr = *addr; + mreq.ipv6mr_interface = iface->ifindex; + + if (setsockopt(global.ipv6.ldp_disc_socket, IPPROTO_IPV6, + IPV6_LEAVE_GROUP, (void *)&mreq, sizeof(mreq)) < 0) { + log_warn("%s: error IPV6_LEAVE_GROUP, interface %s address %s", + __func__, iface->name, log_in6addr(addr)); + return (-1); + } + + return (0); +} diff --git a/ldpd/keepalive.c b/ldpd/keepalive.c new file mode 100644 index 0000000000..f9a7d850fd --- 
/dev/null +++ b/ldpd/keepalive.c @@ -0,0 +1,62 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2009 Michele Marchetto + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" +#include "ldp_debug.h" + +void +send_keepalive(struct nbr *nbr) +{ + struct ibuf *buf; + uint16_t size; + + size = LDP_HDR_SIZE + LDP_MSG_SIZE; + if ((buf = ibuf_open(size)) == NULL) + fatal(__func__); + + gen_ldp_hdr(buf, size); + size -= LDP_HDR_SIZE; + gen_msg_hdr(buf, MSG_TYPE_KEEPALIVE, size); + + debug_kalive_send("keepalive: lsr-id %s", inet_ntoa(nbr->id)); + + evbuf_enqueue(&nbr->tcp->wbuf, buf); +} + +int +recv_keepalive(struct nbr *nbr, char *buf, uint16_t len) +{ + struct ldp_msg msg; + + memcpy(&msg, buf, sizeof(msg)); + if (len != LDP_MSG_SIZE) { + session_shutdown(nbr, S_BAD_MSG_LEN, msg.id, msg.type); + return (-1); + } + + debug_kalive_recv("keepalive: lsr-id %s", inet_ntoa(nbr->id)); + + if (nbr->state != NBR_STA_OPER) + nbr_fsm(nbr, NBR_EVT_KEEPALIVE_RCVD); + + return (0); +} diff --git a/ldpd/l2vpn.c b/ldpd/l2vpn.c new file mode 100644 index 0000000000..db382e484f --- /dev/null +++ b/ldpd/l2vpn.c @@ -0,0 +1,549 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2015 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2005 Claudio Jeker + * Copyright (c) 2004, 2005, 2008 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" + +static void l2vpn_pw_fec(struct l2vpn_pw *, struct fec *); + +struct l2vpn * +l2vpn_new(const char *name) +{ + struct l2vpn *l2vpn; + + if ((l2vpn = calloc(1, sizeof(*l2vpn))) == NULL) + fatal("l2vpn_new: calloc"); + + strlcpy(l2vpn->name, name, sizeof(l2vpn->name)); + + /* set default values */ + l2vpn->mtu = DEFAULT_L2VPN_MTU; + l2vpn->pw_type = DEFAULT_PW_TYPE; + + LIST_INIT(&l2vpn->if_list); + LIST_INIT(&l2vpn->pw_list); + LIST_INIT(&l2vpn->pw_inactive_list); + + return (l2vpn); +} + +struct l2vpn * +l2vpn_find(struct ldpd_conf *xconf, const char *name) +{ + struct l2vpn *l2vpn; + + LIST_FOREACH(l2vpn, &xconf->l2vpn_list, entry) + if (strcmp(l2vpn->name, name) == 0) + return (l2vpn); + + return (NULL); +} + +void +l2vpn_del(struct l2vpn *l2vpn) +{ + struct l2vpn_if *lif; + struct l2vpn_pw *pw; + + while ((lif = LIST_FIRST(&l2vpn->if_list)) != NULL) { + LIST_REMOVE(lif, entry); + free(lif); + } + while ((pw = LIST_FIRST(&l2vpn->pw_list)) != NULL) { + LIST_REMOVE(pw, entry); + free(pw); + } + while ((pw = LIST_FIRST(&l2vpn->pw_inactive_list)) != NULL) { + LIST_REMOVE(pw, entry); + free(pw); + } + + free(l2vpn); +} + +void +l2vpn_init(struct l2vpn *l2vpn) +{ + struct l2vpn_pw *pw; + + LIST_FOREACH(pw, &l2vpn->pw_list, entry) + l2vpn_pw_init(pw); +} + +void +l2vpn_exit(struct l2vpn *l2vpn) +{ + struct l2vpn_pw *pw; + + LIST_FOREACH(pw, &l2vpn->pw_list, entry) + l2vpn_pw_exit(pw); +} + +struct l2vpn_if * +l2vpn_if_new(struct l2vpn *l2vpn, struct kif *kif) +{ + struct l2vpn_if *lif; + + if ((lif = calloc(1, sizeof(*lif))) == NULL) + fatal("l2vpn_if_new: calloc"); + + lif->l2vpn = l2vpn; + strlcpy(lif->ifname, kif->ifname, sizeof(lif->ifname)); + lif->ifindex = kif->ifindex; + lif->flags = kif->flags; + + return (lif); +} + +struct l2vpn_if * +l2vpn_if_find(struct l2vpn *l2vpn, unsigned int ifindex) +{ + struct l2vpn_if *lif; + + LIST_FOREACH(lif, &l2vpn->if_list, entry) + if (lif->ifindex == ifindex) + return (lif); + + return (NULL); +} + +struct l2vpn_if * +l2vpn_if_find_name(struct l2vpn *l2vpn, const char *ifname) +{ + struct l2vpn_if *lif; + + LIST_FOREACH(lif, &l2vpn->if_list, entry) + if (strcmp(lif->ifname, ifname) == 0) + return (lif); + + return (NULL); +} + + +struct l2vpn_pw * +l2vpn_pw_new(struct l2vpn *l2vpn, struct kif *kif) +{ + struct l2vpn_pw *pw; + + if ((pw = calloc(1, sizeof(*pw))) == NULL) + fatal("l2vpn_pw_new: calloc"); + + pw->l2vpn = l2vpn; + strlcpy(pw->ifname, kif->ifname, sizeof(pw->ifname)); + pw->ifindex = kif->ifindex; + + return (pw); +} + +struct l2vpn_pw * +l2vpn_pw_find(struct l2vpn *l2vpn, unsigned int ifindex) +{ + struct l2vpn_pw *pw; + + LIST_FOREACH(pw, &l2vpn->pw_list, entry) + if (pw->ifindex == ifindex) + return (pw); + LIST_FOREACH(pw, &l2vpn->pw_inactive_list, entry) + if (pw->ifindex == ifindex) + return (pw); + + return (NULL); +} + +struct l2vpn_pw * +l2vpn_pw_find_name(struct l2vpn *l2vpn, const char *ifname) +{ + struct l2vpn_pw *pw; + + LIST_FOREACH(pw, &l2vpn->pw_list, entry) + if (strcmp(pw->ifname, ifname) == 0) + return (pw); + LIST_FOREACH(pw, &l2vpn->pw_inactive_list, entry) + if (strcmp(pw->ifname, ifname) == 0) + return (pw); + + return (NULL); +} + +void +l2vpn_pw_init(struct l2vpn_pw *pw) +{ + struct fec fec; + + l2vpn_pw_reset(pw); + + l2vpn_pw_fec(pw, &fec); + lde_kernel_insert(&fec, AF_INET, (union ldpd_addr*)&pw->lsr_id, 0, + 0, (void *)pw); +} + +void +l2vpn_pw_exit(struct l2vpn_pw *pw) +{ + struct fec fec; + + l2vpn_pw_fec(pw, 
&fec); + lde_kernel_remove(&fec, AF_INET, (union ldpd_addr*)&pw->lsr_id, 0); +} + +static void +l2vpn_pw_fec(struct l2vpn_pw *pw, struct fec *fec) +{ + memset(fec, 0, sizeof(*fec)); + fec->type = FEC_TYPE_PWID; + fec->u.pwid.type = pw->l2vpn->pw_type; + fec->u.pwid.pwid = pw->pwid; + fec->u.pwid.lsr_id = pw->lsr_id; +} + +void +l2vpn_pw_reset(struct l2vpn_pw *pw) +{ + pw->remote_group = 0; + pw->remote_mtu = 0; + pw->remote_status = 0; + + if (pw->flags & F_PW_CWORD_CONF) + pw->flags |= F_PW_CWORD; + else + pw->flags &= ~F_PW_CWORD; + + if (pw->flags & F_PW_STATUSTLV_CONF) + pw->flags |= F_PW_STATUSTLV; + else + pw->flags &= ~F_PW_STATUSTLV; +} + +int +l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh) +{ + struct fec fec; + struct fec_node *fn; + + /* check for a remote label */ + if (fnh->remote_label == NO_LABEL) + return (0); + + /* MTUs must match */ + if (pw->l2vpn->mtu != pw->remote_mtu) + return (0); + + /* check pw status if applicable */ + if ((pw->flags & F_PW_STATUSTLV) && + pw->remote_status != PW_FORWARDING) + return (0); + + /* check for a working lsp to the nexthop */ + memset(&fec, 0, sizeof(fec)); + switch (pw->af) { + case AF_INET: + fec.type = FEC_TYPE_IPV4; + fec.u.ipv4.prefix = pw->addr.v4; + fec.u.ipv4.prefixlen = 32; + break; + case AF_INET6: + fec.type = FEC_TYPE_IPV6; + fec.u.ipv6.prefix = pw->addr.v6; + fec.u.ipv6.prefixlen = 128; + break; + default: + fatalx("l2vpn_pw_ok: unknown af"); + } + + fn = (struct fec_node *)fec_find(&ft, &fec); + if (fn == NULL || fn->local_label == NO_LABEL) + return (0); + /* + * Need to ensure that there's a label binding for all nexthops. + * Otherwise, ECMP for this route could render the pseudowire unusable. + */ + LIST_FOREACH(fnh, &fn->nexthops, entry) + if (fnh->remote_label == NO_LABEL) + return (0); + + return (1); +} + +int +l2vpn_pw_negotiate(struct lde_nbr *ln, struct fec_node *fn, struct map *map) +{ + struct l2vpn_pw *pw; + struct status_tlv st; + + /* NOTE: thanks martini & friends for all this mess */ + + pw = (struct l2vpn_pw *) fn->data; + if (pw == NULL) + /* + * pseudowire not configured, return and record + * the mapping later + */ + return (0); + + /* RFC4447 - Section 6.2: control word negotiation */ + if (fec_find(&ln->sent_map, &fn->fec)) { + if ((map->flags & F_MAP_PW_CWORD) && + !(pw->flags & F_PW_CWORD_CONF)) { + /* ignore the received label mapping */ + return (1); + } else if (!(map->flags & F_MAP_PW_CWORD) && + (pw->flags & F_PW_CWORD_CONF)) { + /* append a "Wrong C-bit" status code */ + st.status_code = S_WRONG_CBIT; + st.msg_id = map->msg_id; + st.msg_type = htons(MSG_TYPE_LABELMAPPING); + lde_send_labelwithdraw(ln, fn, NO_LABEL, &st); + + pw->flags &= ~F_PW_CWORD; + lde_send_labelmapping(ln, fn, 1); + } + } else if (map->flags & F_MAP_PW_CWORD) { + if (pw->flags & F_PW_CWORD_CONF) + pw->flags |= F_PW_CWORD; + else + /* act as if no label mapping had been received */ + return (1); + } else + pw->flags &= ~F_PW_CWORD; + + /* RFC4447 - Section 5.4.3: pseudowire status negotiation */ + if (fec_find(&ln->recv_map, &fn->fec) == NULL && + !(map->flags & F_MAP_PW_STATUS)) + pw->flags &= ~F_PW_STATUSTLV; + + return (0); +} + +void +l2vpn_send_pw_status(uint32_t peerid, uint32_t status, struct fec *fec) +{ + struct notify_msg nm; + + memset(&nm, 0, sizeof(nm)); + nm.status_code = S_PW_STATUS; + nm.pw_status = status; + nm.flags |= F_NOTIF_PW_STATUS; + lde_fec2map(fec, &nm.fec); + nm.flags |= F_NOTIF_FEC; + + lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, peerid, 0, + &nm, sizeof(nm)); +} + +void 
+l2vpn_recv_pw_status(struct lde_nbr *ln, struct notify_msg *nm) +{ + struct fec fec; + struct fec_node *fn; + struct fec_nh *fnh; + struct l2vpn_pw *pw; + + /* TODO group wildcard */ + if (!(nm->fec.flags & F_MAP_PW_ID)) + return; + + lde_map2fec(&nm->fec, ln->id, &fec); + fn = (struct fec_node *)fec_find(&ft, &fec); + if (fn == NULL) + /* unknown fec */ + return; + + pw = (struct l2vpn_pw *) fn->data; + if (pw == NULL) + return; + + fnh = fec_nh_find(fn, AF_INET, (union ldpd_addr *)&ln->id, 0); + if (fnh == NULL) + return; + + /* remote status didn't change */ + if (pw->remote_status == nm->pw_status) + return; + + pw->remote_status = nm->pw_status; + + if (l2vpn_pw_ok(pw, fnh)) + lde_send_change_klabel(fn, fnh); + else + lde_send_delete_klabel(fn, fnh); +} + +void +l2vpn_sync_pws(int af, union ldpd_addr *addr) +{ + struct l2vpn *l2vpn; + struct l2vpn_pw *pw; + struct fec fec; + struct fec_node *fn; + struct fec_nh *fnh; + + LIST_FOREACH(l2vpn, &ldeconf->l2vpn_list, entry) { + LIST_FOREACH(pw, &l2vpn->pw_list, entry) { + if (af != pw->af || ldp_addrcmp(af, &pw->addr, addr)) + continue; + + l2vpn_pw_fec(pw, &fec); + fn = (struct fec_node *)fec_find(&ft, &fec); + if (fn == NULL) + continue; + fnh = fec_nh_find(fn, AF_INET, (union ldpd_addr *) + &pw->lsr_id, 0); + if (fnh == NULL) + continue; + + if (l2vpn_pw_ok(pw, fnh)) + lde_send_change_klabel(fn, fnh); + else + lde_send_delete_klabel(fn, fnh); + } + } +} + +void +l2vpn_pw_ctl(pid_t pid) +{ + struct l2vpn *l2vpn; + struct l2vpn_pw *pw; + static struct ctl_pw pwctl; + + LIST_FOREACH(l2vpn, &ldeconf->l2vpn_list, entry) + LIST_FOREACH(pw, &l2vpn->pw_list, entry) { + memset(&pwctl, 0, sizeof(pwctl)); + strlcpy(pwctl.l2vpn_name, pw->l2vpn->name, + sizeof(pwctl.l2vpn_name)); + strlcpy(pwctl.ifname, pw->ifname, + sizeof(pwctl.ifname)); + pwctl.pwid = pw->pwid; + pwctl.lsr_id = pw->lsr_id; + pwctl.status = pw->flags & F_PW_STATUS_UP; + + lde_imsg_compose_ldpe(IMSG_CTL_SHOW_L2VPN_PW, 0, + pid, &pwctl, sizeof(pwctl)); + } +} + +void +l2vpn_binding_ctl(pid_t pid) +{ + struct fec *f; + struct fec_node *fn; + struct lde_map *me; + struct l2vpn_pw *pw; + static struct ctl_pw pwctl; + + RB_FOREACH(f, fec_tree, &ft) { + if (f->type != FEC_TYPE_PWID) + continue; + + fn = (struct fec_node *)f; + if (fn->local_label == NO_LABEL && + LIST_EMPTY(&fn->downstream)) + continue; + + memset(&pwctl, 0, sizeof(pwctl)); + pwctl.type = f->u.pwid.type; + pwctl.pwid = f->u.pwid.pwid; + pwctl.lsr_id = f->u.pwid.lsr_id; + + pw = (struct l2vpn_pw *) fn->data; + if (pw) { + pwctl.local_label = fn->local_label; + pwctl.local_gid = 0; + pwctl.local_ifmtu = pw->l2vpn->mtu; + pwctl.local_cword = (pw->flags & F_PW_CWORD_CONF) ? + 1 : 0; + } else + pwctl.local_label = NO_LABEL; + + LIST_FOREACH(me, &fn->downstream, entry) + if (f->u.pwid.lsr_id.s_addr == me->nexthop->id.s_addr) + break; + + if (me) { + pwctl.remote_label = me->map.label; + pwctl.remote_gid = me->map.fec.pwid.group_id; + if (me->map.flags & F_MAP_PW_IFMTU) + pwctl.remote_ifmtu = me->map.fec.pwid.ifmtu; + if (pw) + pwctl.remote_cword = (pw->flags & F_PW_CWORD) ? 
+ 1 : 0; + + lde_imsg_compose_ldpe(IMSG_CTL_SHOW_L2VPN_BINDING, + 0, pid, &pwctl, sizeof(pwctl)); + } else if (pw) { + pwctl.remote_label = NO_LABEL; + + lde_imsg_compose_ldpe(IMSG_CTL_SHOW_L2VPN_BINDING, + 0, pid, &pwctl, sizeof(pwctl)); + } + } +} + +/* ldpe */ + +void +ldpe_l2vpn_init(struct l2vpn *l2vpn) +{ + struct l2vpn_pw *pw; + + LIST_FOREACH(pw, &l2vpn->pw_list, entry) + ldpe_l2vpn_pw_init(pw); +} + +void +ldpe_l2vpn_exit(struct l2vpn *l2vpn) +{ + struct l2vpn_pw *pw; + + LIST_FOREACH(pw, &l2vpn->pw_list, entry) + ldpe_l2vpn_pw_exit(pw); +} + +void +ldpe_l2vpn_pw_init(struct l2vpn_pw *pw) +{ + struct tnbr *tnbr; + + tnbr = tnbr_find(leconf, pw->af, &pw->addr); + if (tnbr == NULL) { + tnbr = tnbr_new(pw->af, &pw->addr); + tnbr_update(tnbr); + LIST_INSERT_HEAD(&leconf->tnbr_list, tnbr, entry); + } + + tnbr->pw_count++; +} + +void +ldpe_l2vpn_pw_exit(struct l2vpn_pw *pw) +{ + struct tnbr *tnbr; + + tnbr = tnbr_find(leconf, pw->af, &pw->addr); + if (tnbr) { + tnbr->pw_count--; + tnbr_check(tnbr); + } +} diff --git a/ldpd/labelmapping.c b/ldpd/labelmapping.c new file mode 100644 index 0000000000..62f2a620d2 --- /dev/null +++ b/ldpd/labelmapping.c @@ -0,0 +1,761 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2014, 2015 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" +#include "ldp_debug.h" + +#include "mpls.h" + +static void enqueue_pdu(struct nbr *, struct ibuf *, uint16_t); +static int gen_label_tlv(struct ibuf *, uint32_t); +static int tlv_decode_label(struct nbr *, struct ldp_msg *, char *, + uint16_t, uint32_t *); +static int gen_reqid_tlv(struct ibuf *, uint32_t); + +static void +enqueue_pdu(struct nbr *nbr, struct ibuf *buf, uint16_t size) +{ + struct ldp_hdr *ldp_hdr; + + ldp_hdr = ibuf_seek(buf, 0, sizeof(struct ldp_hdr)); + ldp_hdr->length = htons(size); + evbuf_enqueue(&nbr->tcp->wbuf, buf); +} + +/* Generic function that handles all Label Message types */ +void +send_labelmessage(struct nbr *nbr, uint16_t type, struct mapping_head *mh) +{ + struct ibuf *buf = NULL; + struct mapping_entry *me; + uint16_t msg_size, size = 0; + int first = 1; + int err = 0; + + /* nothing to send */ + if (TAILQ_EMPTY(mh)) + return; + + while ((me = TAILQ_FIRST(mh)) != NULL) { + /* generate pdu */ + if (first) { + if ((buf = ibuf_open(nbr->max_pdu_len + + LDP_HDR_DEAD_LEN)) == NULL) + fatal(__func__); + + /* real size will be set up later */ + err |= gen_ldp_hdr(buf, 0); + + size = LDP_HDR_PDU_LEN; + first = 0; + } + + /* calculate size */ + msg_size = LDP_MSG_SIZE + TLV_HDR_SIZE; + switch (me->map.type) { + case MAP_TYPE_WILDCARD: + msg_size += FEC_ELM_WCARD_LEN; + break; + case MAP_TYPE_PREFIX: + msg_size += FEC_ELM_PREFIX_MIN_LEN + + PREFIX_SIZE(me->map.fec.prefix.prefixlen); + break; + case MAP_TYPE_PWID: + msg_size += FEC_PWID_ELM_MIN_LEN; + if (me->map.flags & F_MAP_PW_ID) + msg_size += PW_STATUS_TLV_LEN; + if (me->map.flags & F_MAP_PW_IFMTU) + msg_size += FEC_SUBTLV_IFMTU_SIZE; + if (me->map.flags & F_MAP_PW_STATUS) + msg_size += PW_STATUS_TLV_SIZE; + break; + } + if (me->map.label != NO_LABEL) + msg_size += LABEL_TLV_SIZE; + if (me->map.flags & F_MAP_REQ_ID) + msg_size += REQID_TLV_SIZE; + if (me->map.flags & F_MAP_STATUS) + msg_size += STATUS_SIZE; + + /* maximum pdu length exceeded, we need a new ldp pdu */ + if (size + msg_size > nbr->max_pdu_len) { + enqueue_pdu(nbr, buf, size); + first = 1; + continue; + } + + size += msg_size; + + /* append message and tlvs */ + err |= gen_msg_hdr(buf, type, msg_size); + err |= gen_fec_tlv(buf, &me->map); + if (me->map.label != NO_LABEL) + err |= gen_label_tlv(buf, me->map.label); + if (me->map.flags & F_MAP_REQ_ID) + err |= gen_reqid_tlv(buf, me->map.requestid); + if (me->map.flags & F_MAP_PW_STATUS) + err |= gen_pw_status_tlv(buf, me->map.pw_status); + if (me->map.flags & F_MAP_STATUS) + err |= gen_status_tlv(buf, me->map.st.status_code, + me->map.st.msg_id, me->map.st.msg_type); + if (err) { + ibuf_free(buf); + mapping_list_clr(mh); + return; + } + + debug_msg_send("%s: lsr-id %s fec %s label %s", msg_name(type), + inet_ntoa(nbr->id), log_map(&me->map), + log_label(me->map.label)); + + TAILQ_REMOVE(mh, me, entry); + free(me); + } + + enqueue_pdu(nbr, buf, size); + + nbr_fsm(nbr, NBR_EVT_PDU_SENT); +} + +/* Generic function that handles all Label Message types */ +int +recv_labelmessage(struct nbr *nbr, char *buf, uint16_t len, uint16_t type) +{ + struct ldp_msg msg; + struct tlv ft; + uint32_t label = NO_LABEL, reqid = 0; + uint32_t pw_status = 0; + uint8_t flags = 0; + int feclen, lbllen, tlen; + struct mapping_entry *me; + struct mapping_head mh; + struct map map; + + memcpy(&msg, buf, sizeof(msg)); + buf += LDP_MSG_SIZE; + len -= LDP_MSG_SIZE; + + /* FEC TLV */ + if (len < sizeof(ft)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, 
msg.type); + return (-1); + } + + memcpy(&ft, buf, sizeof(ft)); + if (ntohs(ft.type) != TLV_TYPE_FEC) { + send_notification_nbr(nbr, S_MISS_MSG, msg.id, msg.type); + return (-1); + } + feclen = ntohs(ft.length); + if (feclen > len - TLV_HDR_SIZE) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + + buf += TLV_HDR_SIZE; /* just advance to the end of the fec header */ + len -= TLV_HDR_SIZE; + + TAILQ_INIT(&mh); + do { + memset(&map, 0, sizeof(map)); + map.msg_id = msg.id; + + if ((tlen = tlv_decode_fec_elm(nbr, &msg, buf, feclen, + &map)) == -1) + goto err; + if (map.type == MAP_TYPE_PWID && + !(map.flags & F_MAP_PW_ID) && + type != MSG_TYPE_LABELWITHDRAW && + type != MSG_TYPE_LABELRELEASE) { + send_notification_nbr(nbr, S_MISS_MSG, msg.id, + msg.type); + return (-1); + } + + /* + * The Wildcard FEC Element can be used only in the + * Label Withdraw and Label Release messages. + */ + if (map.type == MAP_TYPE_WILDCARD) { + switch (type) { + case MSG_TYPE_LABELMAPPING: + case MSG_TYPE_LABELREQUEST: + case MSG_TYPE_LABELABORTREQ: + session_shutdown(nbr, S_UNKNOWN_FEC, msg.id, + msg.type); + goto err; + default: + break; + } + } + + /* + * LDP supports the use of multiple FEC Elements per + * FEC for the Label Mapping message only. + */ + if (type != MSG_TYPE_LABELMAPPING && + tlen != feclen) { + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); + goto err; + } + + mapping_list_add(&mh, &map); + + buf += tlen; + len -= tlen; + feclen -= tlen; + } while (feclen > 0); + + /* Mandatory Label TLV */ + if (type == MSG_TYPE_LABELMAPPING) { + lbllen = tlv_decode_label(nbr, &msg, buf, len, &label); + if (lbllen == -1) + goto err; + + buf += lbllen; + len -= lbllen; + } + + /* Optional Parameters */ + while (len > 0) { + struct tlv tlv; + uint16_t tlv_len; + uint32_t reqbuf, labelbuf, statusbuf; + + if (len < sizeof(tlv)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + goto err; + } + + memcpy(&tlv, buf, TLV_HDR_SIZE); + tlv_len = ntohs(tlv.length); + if (tlv_len + TLV_HDR_SIZE > len) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + goto err; + } + buf += TLV_HDR_SIZE; + len -= TLV_HDR_SIZE; + + switch (ntohs(tlv.type)) { + case TLV_TYPE_LABELREQUEST: + switch (type) { + case MSG_TYPE_LABELMAPPING: + case MSG_TYPE_LABELREQUEST: + if (tlv_len != REQID_TLV_LEN) { + session_shutdown(nbr, S_BAD_TLV_LEN, + msg.id, msg.type); + goto err; + } + + flags |= F_MAP_REQ_ID; + memcpy(&reqbuf, buf, sizeof(reqbuf)); + reqid = ntohl(reqbuf); + break; + default: + /* ignore */ + break; + } + break; + case TLV_TYPE_HOPCOUNT: + case TLV_TYPE_PATHVECTOR: + /* ignore */ + break; + case TLV_TYPE_GENERICLABEL: + switch (type) { + case MSG_TYPE_LABELWITHDRAW: + case MSG_TYPE_LABELRELEASE: + if (tlv_len != LABEL_TLV_LEN) { + session_shutdown(nbr, S_BAD_TLV_LEN, + msg.id, msg.type); + goto err; + } + + memcpy(&labelbuf, buf, sizeof(labelbuf)); + label = ntohl(labelbuf); + break; + default: + /* ignore */ + break; + } + break; + case TLV_TYPE_ATMLABEL: + case TLV_TYPE_FRLABEL: + switch (type) { + case MSG_TYPE_LABELWITHDRAW: + case MSG_TYPE_LABELRELEASE: + /* unsupported */ + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, + msg.type); + goto err; + break; + default: + /* ignore */ + break; + } + break; + case TLV_TYPE_STATUS: + if (tlv_len != STATUS_TLV_LEN) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, + msg.type); + goto err; + } + /* ignore */ + break; + case TLV_TYPE_PW_STATUS: + switch (type) { + case MSG_TYPE_LABELMAPPING: + if (tlv_len != PW_STATUS_TLV_LEN) { + 
session_shutdown(nbr, S_BAD_TLV_LEN, + msg.id, msg.type); + goto err; + } + + flags |= F_MAP_PW_STATUS; + memcpy(&statusbuf, buf, sizeof(statusbuf)); + pw_status = ntohl(statusbuf); + break; + default: + /* ignore */ + break; + } + break; + default: + if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) + send_notification_nbr(nbr, S_UNKNOWN_TLV, + msg.id, msg.type); + /* ignore unknown tlv */ + break; + } + buf += tlv_len; + len -= tlv_len; + } + + /* notify lde about the received message. */ + while ((me = TAILQ_FIRST(&mh)) != NULL) { + int imsg_type = IMSG_NONE; + + me->map.flags |= flags; + switch (me->map.type) { + case MAP_TYPE_PREFIX: + switch (me->map.fec.prefix.af) { + case AF_INET: + if (label == MPLS_LABEL_IPV6NULL) { + session_shutdown(nbr, S_BAD_TLV_VAL, + msg.id, msg.type); + goto err; + } + if (!nbr->v4_enabled) + goto next; + break; + case AF_INET6: + if (label == MPLS_LABEL_IPV4NULL) { + session_shutdown(nbr, S_BAD_TLV_VAL, + msg.id, msg.type); + goto err; + } + if (!nbr->v6_enabled) + goto next; + break; + default: + fatalx("recv_labelmessage: unknown af"); + } + break; + case MAP_TYPE_PWID: + if (label <= MPLS_LABEL_RESERVED_MAX) { + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, + msg.type); + goto err; + } + if (me->map.flags & F_MAP_PW_STATUS) + me->map.pw_status = pw_status; + break; + default: + break; + } + me->map.label = label; + if (me->map.flags & F_MAP_REQ_ID) + me->map.requestid = reqid; + + debug_msg_recv("%s: lsr-id %s fec %s label %s", msg_name(type), + inet_ntoa(nbr->id), log_map(&me->map), + log_label(me->map.label)); + + switch (type) { + case MSG_TYPE_LABELMAPPING: + imsg_type = IMSG_LABEL_MAPPING; + break; + case MSG_TYPE_LABELREQUEST: + imsg_type = IMSG_LABEL_REQUEST; + break; + case MSG_TYPE_LABELWITHDRAW: + imsg_type = IMSG_LABEL_WITHDRAW; + break; + case MSG_TYPE_LABELRELEASE: + imsg_type = IMSG_LABEL_RELEASE; + break; + case MSG_TYPE_LABELABORTREQ: + imsg_type = IMSG_LABEL_ABORT; + break; + default: + break; + } + + ldpe_imsg_compose_lde(imsg_type, nbr->peerid, 0, &me->map, + sizeof(struct map)); + +next: + TAILQ_REMOVE(&mh, me, entry); + free(me); + } + + return (0); + +err: + mapping_list_clr(&mh); + + return (-1); +} + +/* Other TLV related functions */ +static int +gen_label_tlv(struct ibuf *buf, uint32_t label) +{ + struct label_tlv lt; + + lt.type = htons(TLV_TYPE_GENERICLABEL); + lt.length = htons(LABEL_TLV_LEN); + lt.label = htonl(label); + + return (ibuf_add(buf, <, sizeof(lt))); +} + +static int +tlv_decode_label(struct nbr *nbr, struct ldp_msg *msg, char *buf, + uint16_t len, uint32_t *label) +{ + struct label_tlv lt; + + if (len < sizeof(lt)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, msg->type); + return (-1); + } + memcpy(<, buf, sizeof(lt)); + + if (!(ntohs(lt.type) & TLV_TYPE_GENERICLABEL)) { + send_notification_nbr(nbr, S_MISS_MSG, msg->id, msg->type); + return (-1); + } + + switch (htons(lt.type)) { + case TLV_TYPE_GENERICLABEL: + if (ntohs(lt.length) != sizeof(lt) - TLV_HDR_SIZE) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + *label = ntohl(lt.label); + if (*label > MPLS_LABEL_MAX || + (*label <= MPLS_LABEL_RESERVED_MAX && + *label != MPLS_LABEL_IPV4NULL && + *label != MPLS_LABEL_IPV6NULL && + *label != MPLS_LABEL_IMPLNULL)) { + session_shutdown(nbr, S_BAD_TLV_VAL, msg->id, + msg->type); + return (-1); + } + break; + case TLV_TYPE_ATMLABEL: + case TLV_TYPE_FRLABEL: + default: + /* unsupported */ + session_shutdown(nbr, S_BAD_TLV_VAL, msg->id, msg->type); + return (-1); + } + + return (sizeof(lt)); 
+} + +static int +gen_reqid_tlv(struct ibuf *buf, uint32_t reqid) +{ + struct reqid_tlv rt; + + rt.type = htons(TLV_TYPE_LABELREQUEST); + rt.length = htons(REQID_TLV_LEN); + rt.reqid = htonl(reqid); + + return (ibuf_add(buf, &rt, sizeof(rt))); +} + +int +gen_pw_status_tlv(struct ibuf *buf, uint32_t status) +{ + struct pw_status_tlv st; + + st.type = htons(TLV_TYPE_PW_STATUS); + st.length = htons(PW_STATUS_TLV_LEN); + st.value = htonl(status); + + return (ibuf_add(buf, &st, sizeof(st))); +} + +int +gen_fec_tlv(struct ibuf *buf, struct map *map) +{ + struct tlv ft; + uint16_t family, len, pw_type, ifmtu; + uint8_t pw_len = 0; + uint32_t group_id, pwid; + int err = 0; + + ft.type = htons(TLV_TYPE_FEC); + + switch (map->type) { + case MAP_TYPE_WILDCARD: + ft.length = htons(sizeof(uint8_t)); + err |= ibuf_add(buf, &ft, sizeof(ft)); + err |= ibuf_add(buf, &map->type, sizeof(map->type)); + break; + case MAP_TYPE_PREFIX: + len = PREFIX_SIZE(map->fec.prefix.prefixlen); + ft.length = htons(sizeof(map->type) + sizeof(family) + + sizeof(map->fec.prefix.prefixlen) + len); + err |= ibuf_add(buf, &ft, sizeof(ft)); + err |= ibuf_add(buf, &map->type, sizeof(map->type)); + switch (map->fec.prefix.af) { + case AF_INET: + family = htons(AF_IPV4); + break; + case AF_INET6: + family = htons(AF_IPV6); + break; + default: + fatalx("gen_fec_tlv: unknown af"); + break; + } + err |= ibuf_add(buf, &family, sizeof(family)); + err |= ibuf_add(buf, &map->fec.prefix.prefixlen, + sizeof(map->fec.prefix.prefixlen)); + if (len) + err |= ibuf_add(buf, &map->fec.prefix.prefix, len); + break; + case MAP_TYPE_PWID: + if (map->flags & F_MAP_PW_ID) + pw_len += PW_STATUS_TLV_LEN; + if (map->flags & F_MAP_PW_IFMTU) + pw_len += FEC_SUBTLV_IFMTU_SIZE; + + len = FEC_PWID_ELM_MIN_LEN + pw_len; + + ft.length = htons(len); + err |= ibuf_add(buf, &ft, sizeof(ft)); + + err |= ibuf_add(buf, &map->type, sizeof(uint8_t)); + pw_type = map->fec.pwid.type; + if (map->flags & F_MAP_PW_CWORD) + pw_type |= CONTROL_WORD_FLAG; + pw_type = htons(pw_type); + err |= ibuf_add(buf, &pw_type, sizeof(uint16_t)); + err |= ibuf_add(buf, &pw_len, sizeof(uint8_t)); + group_id = htonl(map->fec.pwid.group_id); + err |= ibuf_add(buf, &group_id, sizeof(uint32_t)); + if (map->flags & F_MAP_PW_ID) { + pwid = htonl(map->fec.pwid.pwid); + err |= ibuf_add(buf, &pwid, sizeof(uint32_t)); + } + if (map->flags & F_MAP_PW_IFMTU) { + struct subtlv stlv; + + stlv.type = SUBTLV_IFMTU; + stlv.length = FEC_SUBTLV_IFMTU_SIZE; + err |= ibuf_add(buf, &stlv, sizeof(uint16_t)); + + ifmtu = htons(map->fec.pwid.ifmtu); + err |= ibuf_add(buf, &ifmtu, sizeof(uint16_t)); + } + break; + default: + break; + } + + return (err); +} + +int +tlv_decode_fec_elm(struct nbr *nbr, struct ldp_msg *msg, char *buf, + uint16_t len, struct map *map) +{ + uint16_t off = 0; + uint8_t pw_len; + + map->type = *buf; + off += sizeof(uint8_t); + + switch (map->type) { + case MAP_TYPE_WILDCARD: + if (len == FEC_ELM_WCARD_LEN) + return (off); + else { + session_shutdown(nbr, S_BAD_TLV_VAL, msg->id, + msg->type); + return (-1); + } + break; + case MAP_TYPE_PREFIX: + if (len < FEC_ELM_PREFIX_MIN_LEN) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + /* Address Family */ + memcpy(&map->fec.prefix.af, buf + off, + sizeof(map->fec.prefix.af)); + off += sizeof(map->fec.prefix.af); + map->fec.prefix.af = ntohs(map->fec.prefix.af); + switch (map->fec.prefix.af) { + case AF_IPV4: + map->fec.prefix.af = AF_INET; + break; + case AF_IPV6: + map->fec.prefix.af = AF_INET6; + break; + 
default: + send_notification_nbr(nbr, S_UNSUP_ADDR, msg->id, + msg->type); + return (-1); + } + + /* Prefix Length */ + map->fec.prefix.prefixlen = buf[off]; + off += sizeof(uint8_t); + if (len < off + PREFIX_SIZE(map->fec.prefix.prefixlen)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + /* Prefix */ + memset(&map->fec.prefix.prefix, 0, + sizeof(map->fec.prefix.prefix)); + memcpy(&map->fec.prefix.prefix, buf + off, + PREFIX_SIZE(map->fec.prefix.prefixlen)); + + /* Just in case... */ + ldp_applymask(map->fec.prefix.af, &map->fec.prefix.prefix, + &map->fec.prefix.prefix, map->fec.prefix.prefixlen); + + return (off + PREFIX_SIZE(map->fec.prefix.prefixlen)); + case MAP_TYPE_PWID: + if (len < FEC_PWID_ELM_MIN_LEN) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + /* PW type */ + memcpy(&map->fec.pwid.type, buf + off, sizeof(uint16_t)); + map->fec.pwid.type = ntohs(map->fec.pwid.type); + if (map->fec.pwid.type & CONTROL_WORD_FLAG) { + map->flags |= F_MAP_PW_CWORD; + map->fec.pwid.type &= ~CONTROL_WORD_FLAG; + } + off += sizeof(uint16_t); + + /* PW info Length */ + pw_len = buf[off]; + off += sizeof(uint8_t); + + if (len != FEC_PWID_ELM_MIN_LEN + pw_len) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + /* Group ID */ + memcpy(&map->fec.pwid.group_id, buf + off, sizeof(uint32_t)); + map->fec.pwid.group_id = ntohl(map->fec.pwid.group_id); + off += sizeof(uint32_t); + + /* PW ID */ + if (pw_len == 0) + return (off); + + if (pw_len < sizeof(uint32_t)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + memcpy(&map->fec.pwid.pwid, buf + off, sizeof(uint32_t)); + map->fec.pwid.pwid = ntohl(map->fec.pwid.pwid); + map->flags |= F_MAP_PW_ID; + off += sizeof(uint32_t); + pw_len -= sizeof(uint32_t); + + /* Optional Interface Parameter Sub-TLVs */ + while (pw_len > 0) { + struct subtlv stlv; + + if (pw_len < sizeof(stlv)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + memcpy(&stlv, buf + off, sizeof(stlv)); + if (stlv.length > pw_len) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + return (-1); + } + + switch (stlv.type) { + case SUBTLV_IFMTU: + if (stlv.length != FEC_SUBTLV_IFMTU_SIZE) { + session_shutdown(nbr, S_BAD_TLV_LEN, + msg->id, msg->type); + return (-1); + } + memcpy(&map->fec.pwid.ifmtu, buf + off + + SUBTLV_HDR_SIZE, sizeof(uint16_t)); + map->fec.pwid.ifmtu = ntohs(map->fec.pwid.ifmtu); + map->flags |= F_MAP_PW_IFMTU; + break; + default: + /* ignore */ + break; + } + off += stlv.length; + pw_len -= stlv.length; + } + + return (off); + default: + send_notification_nbr(nbr, S_UNKNOWN_FEC, msg->id, msg->type); + break; + } + + return (-1); +} diff --git a/ldpd/lde.c b/ldpd/lde.c new file mode 100644 index 0000000000..904d0f8d9a --- /dev/null +++ b/ldpd/lde.c @@ -0,0 +1,1354 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2004, 2005 Claudio Jeker + * Copyright (c) 2004 Esben Norby + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldp.h" +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" +#include "lde.h" +#include "ldp_debug.h" + +#include +#include "memory.h" +#include "privs.h" +#include "sigevent.h" +#include "mpls.h" + +static void lde_shutdown(void); +static int lde_dispatch_imsg(struct thread *); +static int lde_dispatch_parent(struct thread *); +static __inline int lde_nbr_compare(struct lde_nbr *, + struct lde_nbr *); +static struct lde_nbr *lde_nbr_new(uint32_t, struct lde_nbr *); +static void lde_nbr_del(struct lde_nbr *); +static struct lde_nbr *lde_nbr_find(uint32_t); +static void lde_nbr_clear(void); +static void lde_nbr_addr_update(struct lde_nbr *, + struct lde_addr *, int); +static void lde_map_free(void *); +static int lde_address_add(struct lde_nbr *, struct lde_addr *); +static int lde_address_del(struct lde_nbr *, struct lde_addr *); +static void lde_address_list_free(struct lde_nbr *); + +RB_GENERATE(nbr_tree, lde_nbr, entry, lde_nbr_compare) + +struct ldpd_conf *ldeconf; +struct nbr_tree lde_nbrs = RB_INITIALIZER(&lde_nbrs); + +static struct imsgev *iev_ldpe; +static struct imsgev *iev_main; + +/* Master of threads. */ +struct thread_master *master; + +/* lde privileges */ +static zebra_capabilities_t _caps_p [] = +{ + /* none */ +}; + +static struct zebra_privs_t lde_privs = +{ +#if defined(QUAGGA_USER) && defined(QUAGGA_GROUP) + .user = QUAGGA_USER, + .group = QUAGGA_GROUP, +#endif +#if defined(VTY_GROUP) + .vty_group = VTY_GROUP, +#endif + .caps_p = _caps_p, + .cap_num_p = array_size(_caps_p), + .cap_num_i = 0 +}; + +/* SIGINT / SIGTERM handler. */ +static void +sigint(void) +{ + lde_shutdown(); +} + +static struct quagga_signal_t lde_signals[] = +{ + { + .signal = SIGINT, + .handler = &sigint, + }, + { + .signal = SIGTERM, + .handler = &sigint, + }, +}; + +/* label decision engine */ +void +lde(const char *user, const char *group) +{ + struct thread thread; + struct timeval now; + + ldeconf = config_new_empty(); + +#ifdef HAVE_SETPROCTITLE + setproctitle("label decision engine"); +#endif + ldpd_process = PROC_LDE_ENGINE; + + /* drop privileges */ + if (user) + lde_privs.user = user; + if (group) + lde_privs.group = group; + zprivs_init(&lde_privs); + +#ifdef HAVE_PLEDGE + if (pledge("stdio recvfd", NULL) == -1) + fatal("pledge"); +#endif + + master = thread_master_create(); + + /* setup signal handler */ + signal_init(master, array_size(lde_signals), lde_signals); + + /* setup pipe and event handler to the parent process */ + if ((iev_main = malloc(sizeof(struct imsgev))) == NULL) + fatal(NULL); + imsg_init(&iev_main->ibuf, 3); + iev_main->handler_read = lde_dispatch_parent; + iev_main->ev_read = thread_add_read(master, iev_main->handler_read, + iev_main, iev_main->ibuf.fd); + iev_main->handler_write = ldp_write_handler; + iev_main->ev_write = NULL; + + /* start the LIB garbage collector */ + lde_gc_start_timer(); + + gettimeofday(&now, NULL); + global.uptime = now.tv_sec; + + /* Fetch next active thread. 
*/ + while (thread_fetch(master, &thread)) + thread_call(&thread); +} + +static void +lde_shutdown(void) +{ + /* close pipes */ + msgbuf_clear(&iev_ldpe->ibuf.w); + close(iev_ldpe->ibuf.fd); + msgbuf_clear(&iev_main->ibuf.w); + close(iev_main->ibuf.fd); + + lde_gc_stop_timer(); + lde_nbr_clear(); + fec_tree_clear(); + + config_clear(ldeconf); + + free(iev_ldpe); + free(iev_main); + + log_info("label decision engine exiting"); + exit(0); +} + +/* imesg */ +int +lde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen) +{ + return (imsg_compose_event(iev_main, type, 0, pid, -1, data, datalen)); +} + +int +lde_imsg_compose_ldpe(int type, uint32_t peerid, pid_t pid, void *data, + uint16_t datalen) +{ + return (imsg_compose_event(iev_ldpe, type, peerid, pid, + -1, data, datalen)); +} + +/* ARGSUSED */ +static int +lde_dispatch_imsg(struct thread *thread) +{ + struct imsgev *iev = THREAD_ARG(thread); + struct imsgbuf *ibuf = &iev->ibuf; + struct imsg imsg; + struct lde_nbr *ln; + struct map map; + struct lde_addr lde_addr; + struct notify_msg nm; + ssize_t n; + int shut = 0; + + iev->ev_read = NULL; + + if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) + fatal("imsg_read error"); + if (n == 0) /* connection closed */ + shut = 1; + + for (;;) { + if ((n = imsg_get(ibuf, &imsg)) == -1) + fatal("lde_dispatch_imsg: imsg_get error"); + if (n == 0) + break; + + switch (imsg.hdr.type) { + case IMSG_LABEL_MAPPING_FULL: + ln = lde_nbr_find(imsg.hdr.peerid); + if (ln == NULL) { + log_debug("%s: cannot find lde neighbor", + __func__); + break; + } + + fec_snap(ln); + break; + case IMSG_LABEL_MAPPING: + case IMSG_LABEL_REQUEST: + case IMSG_LABEL_RELEASE: + case IMSG_LABEL_WITHDRAW: + case IMSG_LABEL_ABORT: + if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(map)) + fatalx("lde_dispatch_imsg: wrong imsg len"); + memcpy(&map, imsg.data, sizeof(map)); + + ln = lde_nbr_find(imsg.hdr.peerid); + if (ln == NULL) { + log_debug("%s: cannot find lde neighbor", + __func__); + break; + } + + switch (imsg.hdr.type) { + case IMSG_LABEL_MAPPING: + lde_check_mapping(&map, ln); + break; + case IMSG_LABEL_REQUEST: + lde_check_request(&map, ln); + break; + case IMSG_LABEL_RELEASE: + if (map.type == MAP_TYPE_WILDCARD) + lde_check_release_wcard(&map, ln); + else + lde_check_release(&map, ln); + break; + case IMSG_LABEL_WITHDRAW: + if (map.type == MAP_TYPE_WILDCARD) + lde_check_withdraw_wcard(&map, ln); + else + lde_check_withdraw(&map, ln); + break; + case IMSG_LABEL_ABORT: + /* not necessary */ + break; + } + break; + case IMSG_ADDRESS_ADD: + if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(lde_addr)) + fatalx("lde_dispatch_imsg: wrong imsg len"); + memcpy(&lde_addr, imsg.data, sizeof(lde_addr)); + + ln = lde_nbr_find(imsg.hdr.peerid); + if (ln == NULL) { + log_debug("%s: cannot find lde neighbor", + __func__); + break; + } + if (lde_address_add(ln, &lde_addr) < 0) { + log_debug("%s: cannot add address %s, it " + "already exists", __func__, + log_addr(lde_addr.af, &lde_addr.addr)); + } + break; + case IMSG_ADDRESS_DEL: + if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(lde_addr)) + fatalx("lde_dispatch_imsg: wrong imsg len"); + memcpy(&lde_addr, imsg.data, sizeof(lde_addr)); + + ln = lde_nbr_find(imsg.hdr.peerid); + if (ln == NULL) { + log_debug("%s: cannot find lde neighbor", + __func__); + break; + } + if (lde_address_del(ln, &lde_addr) < 0) { + log_debug("%s: cannot delete address %s, it " + "does not exist", __func__, + log_addr(lde_addr.af, &lde_addr.addr)); + } + break; + case IMSG_NOTIFICATION: + if 
(imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(nm)) + fatalx("lde_dispatch_imsg: wrong imsg len"); + memcpy(&nm, imsg.data, sizeof(nm)); + + ln = lde_nbr_find(imsg.hdr.peerid); + if (ln == NULL) { + log_debug("%s: cannot find lde neighbor", + __func__); + break; + } + + switch (nm.status_code) { + case S_PW_STATUS: + l2vpn_recv_pw_status(ln, &nm); + break; + default: + break; + } + break; + case IMSG_NEIGHBOR_UP: + if (imsg.hdr.len - IMSG_HEADER_SIZE != + sizeof(struct lde_nbr)) + fatalx("lde_dispatch_imsg: wrong imsg len"); + + if (lde_nbr_find(imsg.hdr.peerid)) + fatalx("lde_dispatch_imsg: " + "neighbor already exists"); + lde_nbr_new(imsg.hdr.peerid, imsg.data); + break; + case IMSG_NEIGHBOR_DOWN: + lde_nbr_del(lde_nbr_find(imsg.hdr.peerid)); + break; + case IMSG_CTL_SHOW_LIB: + rt_dump(imsg.hdr.pid); + + lde_imsg_compose_ldpe(IMSG_CTL_END, 0, + imsg.hdr.pid, NULL, 0); + break; + case IMSG_CTL_SHOW_L2VPN_PW: + l2vpn_pw_ctl(imsg.hdr.pid); + + lde_imsg_compose_ldpe(IMSG_CTL_END, 0, + imsg.hdr.pid, NULL, 0); + break; + case IMSG_CTL_SHOW_L2VPN_BINDING: + l2vpn_binding_ctl(imsg.hdr.pid); + + lde_imsg_compose_ldpe(IMSG_CTL_END, 0, + imsg.hdr.pid, NULL, 0); + break; + default: + log_debug("%s: unexpected imsg %d", __func__, + imsg.hdr.type); + break; + } + imsg_free(&imsg); + } + if (!shut) + imsg_event_add(iev); + else { + /* this pipe is dead, so remove the event handlers and exit */ + THREAD_READ_OFF(iev->ev_read); + THREAD_WRITE_OFF(iev->ev_write); + lde_shutdown(); + } + + return (0); +} + +/* ARGSUSED */ +static int +lde_dispatch_parent(struct thread *thread) +{ + static struct ldpd_conf *nconf; + struct iface *niface; + struct tnbr *ntnbr; + struct nbr_params *nnbrp; + static struct l2vpn *nl2vpn; + struct l2vpn_if *nlif; + struct l2vpn_pw *npw; + struct imsg imsg; + struct kroute kr; + int fd = THREAD_FD(thread); + struct imsgev *iev = THREAD_ARG(thread); + struct imsgbuf *ibuf = &iev->ibuf; + ssize_t n; + int shut = 0; + struct fec fec; + + iev->ev_read = NULL; + + if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) + fatal("imsg_read error"); + if (n == 0) /* connection closed */ + shut = 1; + + for (;;) { + if ((n = imsg_get(ibuf, &imsg)) == -1) + fatal("lde_dispatch_parent: imsg_get error"); + if (n == 0) + break; + + switch (imsg.hdr.type) { + case IMSG_NETWORK_ADD: + case IMSG_NETWORK_ADD_END: + case IMSG_NETWORK_DEL: + if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + memcpy(&kr, imsg.data, sizeof(kr)); + + switch (kr.af) { + case AF_INET: + fec.type = FEC_TYPE_IPV4; + fec.u.ipv4.prefix = kr.prefix.v4; + fec.u.ipv4.prefixlen = kr.prefixlen; + break; + case AF_INET6: + fec.type = FEC_TYPE_IPV6; + fec.u.ipv6.prefix = kr.prefix.v6; + fec.u.ipv6.prefixlen = kr.prefixlen; + break; + default: + fatalx("lde_dispatch_parent: unknown af"); + } + + switch (imsg.hdr.type) { + case IMSG_NETWORK_ADD: + lde_kernel_insert(&fec, kr.af, &kr.nexthop, + kr.priority, kr.flags & F_CONNECTED, NULL); + break; + case IMSG_NETWORK_ADD_END: + lde_kernel_reevaluate(&fec); + break; + case IMSG_NETWORK_DEL: + lde_kernel_remove(&fec, kr.af, &kr.nexthop, + kr.priority); + break; + } + break; + case IMSG_SOCKET_IPC: + if (iev_ldpe) { + log_warnx("%s: received unexpected imsg fd " + "to ldpe", __func__); + break; + } + if ((fd = imsg.fd) == -1) { + log_warnx("%s: expected to receive imsg fd to " + "ldpe but didn't receive any", __func__); + break; + } + + if ((iev_ldpe = malloc(sizeof(struct imsgev))) == NULL) + fatal(NULL); + 
imsg_init(&iev_ldpe->ibuf, fd); + iev_ldpe->handler_read = lde_dispatch_imsg; + iev_ldpe->ev_read = thread_add_read(master, + iev_ldpe->handler_read, iev_ldpe, iev_ldpe->ibuf.fd); + iev_ldpe->handler_write = ldp_write_handler; + iev_ldpe->ev_write = NULL; + break; + case IMSG_RECONF_CONF: + if ((nconf = malloc(sizeof(struct ldpd_conf))) == + NULL) + fatal(NULL); + memcpy(nconf, imsg.data, sizeof(struct ldpd_conf)); + + LIST_INIT(&nconf->iface_list); + LIST_INIT(&nconf->tnbr_list); + LIST_INIT(&nconf->nbrp_list); + LIST_INIT(&nconf->l2vpn_list); + break; + case IMSG_RECONF_IFACE: + if ((niface = malloc(sizeof(struct iface))) == NULL) + fatal(NULL); + memcpy(niface, imsg.data, sizeof(struct iface)); + + LIST_INIT(&niface->addr_list); + LIST_INIT(&niface->ipv4.adj_list); + LIST_INIT(&niface->ipv6.adj_list); + niface->ipv4.iface = niface; + niface->ipv6.iface = niface; + + LIST_INSERT_HEAD(&nconf->iface_list, niface, entry); + break; + case IMSG_RECONF_TNBR: + if ((ntnbr = malloc(sizeof(struct tnbr))) == NULL) + fatal(NULL); + memcpy(ntnbr, imsg.data, sizeof(struct tnbr)); + + LIST_INSERT_HEAD(&nconf->tnbr_list, ntnbr, entry); + break; + case IMSG_RECONF_NBRP: + if ((nnbrp = malloc(sizeof(struct nbr_params))) == NULL) + fatal(NULL); + memcpy(nnbrp, imsg.data, sizeof(struct nbr_params)); + + LIST_INSERT_HEAD(&nconf->nbrp_list, nnbrp, entry); + break; + case IMSG_RECONF_L2VPN: + if ((nl2vpn = malloc(sizeof(struct l2vpn))) == NULL) + fatal(NULL); + memcpy(nl2vpn, imsg.data, sizeof(struct l2vpn)); + + LIST_INIT(&nl2vpn->if_list); + LIST_INIT(&nl2vpn->pw_list); + LIST_INIT(&nl2vpn->pw_inactive_list); + + LIST_INSERT_HEAD(&nconf->l2vpn_list, nl2vpn, entry); + break; + case IMSG_RECONF_L2VPN_IF: + if ((nlif = malloc(sizeof(struct l2vpn_if))) == NULL) + fatal(NULL); + memcpy(nlif, imsg.data, sizeof(struct l2vpn_if)); + + nlif->l2vpn = nl2vpn; + LIST_INSERT_HEAD(&nl2vpn->if_list, nlif, entry); + break; + case IMSG_RECONF_L2VPN_PW: + if ((npw = malloc(sizeof(struct l2vpn_pw))) == NULL) + fatal(NULL); + memcpy(npw, imsg.data, sizeof(struct l2vpn_pw)); + + npw->l2vpn = nl2vpn; + LIST_INSERT_HEAD(&nl2vpn->pw_list, npw, entry); + break; + case IMSG_RECONF_L2VPN_IPW: + if ((npw = malloc(sizeof(struct l2vpn_pw))) == NULL) + fatal(NULL); + memcpy(npw, imsg.data, sizeof(struct l2vpn_pw)); + + npw->l2vpn = nl2vpn; + LIST_INSERT_HEAD(&nl2vpn->pw_inactive_list, npw, entry); + break; + case IMSG_RECONF_END: + merge_config(ldeconf, nconf); + nconf = NULL; + break; + case IMSG_DEBUG_UPDATE: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(ldp_debug)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + memcpy(&ldp_debug, imsg.data, sizeof(ldp_debug)); + break; + default: + log_debug("%s: unexpected imsg %d", __func__, + imsg.hdr.type); + break; + } + imsg_free(&imsg); + } + if (!shut) + imsg_event_add(iev); + else { + /* this pipe is dead, so remove the event handlers and exit */ + THREAD_READ_OFF(iev->ev_read); + THREAD_WRITE_OFF(iev->ev_write); + lde_shutdown(); + } + + return (0); +} + +uint32_t +lde_assign_label(void) +{ + static uint32_t label = MPLS_LABEL_RESERVED_MAX; + + /* + * TODO: request label to zebra or define a range of labels for ldpd. 
+ */ + + label++; + return (label); +} + +void +lde_send_change_klabel(struct fec_node *fn, struct fec_nh *fnh) +{ + struct kroute kr; + struct kpw kpw; + struct l2vpn_pw *pw; + + switch (fn->fec.type) { + case FEC_TYPE_IPV4: + memset(&kr, 0, sizeof(kr)); + kr.af = AF_INET; + kr.prefix.v4 = fn->fec.u.ipv4.prefix; + kr.prefixlen = fn->fec.u.ipv4.prefixlen; + kr.nexthop.v4 = fnh->nexthop.v4; + kr.local_label = fn->local_label; + kr.remote_label = fnh->remote_label; + kr.priority = fnh->priority; + + lde_imsg_compose_parent(IMSG_KLABEL_CHANGE, 0, &kr, + sizeof(kr)); + + if (fn->fec.u.ipv4.prefixlen == 32) + l2vpn_sync_pws(AF_INET, (union ldpd_addr *) + &fn->fec.u.ipv4.prefix); + break; + case FEC_TYPE_IPV6: + memset(&kr, 0, sizeof(kr)); + kr.af = AF_INET6; + kr.prefix.v6 = fn->fec.u.ipv6.prefix; + kr.prefixlen = fn->fec.u.ipv6.prefixlen; + kr.nexthop.v6 = fnh->nexthop.v6; + kr.local_label = fn->local_label; + kr.remote_label = fnh->remote_label; + kr.priority = fnh->priority; + + lde_imsg_compose_parent(IMSG_KLABEL_CHANGE, 0, &kr, + sizeof(kr)); + + if (fn->fec.u.ipv6.prefixlen == 128) + l2vpn_sync_pws(AF_INET6, (union ldpd_addr *) + &fn->fec.u.ipv6.prefix); + break; + case FEC_TYPE_PWID: + if (fn->local_label == NO_LABEL || + fnh->remote_label == NO_LABEL) + return; + + pw = (struct l2vpn_pw *) fn->data; + pw->flags |= F_PW_STATUS_UP; + + memset(&kpw, 0, sizeof(kpw)); + kpw.ifindex = pw->ifindex; + kpw.pw_type = fn->fec.u.pwid.type; + kpw.af = pw->af; + kpw.nexthop = pw->addr; + kpw.local_label = fn->local_label; + kpw.remote_label = fnh->remote_label; + kpw.flags = pw->flags; + + lde_imsg_compose_parent(IMSG_KPWLABEL_CHANGE, 0, &kpw, + sizeof(kpw)); + break; + } +} + +void +lde_send_delete_klabel(struct fec_node *fn, struct fec_nh *fnh) +{ + struct kroute kr; + struct kpw kpw; + struct l2vpn_pw *pw; + + switch (fn->fec.type) { + case FEC_TYPE_IPV4: + memset(&kr, 0, sizeof(kr)); + kr.af = AF_INET; + kr.prefix.v4 = fn->fec.u.ipv4.prefix; + kr.prefixlen = fn->fec.u.ipv4.prefixlen; + kr.nexthop.v4 = fnh->nexthop.v4; + kr.local_label = fn->local_label; + kr.remote_label = fnh->remote_label; + kr.priority = fnh->priority; + + lde_imsg_compose_parent(IMSG_KLABEL_DELETE, 0, &kr, + sizeof(kr)); + + if (fn->fec.u.ipv4.prefixlen == 32) + l2vpn_sync_pws(AF_INET, (union ldpd_addr *) + &fn->fec.u.ipv4.prefix); + break; + case FEC_TYPE_IPV6: + memset(&kr, 0, sizeof(kr)); + kr.af = AF_INET6; + kr.prefix.v6 = fn->fec.u.ipv6.prefix; + kr.prefixlen = fn->fec.u.ipv6.prefixlen; + kr.nexthop.v6 = fnh->nexthop.v6; + kr.local_label = fn->local_label; + kr.remote_label = fnh->remote_label; + kr.priority = fnh->priority; + + lde_imsg_compose_parent(IMSG_KLABEL_DELETE, 0, &kr, + sizeof(kr)); + + if (fn->fec.u.ipv6.prefixlen == 128) + l2vpn_sync_pws(AF_INET6, (union ldpd_addr *) + &fn->fec.u.ipv6.prefix); + break; + case FEC_TYPE_PWID: + pw = (struct l2vpn_pw *) fn->data; + if (!(pw->flags & F_PW_STATUS_UP)) + return; + pw->flags &= ~F_PW_STATUS_UP; + + memset(&kpw, 0, sizeof(kpw)); + kpw.ifindex = pw->ifindex; + kpw.pw_type = fn->fec.u.pwid.type; + kpw.af = pw->af; + kpw.nexthop = pw->addr; + kpw.local_label = fn->local_label; + kpw.remote_label = fnh->remote_label; + kpw.flags = pw->flags; + + lde_imsg_compose_parent(IMSG_KPWLABEL_DELETE, 0, &kpw, + sizeof(kpw)); + break; + } +} + +void +lde_fec2map(struct fec *fec, struct map *map) +{ + memset(map, 0, sizeof(*map)); + + switch (fec->type) { + case FEC_TYPE_IPV4: + map->type = MAP_TYPE_PREFIX; + map->fec.prefix.af = AF_INET; + map->fec.prefix.prefix.v4 = 
fec->u.ipv4.prefix; + map->fec.prefix.prefixlen = fec->u.ipv4.prefixlen; + break; + case FEC_TYPE_IPV6: + map->type = MAP_TYPE_PREFIX; + map->fec.prefix.af = AF_INET6; + map->fec.prefix.prefix.v6 = fec->u.ipv6.prefix; + map->fec.prefix.prefixlen = fec->u.ipv6.prefixlen; + break; + case FEC_TYPE_PWID: + map->type = MAP_TYPE_PWID; + map->fec.pwid.type = fec->u.pwid.type; + map->fec.pwid.group_id = 0; + map->flags |= F_MAP_PW_ID; + map->fec.pwid.pwid = fec->u.pwid.pwid; + break; + } +} + +void +lde_map2fec(struct map *map, struct in_addr lsr_id, struct fec *fec) +{ + memset(fec, 0, sizeof(*fec)); + + switch (map->type) { + case MAP_TYPE_PREFIX: + switch (map->fec.prefix.af) { + case AF_INET: + fec->type = FEC_TYPE_IPV4; + fec->u.ipv4.prefix = map->fec.prefix.prefix.v4; + fec->u.ipv4.prefixlen = map->fec.prefix.prefixlen; + break; + case AF_INET6: + fec->type = FEC_TYPE_IPV6; + fec->u.ipv6.prefix = map->fec.prefix.prefix.v6; + fec->u.ipv6.prefixlen = map->fec.prefix.prefixlen; + break; + default: + fatalx("lde_map2fec: unknown af"); + break; + } + break; + case MAP_TYPE_PWID: + fec->type = FEC_TYPE_PWID; + fec->u.pwid.type = map->fec.pwid.type; + fec->u.pwid.pwid = map->fec.pwid.pwid; + fec->u.pwid.lsr_id = lsr_id; + break; + } +} + +void +lde_send_labelmapping(struct lde_nbr *ln, struct fec_node *fn, int single) +{ + struct lde_req *lre; + struct lde_map *me; + struct map map; + struct l2vpn_pw *pw; + + /* + * This function skips SL.1 - 3 and SL.9 - 14 because the label + * allocation is done way earlier (because of the merging nature of + * ldpd). + */ + + lde_fec2map(&fn->fec, &map); + switch (fn->fec.type) { + case FEC_TYPE_IPV4: + if (!ln->v4_enabled) + return; + break; + case FEC_TYPE_IPV6: + if (!ln->v6_enabled) + return; + break; + case FEC_TYPE_PWID: + pw = (struct l2vpn_pw *) fn->data; + if (pw == NULL || pw->lsr_id.s_addr != ln->id.s_addr) + /* not the remote end of the pseudowire */ + return; + + map.flags |= F_MAP_PW_IFMTU; + map.fec.pwid.ifmtu = pw->l2vpn->mtu; + if (pw->flags & F_PW_CWORD) + map.flags |= F_MAP_PW_CWORD; + if (pw->flags & F_PW_STATUSTLV) { + map.flags |= F_MAP_PW_STATUS; + /* VPLS are always up */ + map.pw_status = PW_FORWARDING; + } + break; + } + map.label = fn->local_label; + + /* SL.6: is there a pending request for this mapping? */ + lre = (struct lde_req *)fec_find(&ln->recv_req, &fn->fec); + if (lre) { + /* set label request msg id in the mapping response. 
*/ + map.requestid = lre->msg_id; + map.flags = F_MAP_REQ_ID; + + /* SL.7: delete record of pending request */ + lde_req_del(ln, lre, 0); + } + + /* SL.4: send label mapping */ + lde_imsg_compose_ldpe(IMSG_MAPPING_ADD, ln->peerid, 0, + &map, sizeof(map)); + if (single) + lde_imsg_compose_ldpe(IMSG_MAPPING_ADD_END, ln->peerid, 0, + NULL, 0); + + /* SL.5: record sent label mapping */ + me = (struct lde_map *)fec_find(&ln->sent_map, &fn->fec); + if (me == NULL) + me = lde_map_add(ln, fn, 1); + me->map = map; +} + +void +lde_send_labelwithdraw(struct lde_nbr *ln, struct fec_node *fn, uint32_t label, + struct status_tlv *st) +{ + struct lde_wdraw *lw; + struct map map; + struct fec *f; + struct l2vpn_pw *pw; + + if (fn) { + lde_fec2map(&fn->fec, &map); + switch (fn->fec.type) { + case FEC_TYPE_IPV4: + if (!ln->v4_enabled) + return; + break; + case FEC_TYPE_IPV6: + if (!ln->v6_enabled) + return; + break; + case FEC_TYPE_PWID: + pw = (struct l2vpn_pw *) fn->data; + if (pw == NULL || pw->lsr_id.s_addr != ln->id.s_addr) + /* not the remote end of the pseudowire */ + return; + + if (pw->flags & F_PW_CWORD) + map.flags |= F_MAP_PW_CWORD; + break; + } + map.label = fn->local_label; + } else { + memset(&map, 0, sizeof(map)); + map.type = MAP_TYPE_WILDCARD; + map.label = label; + } + + if (st) { + map.st.status_code = st->status_code; + map.st.msg_id = st->msg_id; + map.st.msg_type = st->msg_type; + map.flags |= F_MAP_STATUS; + } + + /* SWd.1: send label withdraw. */ + lde_imsg_compose_ldpe(IMSG_WITHDRAW_ADD, ln->peerid, 0, + &map, sizeof(map)); + lde_imsg_compose_ldpe(IMSG_WITHDRAW_ADD_END, ln->peerid, 0, NULL, 0); + + /* SWd.2: record label withdraw. */ + if (fn) { + lw = (struct lde_wdraw *)fec_find(&ln->sent_wdraw, &fn->fec); + if (lw == NULL) + lw = lde_wdraw_add(ln, fn); + lw->label = map.label; + } else { + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + + lw = (struct lde_wdraw *)fec_find(&ln->sent_wdraw, + &fn->fec); + if (lw == NULL) + lw = lde_wdraw_add(ln, fn); + lw->label = map.label; + } + } +} + +void +lde_send_labelwithdraw_all(struct fec_node *fn, uint32_t label) +{ + struct lde_nbr *ln; + + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_send_labelwithdraw(ln, fn, label, NULL); +} + +void +lde_send_labelrelease(struct lde_nbr *ln, struct fec_node *fn, uint32_t label) +{ + struct map map; + struct l2vpn_pw *pw; + + if (fn) { + lde_fec2map(&fn->fec, &map); + switch (fn->fec.type) { + case FEC_TYPE_IPV4: + if (!ln->v4_enabled) + return; + break; + case FEC_TYPE_IPV6: + if (!ln->v6_enabled) + return; + break; + case FEC_TYPE_PWID: + pw = (struct l2vpn_pw *) fn->data; + if (pw == NULL || pw->lsr_id.s_addr != ln->id.s_addr) + /* not the remote end of the pseudowire */ + return; + + if (pw->flags & F_PW_CWORD) + map.flags |= F_MAP_PW_CWORD; + break; + } + } else { + memset(&map, 0, sizeof(map)); + map.type = MAP_TYPE_WILDCARD; + } + map.label = label; + + lde_imsg_compose_ldpe(IMSG_RELEASE_ADD, ln->peerid, 0, + &map, sizeof(map)); + lde_imsg_compose_ldpe(IMSG_RELEASE_ADD_END, ln->peerid, 0, NULL, 0); +} + +void +lde_send_notification(uint32_t peerid, uint32_t status_code, uint32_t msg_id, + uint16_t msg_type) +{ + struct notify_msg nm; + + memset(&nm, 0, sizeof(nm)); + nm.status_code = status_code; + /* 'msg_id' and 'msg_type' should be in network byte order */ + nm.msg_id = msg_id; + nm.msg_type = msg_type; + + lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, peerid, 0, + &nm, sizeof(nm)); +} + +static __inline int +lde_nbr_compare(struct lde_nbr *a, struct lde_nbr *b) +{ + return 
(a->peerid - b->peerid); +} + +static struct lde_nbr * +lde_nbr_new(uint32_t peerid, struct lde_nbr *new) +{ + struct lde_nbr *ln; + + if ((ln = calloc(1, sizeof(*ln))) == NULL) + fatal(__func__); + + ln->id = new->id; + ln->v4_enabled = new->v4_enabled; + ln->v6_enabled = new->v6_enabled; + ln->peerid = peerid; + fec_init(&ln->recv_map); + fec_init(&ln->sent_map); + fec_init(&ln->recv_req); + fec_init(&ln->sent_req); + fec_init(&ln->sent_wdraw); + + TAILQ_INIT(&ln->addr_list); + + if (RB_INSERT(nbr_tree, &lde_nbrs, ln) != NULL) + fatalx("lde_nbr_new: RB_INSERT failed"); + + return (ln); +} + +static void +lde_nbr_del(struct lde_nbr *ln) +{ + struct fec *f; + struct fec_node *fn; + struct fec_nh *fnh; + struct l2vpn_pw *pw; + + if (ln == NULL) + return; + + /* uninstall received mappings */ + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + + LIST_FOREACH(fnh, &fn->nexthops, entry) { + switch (f->type) { + case FEC_TYPE_IPV4: + case FEC_TYPE_IPV6: + if (!lde_address_find(ln, fnh->af, + &fnh->nexthop)) + continue; + break; + case FEC_TYPE_PWID: + if (f->u.pwid.lsr_id.s_addr != ln->id.s_addr) + continue; + pw = (struct l2vpn_pw *) fn->data; + if (pw) + l2vpn_pw_reset(pw); + break; + default: + break; + } + + lde_send_delete_klabel(fn, fnh); + fnh->remote_label = NO_LABEL; + } + } + + lde_address_list_free(ln); + + fec_clear(&ln->recv_map, lde_map_free); + fec_clear(&ln->sent_map, lde_map_free); + fec_clear(&ln->recv_req, free); + fec_clear(&ln->sent_req, free); + fec_clear(&ln->sent_wdraw, free); + + RB_REMOVE(nbr_tree, &lde_nbrs, ln); + + free(ln); +} + +static struct lde_nbr * +lde_nbr_find(uint32_t peerid) +{ + struct lde_nbr ln; + + ln.peerid = peerid; + + return (RB_FIND(nbr_tree, &lde_nbrs, &ln)); +} + +struct lde_nbr * +lde_nbr_find_by_lsrid(struct in_addr addr) +{ + struct lde_nbr *ln; + + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + if (ln->id.s_addr == addr.s_addr) + return (ln); + + return (NULL); +} + +struct lde_nbr * +lde_nbr_find_by_addr(int af, union ldpd_addr *addr) +{ + struct lde_nbr *ln; + + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + if (lde_address_find(ln, af, addr) != NULL) + return (ln); + + return (NULL); +} + +static void +lde_nbr_clear(void) +{ + struct lde_nbr *ln; + + while ((ln = RB_ROOT(&lde_nbrs)) != NULL) + lde_nbr_del(ln); +} + +static void +lde_nbr_addr_update(struct lde_nbr *ln, struct lde_addr *lde_addr, int removed) +{ + struct fec *fec; + struct fec_node *fn; + struct fec_nh *fnh; + struct lde_map *me; + + RB_FOREACH(fec, fec_tree, &ln->recv_map) { + fn = (struct fec_node *)fec_find(&ft, fec); + switch (fec->type) { + case FEC_TYPE_IPV4: + if (lde_addr->af != AF_INET) + continue; + break; + case FEC_TYPE_IPV6: + if (lde_addr->af != AF_INET6) + continue; + break; + default: + continue; + } + + LIST_FOREACH(fnh, &fn->nexthops, entry) { + if (ldp_addrcmp(fnh->af, &fnh->nexthop, + &lde_addr->addr)) + continue; + + if (removed) { + lde_send_delete_klabel(fn, fnh); + fnh->remote_label = NO_LABEL; + } else { + me = (struct lde_map *)fec; + fnh->remote_label = me->map.label; + lde_send_change_klabel(fn, fnh); + } + break; + } + } +} + +struct lde_map * +lde_map_add(struct lde_nbr *ln, struct fec_node *fn, int sent) +{ + struct lde_map *me; + + me = calloc(1, sizeof(*me)); + if (me == NULL) + fatal(__func__); + + me->fec = fn->fec; + me->nexthop = ln; + + if (sent) { + LIST_INSERT_HEAD(&fn->upstream, me, entry); + if (fec_insert(&ln->sent_map, &me->fec)) + log_warnx("failed to add %s to sent map", + log_fec(&me->fec)); + /* XXX on failure more cleanup is needed 
*/ + } else { + LIST_INSERT_HEAD(&fn->downstream, me, entry); + if (fec_insert(&ln->recv_map, &me->fec)) + log_warnx("failed to add %s to recv map", + log_fec(&me->fec)); + } + + return (me); +} + +void +lde_map_del(struct lde_nbr *ln, struct lde_map *me, int sent) +{ + if (sent) + fec_remove(&ln->sent_map, &me->fec); + else + fec_remove(&ln->recv_map, &me->fec); + + lde_map_free(me); +} + +static void +lde_map_free(void *ptr) +{ + struct lde_map *map = ptr; + + LIST_REMOVE(map, entry); + free(map); +} + +struct lde_req * +lde_req_add(struct lde_nbr *ln, struct fec *fec, int sent) +{ + struct fec_tree *t; + struct lde_req *lre; + + t = sent ? &ln->sent_req : &ln->recv_req; + + lre = calloc(1, sizeof(*lre)); + if (lre != NULL) { + lre->fec = *fec; + + if (fec_insert(t, &lre->fec)) { + log_warnx("failed to add %s to %s req", + log_fec(&lre->fec), sent ? "sent" : "recv"); + free(lre); + return (NULL); + } + } + + return (lre); +} + +void +lde_req_del(struct lde_nbr *ln, struct lde_req *lre, int sent) +{ + if (sent) + fec_remove(&ln->sent_req, &lre->fec); + else + fec_remove(&ln->recv_req, &lre->fec); + + free(lre); +} + +struct lde_wdraw * +lde_wdraw_add(struct lde_nbr *ln, struct fec_node *fn) +{ + struct lde_wdraw *lw; + + lw = calloc(1, sizeof(*lw)); + if (lw == NULL) + fatal(__func__); + + lw->fec = fn->fec; + + if (fec_insert(&ln->sent_wdraw, &lw->fec)) + log_warnx("failed to add %s to sent wdraw", + log_fec(&lw->fec)); + + return (lw); +} + +void +lde_wdraw_del(struct lde_nbr *ln, struct lde_wdraw *lw) +{ + fec_remove(&ln->sent_wdraw, &lw->fec); + free(lw); +} + +void +lde_change_egress_label(int af, int was_implicit) +{ + struct lde_nbr *ln; + struct fec *f; + struct fec_node *fn; + + RB_FOREACH(ln, nbr_tree, &lde_nbrs) { + /* explicit withdraw */ + if (was_implicit) + lde_send_labelwithdraw(ln, NULL, MPLS_LABEL_IMPLNULL, + NULL); + else { + if (ln->v4_enabled) + lde_send_labelwithdraw(ln, NULL, + MPLS_LABEL_IPV4NULL, NULL); + if (ln->v6_enabled) + lde_send_labelwithdraw(ln, NULL, + MPLS_LABEL_IPV6NULL, NULL); + } + + /* advertise new label of connected prefixes */ + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + if (fn->local_label > MPLS_LABEL_RESERVED_MAX) + continue; + + switch (af) { + case AF_INET: + if (fn->fec.type != FEC_TYPE_IPV4) + continue; + break; + case AF_INET6: + if (fn->fec.type != FEC_TYPE_IPV6) + continue; + break; + default: + fatalx("lde_change_egress_label: unknown af"); + } + + fn->local_label = egress_label(fn->fec.type); + lde_send_labelmapping(ln, fn, 0); + } + + lde_imsg_compose_ldpe(IMSG_MAPPING_ADD_END, ln->peerid, 0, + NULL, 0); + } +} + +static int +lde_address_add(struct lde_nbr *ln, struct lde_addr *lde_addr) +{ + struct lde_addr *new; + + if (lde_address_find(ln, lde_addr->af, &lde_addr->addr) != NULL) + return (-1); + + if ((new = calloc(1, sizeof(*new))) == NULL) + fatal(__func__); + + new->af = lde_addr->af; + new->addr = lde_addr->addr; + TAILQ_INSERT_TAIL(&ln->addr_list, new, entry); + + /* reevaluate the previously received mappings from this neighbor */ + lde_nbr_addr_update(ln, lde_addr, 0); + + return (0); +} + +static int +lde_address_del(struct lde_nbr *ln, struct lde_addr *lde_addr) +{ + lde_addr = lde_address_find(ln, lde_addr->af, &lde_addr->addr); + if (lde_addr == NULL) + return (-1); + + /* reevaluate the previously received mappings from this neighbor */ + lde_nbr_addr_update(ln, lde_addr, 1); + + TAILQ_REMOVE(&ln->addr_list, lde_addr, entry); + free(lde_addr); + + return (0); +} + +struct lde_addr * 
+lde_address_find(struct lde_nbr *ln, int af, union ldpd_addr *addr) +{ + struct lde_addr *lde_addr; + + TAILQ_FOREACH(lde_addr, &ln->addr_list, entry) + if (lde_addr->af == af && + ldp_addrcmp(af, &lde_addr->addr, addr) == 0) + return (lde_addr); + + return (NULL); +} + +static void +lde_address_list_free(struct lde_nbr *ln) +{ + struct lde_addr *lde_addr; + + while ((lde_addr = TAILQ_FIRST(&ln->addr_list)) != NULL) { + TAILQ_REMOVE(&ln->addr_list, lde_addr, entry); + free(lde_addr); + } +} diff --git a/ldpd/lde.h b/ldpd/lde.h new file mode 100644 index 0000000000..cf8f2129af --- /dev/null +++ b/ldpd/lde.h @@ -0,0 +1,207 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2004, 2005 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LDE_H_ +#define _LDE_H_ + +#include "openbsd-queue.h" +#include "openbsd-tree.h" + +enum fec_type { + FEC_TYPE_IPV4, + FEC_TYPE_IPV6, + FEC_TYPE_PWID +}; + +struct fec { + RB_ENTRY(fec) entry; + enum fec_type type; + union { + struct { + struct in_addr prefix; + uint8_t prefixlen; + } ipv4; + struct { + struct in6_addr prefix; + uint8_t prefixlen; + } ipv6; + struct { + uint16_t type; + uint32_t pwid; + struct in_addr lsr_id; + } pwid; + } u; +}; +RB_HEAD(fec_tree, fec); +RB_PROTOTYPE(fec_tree, fec, entry, fec_compare) + +/* request entries */ +struct lde_req { + struct fec fec; + uint32_t msg_id; +}; + +/* mapping entries */ +struct lde_map { + struct fec fec; + LIST_ENTRY(lde_map) entry; + struct lde_nbr *nexthop; + struct map map; +}; + +/* withdraw entries */ +struct lde_wdraw { + struct fec fec; + uint32_t label; +}; + +/* Addresses belonging to neighbor */ +struct lde_addr { + TAILQ_ENTRY(lde_addr) entry; + int af; + union ldpd_addr addr; +}; + +/* just the info LDE needs */ +struct lde_nbr { + RB_ENTRY(lde_nbr) entry; + uint32_t peerid; + struct in_addr id; + int v4_enabled; /* announce/process v4 msgs */ + int v6_enabled; /* announce/process v6 msgs */ + struct fec_tree recv_req; + struct fec_tree sent_req; + struct fec_tree recv_map; + struct fec_tree sent_map; + struct fec_tree sent_wdraw; + TAILQ_HEAD(, lde_addr) addr_list; +}; +RB_HEAD(nbr_tree, lde_nbr); +RB_PROTOTYPE(nbr_tree, lde_nbr, entry, lde_nbr_compare) + +struct fec_nh { + LIST_ENTRY(fec_nh) entry; + int af; + union ldpd_addr nexthop; + uint32_t remote_label; + uint8_t priority; + uint8_t flags; +}; +#define F_FEC_NH_NEW 0x01 + +struct fec_node { + struct fec fec; + + LIST_HEAD(, fec_nh) nexthops; /* fib nexthops */ + LIST_HEAD(, lde_map) downstream; /* recv mappings */ + LIST_HEAD(, lde_map) upstream; /* sent mappings */ + + uint32_t local_label; + void *data; /* fec specific data */ +}; + +#define LDE_GC_INTERVAL 300 + +extern struct ldpd_conf *ldeconf; +extern struct fec_tree ft; +extern struct nbr_tree 
lde_nbrs; +extern struct thread *gc_timer; + +/* lde.c */ +void lde(const char *, const char *); +int lde_imsg_compose_parent(int, pid_t, void *, uint16_t); +int lde_imsg_compose_ldpe(int, uint32_t, pid_t, void *, uint16_t); +uint32_t lde_assign_label(void); +void lde_send_change_klabel(struct fec_node *, struct fec_nh *); +void lde_send_delete_klabel(struct fec_node *, struct fec_nh *); +void lde_fec2map(struct fec *, struct map *); +void lde_map2fec(struct map *, struct in_addr, struct fec *); +void lde_send_labelmapping(struct lde_nbr *, struct fec_node *, + int); +void lde_send_labelwithdraw(struct lde_nbr *, struct fec_node *, + uint32_t, struct status_tlv *); +void lde_send_labelwithdraw_all(struct fec_node *, uint32_t); +void lde_send_labelrelease(struct lde_nbr *, struct fec_node *, + uint32_t); +void lde_send_notification(uint32_t, uint32_t, uint32_t, uint16_t); +struct lde_nbr *lde_nbr_find_by_lsrid(struct in_addr); +struct lde_nbr *lde_nbr_find_by_addr(int, union ldpd_addr *); +struct lde_map *lde_map_add(struct lde_nbr *, struct fec_node *, int); +void lde_map_del(struct lde_nbr *, struct lde_map *, int); +struct lde_req *lde_req_add(struct lde_nbr *, struct fec *, int); +void lde_req_del(struct lde_nbr *, struct lde_req *, int); +struct lde_wdraw *lde_wdraw_add(struct lde_nbr *, struct fec_node *); +void lde_wdraw_del(struct lde_nbr *, struct lde_wdraw *); +void lde_change_egress_label(int, int); +struct lde_addr *lde_address_find(struct lde_nbr *, int, + union ldpd_addr *); + +/* lde_lib.c */ +void fec_init(struct fec_tree *); +struct fec *fec_find(struct fec_tree *, struct fec *); +int fec_insert(struct fec_tree *, struct fec *); +int fec_remove(struct fec_tree *, struct fec *); +void fec_clear(struct fec_tree *, void (*)(void *)); +void rt_dump(pid_t); +void fec_snap(struct lde_nbr *); +void fec_tree_clear(void); +struct fec_nh *fec_nh_find(struct fec_node *, int, union ldpd_addr *, + uint8_t); +uint32_t egress_label(enum fec_type); +void lde_kernel_insert(struct fec *, int, union ldpd_addr *, + uint8_t, int, void *); +void lde_kernel_remove(struct fec *, int, union ldpd_addr *, + uint8_t); +void lde_kernel_reevaluate(struct fec *); +void lde_check_mapping(struct map *, struct lde_nbr *); +void lde_check_request(struct map *, struct lde_nbr *); +void lde_check_release(struct map *, struct lde_nbr *); +void lde_check_release_wcard(struct map *, struct lde_nbr *); +void lde_check_withdraw(struct map *, struct lde_nbr *); +void lde_check_withdraw_wcard(struct map *, struct lde_nbr *); +int lde_gc_timer(struct thread *); +void lde_gc_start_timer(void); +void lde_gc_stop_timer(void); + +/* l2vpn.c */ +struct l2vpn *l2vpn_new(const char *); +struct l2vpn *l2vpn_find(struct ldpd_conf *, const char *); +void l2vpn_del(struct l2vpn *); +void l2vpn_init(struct l2vpn *); +void l2vpn_exit(struct l2vpn *); +struct l2vpn_if *l2vpn_if_new(struct l2vpn *, struct kif *); +struct l2vpn_if *l2vpn_if_find(struct l2vpn *, unsigned int); +struct l2vpn_if *l2vpn_if_find_name(struct l2vpn *, const char *); +struct l2vpn_pw *l2vpn_pw_new(struct l2vpn *, struct kif *); +struct l2vpn_pw *l2vpn_pw_find(struct l2vpn *, unsigned int); +struct l2vpn_pw *l2vpn_pw_find_name(struct l2vpn *, const char *); +void l2vpn_pw_init(struct l2vpn_pw *); +void l2vpn_pw_exit(struct l2vpn_pw *); +void l2vpn_pw_reset(struct l2vpn_pw *); +int l2vpn_pw_ok(struct l2vpn_pw *, struct fec_nh *); +int l2vpn_pw_negotiate(struct lde_nbr *, struct fec_node *, + struct map *); +void l2vpn_send_pw_status(uint32_t, uint32_t, struct 
fec *); +void l2vpn_recv_pw_status(struct lde_nbr *, struct notify_msg *); +void l2vpn_sync_pws(int, union ldpd_addr *); +void l2vpn_pw_ctl(pid_t); +void l2vpn_binding_ctl(pid_t); + +#endif /* _LDE_H_ */ diff --git a/ldpd/lde_lib.c b/ldpd/lde_lib.c new file mode 100644 index 0000000000..43e5f92f2f --- /dev/null +++ b/ldpd/lde_lib.c @@ -0,0 +1,804 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "lde.h" +#include "log.h" + +#include "mpls.h" + +static __inline int fec_compare(struct fec *, struct fec *); +static int lde_nbr_is_nexthop(struct fec_node *, + struct lde_nbr *); +static void fec_free(void *); +static struct fec_node *fec_add(struct fec *fec); +static struct fec_nh *fec_nh_add(struct fec_node *, int, union ldpd_addr *, + uint8_t priority); +static void fec_nh_del(struct fec_nh *); + +RB_GENERATE(fec_tree, fec, entry, fec_compare) + +struct fec_tree ft = RB_INITIALIZER(&ft); +struct thread *gc_timer; + +/* FEC tree functions */ +void +fec_init(struct fec_tree *fh) +{ + RB_INIT(fh); +} + +static __inline int +fec_compare(struct fec *a, struct fec *b) +{ + if (a->type < b->type) + return (-1); + if (a->type > b->type) + return (1); + + switch (a->type) { + case FEC_TYPE_IPV4: + if (ntohl(a->u.ipv4.prefix.s_addr) < + ntohl(b->u.ipv4.prefix.s_addr)) + return (-1); + if (ntohl(a->u.ipv4.prefix.s_addr) > + ntohl(b->u.ipv4.prefix.s_addr)) + return (1); + if (a->u.ipv4.prefixlen < b->u.ipv4.prefixlen) + return (-1); + if (a->u.ipv4.prefixlen > b->u.ipv4.prefixlen) + return (1); + return (0); + case FEC_TYPE_IPV6: + if (memcmp(&a->u.ipv6.prefix, &b->u.ipv6.prefix, + sizeof(struct in6_addr)) < 0) + return (-1); + if (memcmp(&a->u.ipv6.prefix, &b->u.ipv6.prefix, + sizeof(struct in6_addr)) > 0) + return (1); + if (a->u.ipv6.prefixlen < b->u.ipv6.prefixlen) + return (-1); + if (a->u.ipv6.prefixlen > b->u.ipv6.prefixlen) + return (1); + return (0); + case FEC_TYPE_PWID: + if (a->u.pwid.type < b->u.pwid.type) + return (-1); + if (a->u.pwid.type > b->u.pwid.type) + return (1); + if (a->u.pwid.pwid < b->u.pwid.pwid) + return (-1); + if (a->u.pwid.pwid > b->u.pwid.pwid) + return (1); + if (ntohl(a->u.pwid.lsr_id.s_addr) < + ntohl(b->u.pwid.lsr_id.s_addr)) + return (-1); + if (ntohl(a->u.pwid.lsr_id.s_addr) > + ntohl(b->u.pwid.lsr_id.s_addr)) + return (1); + return (0); + } + + return (-1); +} + +struct fec * +fec_find(struct fec_tree *fh, struct fec *f) +{ + return (RB_FIND(fec_tree, fh, f)); +} + +int +fec_insert(struct fec_tree *fh, struct fec *f) +{ + if (RB_INSERT(fec_tree, fh, f) != NULL) + return (-1); + return (0); +} + +int +fec_remove(struct fec_tree *fh, struct fec *f) +{ + if (RB_REMOVE(fec_tree, fh, f) == NULL) { + log_warnx("%s failed for %s", 
__func__, log_fec(f)); + return (-1); + } + return (0); +} + +void +fec_clear(struct fec_tree *fh, void (*free_cb)(void *)) +{ + struct fec *f; + + while ((f = RB_ROOT(fh)) != NULL) { + fec_remove(fh, f); + free_cb(f); + } +} + +/* routing table functions */ +static int +lde_nbr_is_nexthop(struct fec_node *fn, struct lde_nbr *ln) +{ + struct fec_nh *fnh; + + LIST_FOREACH(fnh, &fn->nexthops, entry) + if (lde_address_find(ln, fnh->af, &fnh->nexthop)) + return (1); + + return (0); +} + +void +rt_dump(pid_t pid) +{ + struct fec *f; + struct fec_node *fn; + struct lde_map *me; + static struct ctl_rt rtctl; + + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + if (fn->local_label == NO_LABEL && + LIST_EMPTY(&fn->downstream)) + continue; + + rtctl.first = 1; + switch (fn->fec.type) { + case FEC_TYPE_IPV4: + rtctl.af = AF_INET; + rtctl.prefix.v4 = fn->fec.u.ipv4.prefix; + rtctl.prefixlen = fn->fec.u.ipv4.prefixlen; + break; + case FEC_TYPE_IPV6: + rtctl.af = AF_INET6; + rtctl.prefix.v6 = fn->fec.u.ipv6.prefix; + rtctl.prefixlen = fn->fec.u.ipv6.prefixlen; + break; + default: + continue; + } + + rtctl.local_label = fn->local_label; + LIST_FOREACH(me, &fn->downstream, entry) { + rtctl.in_use = lde_nbr_is_nexthop(fn, me->nexthop); + rtctl.nexthop = me->nexthop->id; + rtctl.remote_label = me->map.label; + + lde_imsg_compose_ldpe(IMSG_CTL_SHOW_LIB, 0, pid, + &rtctl, sizeof(rtctl)); + rtctl.first = 0; + } + if (LIST_EMPTY(&fn->downstream)) { + rtctl.in_use = 0; + rtctl.nexthop.s_addr = INADDR_ANY; + rtctl.remote_label = NO_LABEL; + + lde_imsg_compose_ldpe(IMSG_CTL_SHOW_LIB, 0, pid, + &rtctl, sizeof(rtctl)); + } + } +} + +void +fec_snap(struct lde_nbr *ln) +{ + struct fec *f; + struct fec_node *fn; + + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + if (fn->local_label == NO_LABEL) + continue; + + lde_send_labelmapping(ln, fn, 0); + } + + lde_imsg_compose_ldpe(IMSG_MAPPING_ADD_END, ln->peerid, 0, NULL, 0); +} + +static void +fec_free(void *arg) +{ + struct fec_node *fn = arg; + struct fec_nh *fnh; + + while ((fnh = LIST_FIRST(&fn->nexthops))) + fec_nh_del(fnh); + if (!LIST_EMPTY(&fn->downstream)) + log_warnx("%s: fec %s downstream list not empty", __func__, + log_fec(&fn->fec)); + if (!LIST_EMPTY(&fn->upstream)) + log_warnx("%s: fec %s upstream list not empty", __func__, + log_fec(&fn->fec)); + + free(fn); +} + +void +fec_tree_clear(void) +{ + fec_clear(&ft, fec_free); +} + +static struct fec_node * +fec_add(struct fec *fec) +{ + struct fec_node *fn; + + fn = calloc(1, sizeof(*fn)); + if (fn == NULL) + fatal(__func__); + + fn->fec = *fec; + fn->local_label = NO_LABEL; + LIST_INIT(&fn->upstream); + LIST_INIT(&fn->downstream); + LIST_INIT(&fn->nexthops); + + if (fec_insert(&ft, &fn->fec)) + log_warnx("failed to add %s to ft tree", + log_fec(&fn->fec)); + + return (fn); +} + +struct fec_nh * +fec_nh_find(struct fec_node *fn, int af, union ldpd_addr *nexthop, + uint8_t priority) +{ + struct fec_nh *fnh; + + LIST_FOREACH(fnh, &fn->nexthops, entry) + if (fnh->af == af && + ldp_addrcmp(af, &fnh->nexthop, nexthop) == 0 && + fnh->priority == priority) + return (fnh); + + return (NULL); +} + +static struct fec_nh * +fec_nh_add(struct fec_node *fn, int af, union ldpd_addr *nexthop, + uint8_t priority) +{ + struct fec_nh *fnh; + + fnh = calloc(1, sizeof(*fnh)); + if (fnh == NULL) + fatal(__func__); + + fnh->af = af; + fnh->nexthop = *nexthop; + fnh->remote_label = NO_LABEL; + fnh->priority = priority; + LIST_INSERT_HEAD(&fn->nexthops, fnh, entry); + + return (fnh); +} + +static void 
+fec_nh_del(struct fec_nh *fnh) +{ + LIST_REMOVE(fnh, entry); + free(fnh); +} + +uint32_t +egress_label(enum fec_type fec_type) +{ + switch (fec_type) { + case FEC_TYPE_IPV4: + if (ldeconf->ipv4.flags & F_LDPD_AF_EXPNULL) + return (MPLS_LABEL_IPV4NULL); + break; + case FEC_TYPE_IPV6: + if (ldeconf->ipv6.flags & F_LDPD_AF_EXPNULL) + return (MPLS_LABEL_IPV6NULL); + break; + default: + fatalx("egress_label: unexpected fec type"); + } + + return (MPLS_LABEL_IMPLNULL); +} + +void +lde_kernel_insert(struct fec *fec, int af, union ldpd_addr *nexthop, + uint8_t priority, int connected, void *data) +{ + struct fec_node *fn; + struct fec_nh *fnh; + struct lde_map *me; + struct lde_nbr *ln; + + fn = (struct fec_node *)fec_find(&ft, fec); + if (fn == NULL) + fn = fec_add(fec); + fnh = fec_nh_find(fn, af, nexthop, priority); + if (fnh != NULL) { + lde_send_change_klabel(fn, fnh); + fnh->flags |= F_FEC_NH_NEW; + return; + } + + if (fn->fec.type == FEC_TYPE_PWID) + fn->data = data; + + if (fn->local_label == NO_LABEL) { + if (connected) + fn->local_label = egress_label(fn->fec.type); + else + fn->local_label = lde_assign_label(); + + /* FEC.1: perform lsr label distribution procedure */ + RB_FOREACH(ln, nbr_tree, &lde_nbrs) + lde_send_labelmapping(ln, fn, 1); + } + + fnh = fec_nh_add(fn, af, nexthop, priority); + fnh->flags |= F_FEC_NH_NEW; + lde_send_change_klabel(fn, fnh); + + switch (fn->fec.type) { + case FEC_TYPE_IPV4: + case FEC_TYPE_IPV6: + ln = lde_nbr_find_by_addr(af, &fnh->nexthop); + break; + case FEC_TYPE_PWID: + ln = lde_nbr_find_by_lsrid(fn->fec.u.pwid.lsr_id); + break; + default: + ln = NULL; + break; + } + + if (ln) { + /* FEC.2 */ + me = (struct lde_map *)fec_find(&ln->recv_map, &fn->fec); + if (me) + /* FEC.5 */ + lde_check_mapping(&me->map, ln); + } +} + +void +lde_kernel_remove(struct fec *fec, int af, union ldpd_addr *nexthop, + uint8_t priority) +{ + struct fec_node *fn; + struct fec_nh *fnh; + + fn = (struct fec_node *)fec_find(&ft, fec); + if (fn == NULL) + /* route lost */ + return; + fnh = fec_nh_find(fn, af, nexthop, priority); + if (fnh == NULL) + /* route lost */ + return; + + lde_send_delete_klabel(fn, fnh); + fec_nh_del(fnh); + if (LIST_EMPTY(&fn->nexthops)) { + lde_send_labelwithdraw_all(fn, NO_LABEL); + fn->local_label = NO_LABEL; + if (fn->fec.type == FEC_TYPE_PWID) + fn->data = NULL; + } +} + +/* + * Whenever a route is changed, zebra advertises its new version without + * withdrawing the old one. So, after processing a ZEBRA_REDISTRIBUTE_IPV[46]_ADD + * message, we need to check for nexthops that were removed and, for each of + * them (if any), withdraw the associated labels from zebra. 
+ */ +void +lde_kernel_reevaluate(struct fec *fec) +{ + struct fec_node *fn; + struct fec_nh *fnh, *safe; + + fn = (struct fec_node *)fec_find(&ft, fec); + if (fn == NULL) + return; + + LIST_FOREACH_SAFE(fnh, &fn->nexthops, entry, safe) { + if (fnh->flags & F_FEC_NH_NEW) + fnh->flags &= ~F_FEC_NH_NEW; + else + lde_kernel_remove(fec, fnh->af, &fnh->nexthop, + fnh->priority); + } +} + +void +lde_check_mapping(struct map *map, struct lde_nbr *ln) +{ + struct fec fec; + struct fec_node *fn; + struct fec_nh *fnh; + struct lde_req *lre; + struct lde_map *me; + struct l2vpn_pw *pw; + int msgsource = 0; + + lde_map2fec(map, ln->id, &fec); + fn = (struct fec_node *)fec_find(&ft, &fec); + if (fn == NULL) + fn = fec_add(&fec); + + /* LMp.1: first check if we have a pending request running */ + lre = (struct lde_req *)fec_find(&ln->sent_req, &fn->fec); + if (lre) + /* LMp.2: delete record of outstanding label request */ + lde_req_del(ln, lre, 1); + + /* RFC 4447 control word and status tlv negotiation */ + if (map->type == MAP_TYPE_PWID && l2vpn_pw_negotiate(ln, fn, map)) + return; + + /* + * LMp.3 - LMp.8: loop detection - unnecessary for frame-mode + * mpls networks. + */ + + /* LMp.9 */ + me = (struct lde_map *)fec_find(&ln->recv_map, &fn->fec); + if (me) { + /* LMp.10 */ + if (me->map.label != map->label && lre == NULL) { + /* LMp.10a */ + lde_send_labelrelease(ln, fn, me->map.label); + + /* + * Can not use lde_nbr_find_by_addr() because there's + * the possibility of multipath. + */ + LIST_FOREACH(fnh, &fn->nexthops, entry) { + if (lde_address_find(ln, fnh->af, + &fnh->nexthop) == NULL) + continue; + + lde_send_delete_klabel(fn, fnh); + fnh->remote_label = NO_LABEL; + } + } + } + + /* + * LMp.11 - 12: consider multiple nexthops in order to + * support multipath + */ + LIST_FOREACH(fnh, &fn->nexthops, entry) { + /* LMp.15: install FEC in FIB */ + switch (fec.type) { + case FEC_TYPE_IPV4: + case FEC_TYPE_IPV6: + if (!lde_address_find(ln, fnh->af, &fnh->nexthop)) + continue; + + fnh->remote_label = map->label; + lde_send_change_klabel(fn, fnh); + break; + case FEC_TYPE_PWID: + pw = (struct l2vpn_pw *) fn->data; + if (pw == NULL) + continue; + + pw->remote_group = map->fec.pwid.group_id; + if (map->flags & F_MAP_PW_IFMTU) + pw->remote_mtu = map->fec.pwid.ifmtu; + if (map->flags & F_MAP_PW_STATUS) + pw->remote_status = map->pw_status; + fnh->remote_label = map->label; + if (l2vpn_pw_ok(pw, fnh)) + lde_send_change_klabel(fn, fnh); + break; + default: + break; + } + + msgsource = 1; + } + /* LMp.13 & LMp.16: Record the mapping from this peer */ + if (me == NULL) + me = lde_map_add(ln, fn, 0); + me->map = *map; + + if (msgsource == 0) + /* LMp.13: just return since we use liberal lbl retention */ + return; + + /* + * LMp.17 - LMp.27 are unnecessary since we don't need to implement + * loop detection. LMp.28 - LMp.30 are unnecessary because we are + * merging capable. + */ +} + +void +lde_check_request(struct map *map, struct lde_nbr *ln) +{ + struct fec fec; + struct lde_req *lre; + struct fec_node *fn; + struct fec_nh *fnh; + + /* LRq.1: skip loop detection (not necessary) */ + + /* LRq.2: is there a next hop for fec? */ + lde_map2fec(map, ln->id, &fec); + fn = (struct fec_node *)fec_find(&ft, &fec); + if (fn == NULL || LIST_EMPTY(&fn->nexthops)) { + /* LRq.5: send No Route notification */ + lde_send_notification(ln->peerid, S_NO_ROUTE, map->msg_id, + htons(MSG_TYPE_LABELREQUEST)); + return; + } + + /* LRq.3: is MsgSource the next hop? 
*/ + LIST_FOREACH(fnh, &fn->nexthops, entry) { + switch (fec.type) { + case FEC_TYPE_IPV4: + case FEC_TYPE_IPV6: + if (!lde_address_find(ln, fnh->af, &fnh->nexthop)) + continue; + + /* LRq.4: send Loop Detected notification */ + lde_send_notification(ln->peerid, S_LOOP_DETECTED, + map->msg_id, htons(MSG_TYPE_LABELREQUEST)); + return; + default: + break; + } + } + + /* LRq.6: first check if we have a pending request running */ + lre = (struct lde_req *)fec_find(&ln->recv_req, &fn->fec); + if (lre != NULL) + /* LRq.7: duplicate request */ + return; + + /* LRq.8: record label request */ + lre = lde_req_add(ln, &fn->fec, 0); + if (lre != NULL) + lre->msg_id = ntohl(map->msg_id); + + /* LRq.9: perform LSR label distribution */ + lde_send_labelmapping(ln, fn, 1); + + /* + * LRq.10: do nothing (Request Never) since we use liberal + * label retention. + * LRq.11 - 12 are unnecessary since we are merging capable. + */ +} + +void +lde_check_release(struct map *map, struct lde_nbr *ln) +{ + struct fec fec; + struct fec_node *fn; + struct lde_wdraw *lw; + struct lde_map *me; + + /* TODO group wildcard */ + if (map->type == MAP_TYPE_PWID && !(map->flags & F_MAP_PW_ID)) + return; + + lde_map2fec(map, ln->id, &fec); + fn = (struct fec_node *)fec_find(&ft, &fec); + /* LRl.1: does FEC match a known FEC? */ + if (fn == NULL) + return; + + /* LRl.3: first check if we have a pending withdraw running */ + lw = (struct lde_wdraw *)fec_find(&ln->sent_wdraw, &fn->fec); + if (lw && (map->label == NO_LABEL || + (lw->label != NO_LABEL && map->label == lw->label))) { + /* LRl.4: delete record of outstanding label withdraw */ + lde_wdraw_del(ln, lw); + } + + /* LRl.6: check sent map list and remove it if available */ + me = (struct lde_map *)fec_find(&ln->sent_map, &fn->fec); + if (me && (map->label == NO_LABEL || map->label == me->map.label)) + lde_map_del(ln, me, 1); + + /* + * LRl.11 - 13 are unnecessary since we remove the label from + * forwarding/switching as soon as the FEC is unreachable. + */ +} + +void +lde_check_release_wcard(struct map *map, struct lde_nbr *ln) +{ + struct fec *f; + struct fec_node *fn; + struct lde_wdraw *lw; + struct lde_map *me; + + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + + /* LRl.3: first check if we have a pending withdraw running */ + lw = (struct lde_wdraw *)fec_find(&ln->sent_wdraw, &fn->fec); + if (lw && (map->label == NO_LABEL || + (lw->label != NO_LABEL && map->label == lw->label))) { + /* LRl.4: delete record of outstanding lbl withdraw */ + lde_wdraw_del(ln, lw); + } + + /* LRl.6: check sent map list and remove it if available */ + me = (struct lde_map *)fec_find(&ln->sent_map, &fn->fec); + if (me && + (map->label == NO_LABEL || map->label == me->map.label)) + lde_map_del(ln, me, 1); + + /* + * LRl.11 - 13 are unnecessary since we remove the label from + * forwarding/switching as soon as the FEC is unreachable. 
+ */ + } +} + +void +lde_check_withdraw(struct map *map, struct lde_nbr *ln) +{ + struct fec fec; + struct fec_node *fn; + struct fec_nh *fnh; + struct lde_map *me; + struct l2vpn_pw *pw; + + /* TODO group wildcard */ + if (map->type == MAP_TYPE_PWID && !(map->flags & F_MAP_PW_ID)) + return; + + lde_map2fec(map, ln->id, &fec); + fn = (struct fec_node *)fec_find(&ft, &fec); + if (fn == NULL) + fn = fec_add(&fec); + + /* LWd.1: remove label from forwarding/switching use */ + LIST_FOREACH(fnh, &fn->nexthops, entry) { + switch (fec.type) { + case FEC_TYPE_IPV4: + case FEC_TYPE_IPV6: + if (!lde_address_find(ln, fnh->af, &fnh->nexthop)) + continue; + break; + case FEC_TYPE_PWID: + pw = (struct l2vpn_pw *) fn->data; + if (pw == NULL) + continue; + break; + default: + break; + } + lde_send_delete_klabel(fn, fnh); + fnh->remote_label = NO_LABEL; + } + + /* LWd.2: send label release */ + lde_send_labelrelease(ln, fn, map->label); + + /* LWd.3: check previously received label mapping */ + me = (struct lde_map *)fec_find(&ln->recv_map, &fn->fec); + if (me && (map->label == NO_LABEL || map->label == me->map.label)) + /* LWd.4: remove record of previously received lbl mapping */ + lde_map_del(ln, me, 0); +} + +void +lde_check_withdraw_wcard(struct map *map, struct lde_nbr *ln) +{ + struct fec *f; + struct fec_node *fn; + struct fec_nh *fnh; + struct lde_map *me; + + /* LWd.2: send label release */ + lde_send_labelrelease(ln, NULL, map->label); + + RB_FOREACH(f, fec_tree, &ft) { + fn = (struct fec_node *)f; + + /* LWd.1: remove label from forwarding/switching use */ + LIST_FOREACH(fnh, &fn->nexthops, entry) { + switch (f->type) { + case FEC_TYPE_IPV4: + case FEC_TYPE_IPV6: + if (!lde_address_find(ln, fnh->af, + &fnh->nexthop)) + continue; + break; + case FEC_TYPE_PWID: + if (f->u.pwid.lsr_id.s_addr != ln->id.s_addr) + continue; + break; + default: + break; + } + lde_send_delete_klabel(fn, fnh); + fnh->remote_label = NO_LABEL; + } + + /* LWd.3: check previously received label mapping */ + me = (struct lde_map *)fec_find(&ln->recv_map, &fn->fec); + if (me && (map->label == NO_LABEL || + map->label == me->map.label)) + /* + * LWd.4: remove record of previously received + * label mapping + */ + lde_map_del(ln, me, 0); + } +} + +/* gabage collector timer: timer to remove dead entries from the LIB */ + +/* ARGSUSED */ +int +lde_gc_timer(struct thread *thread) +{ + struct fec *fec, *safe; + struct fec_node *fn; + int count = 0; + + RB_FOREACH_SAFE(fec, fec_tree, &ft, safe) { + fn = (struct fec_node *) fec; + + if (!LIST_EMPTY(&fn->nexthops) || + !LIST_EMPTY(&fn->downstream) || + !LIST_EMPTY(&fn->upstream)) + continue; + + fec_remove(&ft, &fn->fec); + free(fn); + count++; + } + + if (count > 0) + log_debug("%s: %u entries removed", __func__, count); + + lde_gc_start_timer(); + + return (0); +} + +void +lde_gc_start_timer(void) +{ + THREAD_TIMER_OFF(gc_timer); + gc_timer = thread_add_timer(master, lde_gc_timer, NULL, + LDE_GC_INTERVAL); +} + +void +lde_gc_stop_timer(void) +{ + THREAD_TIMER_OFF(gc_timer); +} diff --git a/ldpd/ldp.h b/ldpd/ldp.h new file mode 100644 index 0000000000..c421cddc38 --- /dev/null +++ b/ldpd/ldp.h @@ -0,0 +1,302 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2004, 2005, 2008 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* LDP protocol definitions */ + +#ifndef _LDP_H_ +#define _LDP_H_ + +/* misc */ +#define LDP_VERSION 1 +#define LDP_PORT 646 +#define LDP_MAX_LEN 4096 + +/* All Routers on this Subnet group multicast addresses */ +#define AllRouters_v4 "224.0.0.2" +#define AllRouters_v6 "ff02::2" + +#define LINK_DFLT_HOLDTIME 15 +#define TARGETED_DFLT_HOLDTIME 45 +#define MIN_HOLDTIME 3 +#define MAX_HOLDTIME 0xffff +#define INFINITE_HOLDTIME 0xffff + +#define DEFAULT_KEEPALIVE 180 +#define MIN_KEEPALIVE 3 +#define MAX_KEEPALIVE 0xffff +#define KEEPALIVE_PER_PERIOD 3 +#define INIT_FSM_TIMEOUT 15 + +#define DEFAULT_HELLO_INTERVAL 5 +#define MIN_HELLO_INTERVAL 1 +#define MAX_HELLO_INTERVAL 0xffff + +#define INIT_DELAY_TMR 15 +#define MAX_DELAY_TMR 120 + +#define MIN_PWID_ID 1 +#define MAX_PWID_ID 0xffffffff + +#define DEFAULT_L2VPN_MTU 1500 +#define MIN_L2VPN_MTU 512 +#define MAX_L2VPN_MTU 0xffff + +/* LDP message types */ +#define MSG_TYPE_NOTIFICATION 0x0001 +#define MSG_TYPE_HELLO 0x0100 +#define MSG_TYPE_INIT 0x0200 +#define MSG_TYPE_KEEPALIVE 0x0201 +#define MSG_TYPE_ADDR 0x0300 +#define MSG_TYPE_ADDRWITHDRAW 0x0301 +#define MSG_TYPE_LABELMAPPING 0x0400 +#define MSG_TYPE_LABELREQUEST 0x0401 +#define MSG_TYPE_LABELWITHDRAW 0x0402 +#define MSG_TYPE_LABELRELEASE 0x0403 +#define MSG_TYPE_LABELABORTREQ 0x0404 + +/* LDP TLV types */ +#define TLV_TYPE_FEC 0x0100 +#define TLV_TYPE_ADDRLIST 0x0101 +#define TLV_TYPE_HOPCOUNT 0x0103 +#define TLV_TYPE_PATHVECTOR 0x0104 +#define TLV_TYPE_GENERICLABEL 0x0200 +#define TLV_TYPE_ATMLABEL 0x0201 +#define TLV_TYPE_FRLABEL 0x0202 +#define TLV_TYPE_STATUS 0x0300 +#define TLV_TYPE_EXTSTATUS 0x0301 +#define TLV_TYPE_RETURNEDPDU 0x0302 +#define TLV_TYPE_RETURNEDMSG 0x0303 +#define TLV_TYPE_COMMONHELLO 0x0400 +#define TLV_TYPE_IPV4TRANSADDR 0x0401 +#define TLV_TYPE_CONFIG 0x0402 +#define TLV_TYPE_IPV6TRANSADDR 0x0403 +#define TLV_TYPE_COMMONSESSION 0x0500 +#define TLV_TYPE_ATMSESSIONPAR 0x0501 +#define TLV_TYPE_FRSESSION 0x0502 +#define TLV_TYPE_LABELREQUEST 0x0600 +/* RFC 4447 */ +#define TLV_TYPE_PW_STATUS 0x096A +#define TLV_TYPE_PW_IF_PARAM 0x096B +#define TLV_TYPE_PW_GROUP_ID 0x096C +/* RFC 7552 */ +#define TLV_TYPE_DUALSTACK 0x8701 + +/* LDP header */ +struct ldp_hdr { + uint16_t version; + uint16_t length; + uint32_t lsr_id; + uint16_t lspace_id; +} __attribute__ ((packed)); + +#define LDP_HDR_SIZE 10 /* actual size of the LDP header */ +#define LDP_HDR_PDU_LEN 6 /* minimum "PDU Length" */ +#define LDP_HDR_DEAD_LEN 4 + +/* TLV record */ +struct tlv { + uint16_t type; + uint16_t length; +}; +#define TLV_HDR_SIZE 4 + +struct ldp_msg { + uint16_t type; + uint16_t length; + uint32_t id; + /* Mandatory Parameters */ + /* Optional Parameters */ +} __attribute__ ((packed)); + +#define LDP_MSG_SIZE 8 /* minimum size of LDP message */ +#define LDP_MSG_LEN 4 /* minimum "Message Length" */ +#define LDP_MSG_DEAD_LEN 4 + +#define UNKNOWN_FLAG 0x8000 +#define FORWARD_FLAG 0xc000 + +struct hello_prms_tlv { + uint16_t type; + uint16_t length; + uint16_t holdtime; + 
uint16_t flags; +}; +#define F_HELLO_TARGETED 0x8000 +#define F_HELLO_REQ_TARG 0x4000 +#define F_HELLO_GTSM 0x2000 + +struct hello_prms_opt4_tlv { + uint16_t type; + uint16_t length; + uint32_t value; +}; + +struct hello_prms_opt16_tlv { + uint16_t type; + uint16_t length; + uint8_t value[16]; +}; + +#define DUAL_STACK_LDPOV4 4 +#define DUAL_STACK_LDPOV6 6 + +#define F_HELLO_TLV_RCVD_ADDR 0x01 +#define F_HELLO_TLV_RCVD_CONF 0x02 +#define F_HELLO_TLV_RCVD_DS 0x04 + +#define S_SUCCESS 0x00000000 +#define S_BAD_LDP_ID 0x80000001 +#define S_BAD_PROTO_VER 0x80000002 +#define S_BAD_PDU_LEN 0x80000003 +#define S_UNKNOWN_MSG 0x00000004 +#define S_BAD_MSG_LEN 0x80000005 +#define S_UNKNOWN_TLV 0x00000006 +#define S_BAD_TLV_LEN 0x80000007 +#define S_BAD_TLV_VAL 0x80000008 +#define S_HOLDTIME_EXP 0x80000009 +#define S_SHUTDOWN 0x8000000A +#define S_LOOP_DETECTED 0x0000000B +#define S_UNKNOWN_FEC 0x0000000C +#define S_NO_ROUTE 0x0000000D +#define S_NO_LABEL_RES 0x0000000E +#define S_AVAILABLE 0x0000000F +#define S_NO_HELLO 0x80000010 +#define S_PARM_ADV_MODE 0x80000011 +#define S_MAX_PDU_LEN 0x80000012 +#define S_PARM_L_RANGE 0x80000013 +#define S_KEEPALIVE_TMR 0x80000014 +#define S_LAB_REQ_ABRT 0x00000015 +#define S_MISS_MSG 0x00000016 +#define S_UNSUP_ADDR 0x00000017 +#define S_KEEPALIVE_BAD 0x80000018 +#define S_INTERN_ERR 0x80000019 +/* RFC 4447 */ +#define S_ILLEGAL_CBIT 0x00000024 +#define S_WRONG_CBIT 0x00000025 +#define S_INCPT_BITRATE 0x00000026 +#define S_CEP_MISCONF 0x00000027 +#define S_PW_STATUS 0x00000028 +#define S_UNASSIGN_TAI 0x00000029 +#define S_MISCONF_ERR 0x0000002A +#define S_WITHDRAW_MTHD 0x0000002B +/* RFC 7552 */ +#define S_TRANS_MISMTCH 0x80000032 +#define S_DS_NONCMPLNCE 0x80000033 + +struct sess_prms_tlv { + uint16_t type; + uint16_t length; + uint16_t proto_version; + uint16_t keepalive_time; + uint8_t reserved; + uint8_t pvlim; + uint16_t max_pdu_len; + uint32_t lsr_id; + uint16_t lspace_id; +} __attribute__ ((packed)); + +#define SESS_PRMS_SIZE 18 +#define SESS_PRMS_LEN 14 + +struct status_tlv { + uint16_t type; + uint16_t length; + uint32_t status_code; + uint32_t msg_id; + uint16_t msg_type; +} __attribute__ ((packed)); + +#define STATUS_SIZE 14 +#define STATUS_TLV_LEN 10 +#define STATUS_FATAL 0x80000000 + +#define AF_IPV4 0x1 +#define AF_IPV6 0x2 + +struct address_list_tlv { + uint16_t type; + uint16_t length; + uint16_t family; + /* address entries */ +} __attribute__ ((packed)); + +#define ADDR_LIST_SIZE 6 + +#define FEC_ELM_WCARD_LEN 1 +#define FEC_ELM_PREFIX_MIN_LEN 4 +#define FEC_PWID_ELM_MIN_LEN 8 + +#define MAP_TYPE_WILDCARD 0x01 +#define MAP_TYPE_PREFIX 0x02 +#define MAP_TYPE_PWID 0x80 +#define MAP_TYPE_GENPWID 0x81 + +#define CONTROL_WORD_FLAG 0x8000 +#define PW_TYPE_ETHERNET_TAGGED 0x0004 +#define PW_TYPE_ETHERNET 0x0005 +#define DEFAULT_PW_TYPE PW_TYPE_ETHERNET + +/* RFC 4447 Sub-TLV record */ +struct subtlv { + uint8_t type; + uint8_t length; +}; +#define SUBTLV_HDR_SIZE 2 + +#define SUBTLV_IFMTU 0x01 +#define SUBTLV_VLANID 0x06 + +#define FEC_SUBTLV_IFMTU_SIZE 4 +#define FEC_SUBTLV_VLANID_SIZE 4 + +struct label_tlv { + uint16_t type; + uint16_t length; + uint32_t label; +}; +#define LABEL_TLV_SIZE 8 +#define LABEL_TLV_LEN 4 + +struct reqid_tlv { + uint16_t type; + uint16_t length; + uint32_t reqid; +}; +#define REQID_TLV_SIZE 8 +#define REQID_TLV_LEN 4 + +struct pw_status_tlv { + uint16_t type; + uint16_t length; + uint32_t value; +}; +#define PW_STATUS_TLV_SIZE 8 +#define PW_STATUS_TLV_LEN 4 + +#define PW_FORWARDING 0 +#define PW_NOT_FORWARDING (1 << 0) 
+#define PW_LOCAL_RX_FAULT (1 << 1) +#define PW_LOCAL_TX_FAULT (1 << 2) +#define PW_PSN_RX_FAULT (1 << 3) +#define PW_PSN_TX_FAULT (1 << 4) + +#define NO_LABEL UINT32_MAX + +#endif /* !_LDP_H_ */ diff --git a/ldpd/ldp_debug.c b/ldpd/ldp_debug.c new file mode 100644 index 0000000000..15dd06a0f3 --- /dev/null +++ b/ldpd/ldp_debug.c @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2016 by Open Source Routing. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include + +#include "command.h" +#include "vty.h" + +#include "ldpd.h" +#include "ldp_debug.h" +#include "ldp_vty.h" + +struct ldp_debug conf_ldp_debug; +struct ldp_debug ldp_debug; + +/* Debug node. */ +struct cmd_node ldp_debug_node = +{ + DEBUG_NODE, + "", + 1 +}; + +int +ldp_vty_debug(struct vty *vty, struct vty_arg *args[]) +{ + const char *type_str, *dir_str; + int disable, all; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + type_str = vty_get_arg_value(args, "type"); + + if (strcmp(type_str, "discovery") == 0) { + dir_str = vty_get_arg_value(args, "dir"); + if (dir_str == NULL) + return (CMD_WARNING); + + if (dir_str[0] == 'r') { + if (disable) + DEBUG_OFF(hello, HELLO_RECV); + else + DEBUG_ON(hello, HELLO_RECV); + } else { + if (disable) + DEBUG_OFF(hello, HELLO_SEND); + else + DEBUG_ON(hello, HELLO_SEND); + } + } else if (strcmp(type_str, "errors") == 0) { + if (disable) + DEBUG_OFF(errors, ERRORS); + else + DEBUG_ON(errors, ERRORS); + } else if (strcmp(type_str, "event") == 0) { + if (disable) + DEBUG_OFF(event, EVENT); + else + DEBUG_ON(event, EVENT); + } else if (strcmp(type_str, "messages") == 0) { + all = (vty_get_arg_value(args, "all")) ? 
1 : 0; + dir_str = vty_get_arg_value(args, "dir"); + if (dir_str == NULL) + return (CMD_WARNING); + + if (dir_str[0] == 'r') { + if (disable) { + DEBUG_OFF(msg, MSG_RECV); + DEBUG_OFF(msg, MSG_RECV_ALL); + } else { + DEBUG_ON(msg, MSG_RECV); + if (all) + DEBUG_ON(msg, MSG_RECV_ALL); + } + } else { + if (disable) { + DEBUG_OFF(msg, MSG_SEND); + DEBUG_OFF(msg, MSG_SEND_ALL); + } else { + DEBUG_ON(msg, MSG_SEND); + if (all) + DEBUG_ON(msg, MSG_SEND_ALL); + } + } + } else if (strcmp(type_str, "zebra") == 0) { + if (disable) + DEBUG_OFF(zebra, ZEBRA); + else + DEBUG_ON(zebra, ZEBRA); + } + + main_imsg_compose_both(IMSG_DEBUG_UPDATE, &ldp_debug, + sizeof(ldp_debug)); + + return (CMD_SUCCESS); +} + +int +ldp_vty_show_debugging(struct vty *vty, struct vty_arg *args[]) +{ + vty_out(vty, "LDP debugging status:%s", VTY_NEWLINE); + + if (LDP_DEBUG(hello, HELLO_RECV)) + vty_out(vty, " LDP discovery debugging is on (inbound)%s", + VTY_NEWLINE); + if (LDP_DEBUG(hello, HELLO_SEND)) + vty_out(vty, " LDP discovery debugging is on (outbound)%s", + VTY_NEWLINE); + if (LDP_DEBUG(errors, ERRORS)) + vty_out(vty, " LDP errors debugging is on%s", VTY_NEWLINE); + if (LDP_DEBUG(event, EVENT)) + vty_out(vty, " LDP events debugging is on%s", VTY_NEWLINE); + if (LDP_DEBUG(msg, MSG_RECV_ALL)) + vty_out(vty, " LDP detailed messages debugging is on " + "(inbound)%s", VTY_NEWLINE); + else if (LDP_DEBUG(msg, MSG_RECV)) + vty_out(vty, " LDP messages debugging is on (inbound)%s", + VTY_NEWLINE); + if (LDP_DEBUG(msg, MSG_SEND_ALL)) + vty_out(vty, " LDP detailed messages debugging is on " + "(outbound)%s", VTY_NEWLINE); + else if (LDP_DEBUG(msg, MSG_SEND)) + vty_out(vty, " LDP messages debugging is on (outbound)%s", + VTY_NEWLINE); + if (LDP_DEBUG(zebra, ZEBRA)) + vty_out(vty, " LDP zebra debugging is on%s", VTY_NEWLINE); + vty_out (vty, "%s", VTY_NEWLINE); + + return (CMD_SUCCESS); +} + +int +ldp_debug_config_write(struct vty *vty) +{ + int write = 0; + + if (CONF_LDP_DEBUG(hello, HELLO_RECV)) { + vty_out(vty, "debug mpls ldp discovery hello recv%s", + VTY_NEWLINE); + write = 1; + } + + if (CONF_LDP_DEBUG(hello, HELLO_SEND)) { + vty_out(vty, "debug mpls ldp discovery hello sent%s", + VTY_NEWLINE); + write = 1; + } + + if (CONF_LDP_DEBUG(errors, ERRORS)) { + vty_out(vty, "debug mpls ldp errors%s", VTY_NEWLINE); + write = 1; + } + + if (CONF_LDP_DEBUG(event, EVENT)) { + vty_out(vty, "debug mpls ldp event%s", VTY_NEWLINE); + write = 1; + } + + if (CONF_LDP_DEBUG(msg, MSG_RECV_ALL)) { + vty_out(vty, "debug mpls ldp messages recv all%s", VTY_NEWLINE); + write = 1; + } else if (CONF_LDP_DEBUG(msg, MSG_RECV)) { + vty_out(vty, "debug mpls ldp messages recv%s", VTY_NEWLINE); + write = 1; + } + + if (CONF_LDP_DEBUG(msg, MSG_SEND_ALL)) { + vty_out(vty, "debug mpls ldp messages sent all%s", VTY_NEWLINE); + write = 1; + } else if (CONF_LDP_DEBUG(msg, MSG_SEND)) { + vty_out(vty, "debug mpls ldp messages sent%s", VTY_NEWLINE); + write = 1; + } + + if (CONF_LDP_DEBUG(zebra, ZEBRA)) { + vty_out(vty, "debug mpls ldp zebra%s", VTY_NEWLINE); + write = 1; + } + + return (write); +} diff --git a/ldpd/ldp_debug.h b/ldpd/ldp_debug.h new file mode 100644 index 0000000000..aa0cd47e7b --- /dev/null +++ b/ldpd/ldp_debug.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2016 by Open Source Routing. + * + * This file is part of GNU Zebra. 
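The ldp_vty_debug() handler above is driven entirely by a NULL-terminated array of struct vty_arg name/value pairs; the auto-generated DEFUNs later in this series build exactly such arrays. Below is a small illustration of that calling convention, grounded in the handler above; the wrapper function itself is hypothetical.

/*
 * Illustration only: invoking ldp_vty_debug() the way the generated
 * DEFUNs do.  The argument names ("type", "dir", "all") are the ones
 * the handler above looks up; example_* is hypothetical.
 */
#include <zebra.h>
#include "vty.h"
#include "ldp_vty.h"

static int
example_enable_detailed_recv_debug(struct vty *vty)
{
    struct vty_arg *args[] =
    {
        &(struct vty_arg) { .name = "type", .value = "messages" },
        &(struct vty_arg) { .name = "dir", .value = "recv" },
        &(struct vty_arg) { .name = "all", .value = "all" },
        NULL
    };

    /*
     * Sets MSG_RECV and MSG_RECV_ALL in the running debug flags, then
     * pushes the update to the other ldpd processes via
     * main_imsg_compose_both(IMSG_DEBUG_UPDATE, ...), as shown above.
     */
    return ldp_vty_debug(vty, args);
}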
+ * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#ifndef _LDP_DEBUG_H_ +#define _LDP_DEBUG_H_ + +struct ldp_debug { + int hello; +#define LDP_DEBUG_HELLO_RECV 0x01 +#define LDP_DEBUG_HELLO_SEND 0x02 + + int errors; +#define LDP_DEBUG_ERRORS 0x01 + + int event; +#define LDP_DEBUG_EVENT 0x01 + + int msg; +#define LDP_DEBUG_MSG_RECV 0x01 +#define LDP_DEBUG_MSG_RECV_ALL 0x02 +#define LDP_DEBUG_MSG_SEND 0x04 +#define LDP_DEBUG_MSG_SEND_ALL 0x08 + + int zebra; +#define LDP_DEBUG_ZEBRA 0x01 +}; +extern struct ldp_debug conf_ldp_debug; +extern struct ldp_debug ldp_debug; + +#define CONF_DEBUG_ON(a, b) (conf_ldp_debug.a |= (LDP_DEBUG_ ## b)) +#define CONF_DEBUG_OFF(a, b) (conf_ldp_debug.a &= ~(LDP_DEBUG_ ## b)) + +#define TERM_DEBUG_ON(a, b) (ldp_debug.a |= (LDP_DEBUG_ ## b)) +#define TERM_DEBUG_OFF(a, b) (ldp_debug.a &= ~(LDP_DEBUG_ ## b)) + +#define DEBUG_ON(a, b) \ + do { \ + if (vty->node == CONFIG_NODE) { \ + CONF_DEBUG_ON(a, b); \ + TERM_DEBUG_ON(a, b); \ + } else \ + TERM_DEBUG_ON(a, b); \ + } while (0) +#define DEBUG_OFF(a, b) \ + do { \ + CONF_DEBUG_OFF(a, b); \ + TERM_DEBUG_OFF(a, b); \ + } while (0) + +#define LDP_DEBUG(a, b) (ldp_debug.a & LDP_DEBUG_ ## b) +#define CONF_LDP_DEBUG(a, b) (conf_ldp_debug.a & LDP_DEBUG_ ## b) + +#define debug_hello_recv(emsg, ...) \ +do { \ + if (LDP_DEBUG(hello, HELLO_RECV)) \ + log_debug("discovery[recv]: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_hello_send(emsg, ...) \ +do { \ + if (LDP_DEBUG(hello, HELLO_SEND)) \ + log_debug("discovery[send]: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_err(emsg, ...) \ +do { \ + if (LDP_DEBUG(errors, ERRORS)) \ + log_debug("error: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_evt(emsg, ...) \ +do { \ + if (LDP_DEBUG(event, EVENT)) \ + log_debug("event: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_msg_recv(emsg, ...) \ +do { \ + if (LDP_DEBUG(msg, MSG_RECV)) \ + log_debug("msg[in]: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_msg_send(emsg, ...) \ +do { \ + if (LDP_DEBUG(msg, MSG_SEND)) \ + log_debug("msg[out]: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_kalive_recv(emsg, ...) \ +do { \ + if (LDP_DEBUG(msg, MSG_RECV_ALL)) \ + log_debug("kalive[in]: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_kalive_send(emsg, ...) \ +do { \ + if (LDP_DEBUG(msg, MSG_SEND_ALL)) \ + log_debug("kalive[out]: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_zebra_in(emsg, ...) \ +do { \ + if (LDP_DEBUG(zebra, ZEBRA)) \ + log_debug("zebra[in]: " emsg, __VA_ARGS__); \ +} while (0) + +#define debug_zebra_out(emsg, ...) 
\ +do { \ + if (LDP_DEBUG(zebra, ZEBRA)) \ + log_debug("zebra[out]: " emsg, __VA_ARGS__); \ +} while (0) + +#endif /* _LDP_DEBUG_H_ */ diff --git a/ldpd/ldp_vty.h b/ldpd/ldp_vty.h new file mode 100644 index 0000000000..735554badf --- /dev/null +++ b/ldpd/ldp_vty.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2016 by Open Source Routing. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#ifndef _LDP_VTY_H_ +#define _LDP_VTY_H_ + +#include "vty.h" + +extern struct cmd_node ldp_node; +extern struct cmd_node ldp_ipv4_node; +extern struct cmd_node ldp_ipv6_node; +extern struct cmd_node ldp_ipv4_iface_node; +extern struct cmd_node ldp_ipv6_iface_node; +extern struct cmd_node ldp_l2vpn_node; +extern struct cmd_node ldp_pseudowire_node; +extern struct cmd_node ldp_debug_node; + +union ldpd_addr; +int ldp_get_address(const char *, int *, union ldpd_addr *); +int ldp_config_write(struct vty *); +int ldp_l2vpn_config_write(struct vty *); +int ldp_debug_config_write(struct vty *); +int ldp_vty_mpls_ldp (struct vty *, struct vty_arg *[]); +int ldp_vty_address_family (struct vty *, struct vty_arg *[]); +int ldp_vty_disc_holdtime(struct vty *, struct vty_arg *[]); +int ldp_vty_disc_interval(struct vty *, struct vty_arg *[]); +int ldp_vty_targeted_hello_accept(struct vty *, struct vty_arg *[]); +int ldp_vty_session_holdtime(struct vty *, struct vty_arg *[]); +int ldp_vty_interface(struct vty *, struct vty_arg *[]); +int ldp_vty_trans_addr(struct vty *, struct vty_arg *[]); +int ldp_vty_neighbor_targeted(struct vty *, struct vty_arg *[]); +int ldp_vty_explicit_null(struct vty *, struct vty_arg *[]); +int ldp_vty_ttl_security(struct vty *, struct vty_arg *[]); +int ldp_vty_router_id(struct vty *, struct vty_arg *[]); +int ldp_vty_ds_cisco_interop(struct vty *, struct vty_arg *[]); +int ldp_vty_trans_pref_ipv4(struct vty *, struct vty_arg *[]); +int ldp_vty_neighbor_password(struct vty *, struct vty_arg *[]); +int ldp_vty_neighbor_ttl_security(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_bridge(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_mtu(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_pwtype(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_interface(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_pseudowire(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_pw_cword(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_pw_nbr_addr(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_pw_nbr_id(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_pw_pwid(struct vty *, struct vty_arg *[]); +int ldp_vty_l2vpn_pw_pwstatus(struct vty *, struct vty_arg *[]); +int ldp_vty_show_binding(struct vty *, struct vty_arg *[]); +int ldp_vty_show_discovery(struct vty *, struct vty_arg *[]); +int 
ldp_vty_show_interface(struct vty *, struct vty_arg *[]); +int ldp_vty_show_neighbor(struct vty *, struct vty_arg *[]); +int ldp_vty_show_atom_binding(struct vty *, struct vty_arg *[]); +int ldp_vty_show_atom_vc(struct vty *, struct vty_arg *[]); +int ldp_vty_clear_nbr(struct vty *, struct vty_arg *[]); +int ldp_vty_debug(struct vty *, struct vty_arg *[]); +int ldp_vty_show_debugging(struct vty *, struct vty_arg *[]); + +void ldp_vty_init(void); +void ldp_vty_if_init(void); + +#endif /* _LDP_VTY_H_ */ diff --git a/ldpd/ldp_vty.xml b/ldpd/ldp_vty.xml new file mode 100644 index 0000000000..ee5c6e4df2 --- /dev/null +++ b/ldpd/ldp_vty.xml @@ -0,0 +1,378 @@ diff --git a/ldpd/ldp_vty_cmds.c b/ldpd/ldp_vty_cmds.c new file mode 100644 index 0000000000..64715999f2 --- /dev/null +++ b/ldpd/ldp_vty_cmds.c @@ -0,0 +1,1726 @@ +/* Auto-generated from ldp_vty.xml. */ +/* Do not edit! */ + +#include <zebra.h> + +#include "command.h" +#include "vty.h" +#include "ldp_vty.h" + +DEFUN (ldp_mpls_ldp, + ldp_mpls_ldp_cmd, + "mpls ldp", + "Global MPLS configuration subcommands\n" + "Label Distribution Protocol\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_mpls_ldp (vty, args); +} + +DEFUN (ldp_l2vpn_word_type_vpls, + ldp_l2vpn_word_type_vpls_cmd, + "l2vpn WORD type vpls", + "Configure l2vpn commands\n" + "L2VPN name\n" + "L2VPN type\n" + "Virtual Private LAN Service\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "name", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn (vty, args); +} + +DEFUN (ldp_no_mpls_ldp, + ldp_no_mpls_ldp_cmd, + "no mpls ldp", + "Negate a command or set its defaults\n" + "Global MPLS configuration subcommands\n" + "Label Distribution Protocol\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + NULL + }; + return ldp_vty_mpls_ldp (vty, args); +} + +DEFUN (ldp_no_l2vpn_word_type_vpls, + ldp_no_l2vpn_word_type_vpls_cmd, + "no l2vpn WORD type vpls", + "Negate a command or set its defaults\n" + "Configure l2vpn commands\n" + "L2VPN name\n" + "L2VPN type\n" + "Virtual Private LAN Service\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "name", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn (vty, args); +} + +DEFUN (ldp_address_family_ipv4, + ldp_address_family_ipv4_cmd, + "address-family ipv4", + "Configure Address Family and its parameters\n" + "IPv4\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "address-family", .value = "ipv4" }, + NULL + }; + return ldp_vty_address_family (vty, args); +} + +DEFUN (ldp_address_family_ipv6, + ldp_address_family_ipv6_cmd, + "address-family ipv6", + "Configure Address Family and its parameters\n" + "IPv6\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "address-family", .value = "ipv6" }, + NULL + }; + return ldp_vty_address_family (vty, args); +} + +DEFUN (ldp_discovery_hello_holdtime_disc_time, + ldp_discovery_hello_holdtime_disc_time_cmd, + "discovery hello holdtime (1-65535)", + "Configure discovery parameters\n" + "LDP Link Hellos\n" + "Hello holdtime\n" + "Time (seconds) - 65535 
implies infinite\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "hello_type", .value = "hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_holdtime (vty, args); +} + +DEFUN (ldp_discovery_hello_interval_disc_time, + ldp_discovery_hello_interval_disc_time_cmd, + "discovery hello interval (1-65535)", + "Configure discovery parameters\n" + "LDP Link Hellos\n" + "Hello interval\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "hello_type", .value = "hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_interval (vty, args); +} + +DEFUN (ldp_discovery_targeted_hello_holdtime_disc_time, + ldp_discovery_targeted_hello_holdtime_disc_time_cmd, + "discovery targeted-hello holdtime (1-65535)", + "Configure discovery parameters\n" + "LDP Targeted Hellos\n" + "Targeted hello holdtime\n" + "Time (seconds) - 65535 implies infinite\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "hello_type", .value = "targeted-hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_holdtime (vty, args); +} + +DEFUN (ldp_discovery_targeted_hello_interval_disc_time, + ldp_discovery_targeted_hello_interval_disc_time_cmd, + "discovery targeted-hello interval (1-65535)", + "Configure discovery parameters\n" + "LDP Targeted Hellos\n" + "Targeted hello interval\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "hello_type", .value = "targeted-hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_interval (vty, args); +} + +DEFUN (ldp_dual_stack_transport_connection_prefer_ipv4, + ldp_dual_stack_transport_connection_prefer_ipv4_cmd, + "dual-stack transport-connection prefer ipv4", + "Configure dual stack parameters\n" + "Configure TCP transport parameters\n" + "Configure prefered address family for TCP transport connection with neighbor\n" + "IPv4\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_trans_pref_ipv4 (vty, args); +} + +DEFUN (ldp_dual_stack_cisco_interop, + ldp_dual_stack_cisco_interop_cmd, + "dual-stack cisco-interop", + "Configure dual stack parameters\n" + "Use Cisco non-compliant format to send and interpret the Dual-Stack capability TLV\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_ds_cisco_interop (vty, args); +} + +DEFUN (ldp_neighbor_ipv4_password_word, + ldp_neighbor_ipv4_password_word_cmd, + "neighbor A.B.C.D password WORD", + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "Configure password for MD5 authentication\n" + "The password\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + &(struct vty_arg) { .name = "password", .value = argv[1] }, + NULL + }; + return ldp_vty_neighbor_password (vty, args); +} + +DEFUN (ldp_neighbor_ipv4_session_holdtime_session_time, + ldp_neighbor_ipv4_session_holdtime_session_time_cmd, + "neighbor A.B.C.D session holdtime (15-65535)", + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "Configure session parameters\n" + "Configure session holdtime\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + &(struct vty_arg) { .name = "seconds", .value = argv[1] }, + NULL + }; + return ldp_vty_session_holdtime (vty, args); +} + +DEFUN (ldp_neighbor_ipv4_ttl_security_disable, + 
ldp_neighbor_ipv4_ttl_security_disable_cmd, + "neighbor A.B.C.D ttl-security disable", + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "LDP ttl security check\n" + "Disable ttl security\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + NULL + }; + return ldp_vty_neighbor_ttl_security (vty, args); +} + +DEFUN (ldp_neighbor_ipv4_ttl_security_hops_hops, + ldp_neighbor_ipv4_ttl_security_hops_hops_cmd, + "neighbor A.B.C.D ttl-security hops (1-254)", + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "LDP ttl security check\n" + "IP hops\n" + "maximum number of hops\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + &(struct vty_arg) { .name = "hops", .value = argv[1] }, + NULL + }; + return ldp_vty_neighbor_ttl_security (vty, args); +} + +DEFUN (ldp_router_id_ipv4, + ldp_router_id_ipv4_cmd, + "router-id A.B.C.D", + "Configure router Id\n" + "LSR Id (in form of an IPv4 address)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_router_id (vty, args); +} + +DEFUN (ldp_no_address_family_ipv4, + ldp_no_address_family_ipv4_cmd, + "no address-family ipv4", + "Negate a command or set its defaults\n" + "Configure Address Family and its parameters\n" + "IPv4\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "address-family", .value = "ipv4" }, + NULL + }; + return ldp_vty_address_family (vty, args); +} + +DEFUN (ldp_no_address_family_ipv6, + ldp_no_address_family_ipv6_cmd, + "no address-family ipv6", + "Negate a command or set its defaults\n" + "Configure Address Family and its parameters\n" + "IPv6\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "address-family", .value = "ipv6" }, + NULL + }; + return ldp_vty_address_family (vty, args); +} + +DEFUN (ldp_no_discovery_hello_holdtime_disc_time, + ldp_no_discovery_hello_holdtime_disc_time_cmd, + "no discovery hello holdtime (1-65535)", + "Negate a command or set its defaults\n" + "Configure discovery parameters\n" + "LDP Link Hellos\n" + "Hello holdtime\n" + "Time (seconds) - 65535 implies infinite\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "hello_type", .value = "hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_holdtime (vty, args); +} + +DEFUN (ldp_no_discovery_hello_interval_disc_time, + ldp_no_discovery_hello_interval_disc_time_cmd, + "no discovery hello interval (1-65535)", + "Negate a command or set its defaults\n" + "Configure discovery parameters\n" + "LDP Link Hellos\n" + "Hello interval\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "hello_type", .value = "hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_interval (vty, args); +} + +DEFUN (ldp_no_discovery_targeted_hello_holdtime_disc_time, + ldp_no_discovery_targeted_hello_holdtime_disc_time_cmd, + "no discovery targeted-hello holdtime (1-65535)", + "Negate a command or set its defaults\n" + "Configure discovery parameters\n" + "LDP Targeted Hellos\n" + "Targeted hello holdtime\n" + "Time (seconds) - 65535 implies infinite\n") +{ + struct vty_arg *args[] = + { + &(struct 
vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "hello_type", .value = "targeted-hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_holdtime (vty, args); +} + +DEFUN (ldp_no_discovery_targeted_hello_interval_disc_time, + ldp_no_discovery_targeted_hello_interval_disc_time_cmd, + "no discovery targeted-hello interval (1-65535)", + "Negate a command or set its defaults\n" + "Configure discovery parameters\n" + "LDP Targeted Hellos\n" + "Targeted hello interval\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "hello_type", .value = "targeted-hello" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_disc_interval (vty, args); +} + +DEFUN (ldp_no_dual_stack_transport_connection_prefer_ipv4, + ldp_no_dual_stack_transport_connection_prefer_ipv4_cmd, + "no dual-stack transport-connection prefer ipv4", + "Negate a command or set its defaults\n" + "Configure dual stack parameters\n" + "Configure TCP transport parameters\n" + "Configure prefered address family for TCP transport connection with neighbor\n" + "IPv4\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + NULL + }; + return ldp_vty_trans_pref_ipv4 (vty, args); +} + +DEFUN (ldp_no_dual_stack_cisco_interop, + ldp_no_dual_stack_cisco_interop_cmd, + "no dual-stack cisco-interop", + "Negate a command or set its defaults\n" + "Configure dual stack parameters\n" + "Use Cisco non-compliant format to send and interpret the Dual-Stack capability TLV\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + NULL + }; + return ldp_vty_ds_cisco_interop (vty, args); +} + +DEFUN (ldp_no_neighbor_ipv4_password_word, + ldp_no_neighbor_ipv4_password_word_cmd, + "no neighbor A.B.C.D password WORD", + "Negate a command or set its defaults\n" + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "Configure password for MD5 authentication\n" + "The password\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + &(struct vty_arg) { .name = "password", .value = argv[1] }, + NULL + }; + return ldp_vty_neighbor_password (vty, args); +} + +DEFUN (ldp_no_neighbor_ipv4_session_holdtime_session_time, + ldp_no_neighbor_ipv4_session_holdtime_session_time_cmd, + "no neighbor A.B.C.D session holdtime (15-65535)", + "Negate a command or set its defaults\n" + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "Configure session parameters\n" + "Configure session holdtime\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + &(struct vty_arg) { .name = "seconds", .value = argv[1] }, + NULL + }; + return ldp_vty_session_holdtime (vty, args); +} + +DEFUN (ldp_no_neighbor_ipv4_ttl_security_disable, + ldp_no_neighbor_ipv4_ttl_security_disable_cmd, + "no neighbor A.B.C.D ttl-security disable", + "Negate a command or set its defaults\n" + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "LDP ttl security check\n" + "Disable ttl security\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + NULL + }; + return ldp_vty_neighbor_ttl_security (vty, args); +} + 
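All of these DEFUNs hand a NULL-terminated vty_arg array to an ldp_vty_* handler, which then pulls individual arguments out by name with vty_get_arg_value(). The real lookup helper lives in lib/vty.c and is not shown in this hunk; the sketch below only illustrates the contract, assuming nothing beyond the .name/.value fields used above, and uses a hypothetical example_ name.

/*
 * Illustrative sketch (not the lib/vty.c implementation): resolve a named
 * argument from the NULL-terminated arrays built by the DEFUNs in this
 * file.  Optional arguments that were not supplied simply come back NULL.
 */
#include <string.h>

#include "vty.h"

static const char *
example_get_arg_value(struct vty_arg *args[], const char *name)
{
    for (; *args != NULL; args++)
        if (strcmp((*args)->name, name) == 0)
            return (*args)->value;

    return NULL;
}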
+DEFUN (ldp_no_neighbor_ipv4_ttl_security_hops_hops, + ldp_no_neighbor_ipv4_ttl_security_hops_hops_cmd, + "no neighbor A.B.C.D ttl-security hops (1-254)", + "Negate a command or set its defaults\n" + "Configure neighbor parameters\n" + "LDP Id of neighbor\n" + "LDP ttl security check\n" + "IP hops\n" + "maximum number of hops\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "lsr_id", .value = argv[0] }, + &(struct vty_arg) { .name = "hops", .value = argv[1] }, + NULL + }; + return ldp_vty_neighbor_ttl_security (vty, args); +} + +DEFUN (ldp_no_router_id_ipv4, + ldp_no_router_id_ipv4_cmd, + "no router-id A.B.C.D", + "Negate a command or set its defaults\n" + "Configure router Id\n" + "LSR Id (in form of an IPv4 address)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_router_id (vty, args); +} + +DEFUN (ldp_discovery_targeted_hello_accept, + ldp_discovery_targeted_hello_accept_cmd, + "discovery targeted-hello accept", + "Configure discovery parameters\n" + "LDP Targeted Hellos\n" + "Accept and respond to targeted hellos\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "hello_type", .value = "targeted-hello" }, + NULL + }; + return ldp_vty_targeted_hello_accept (vty, args); +} + +DEFUN (ldp_label_local_advertise_explicit_null, + ldp_label_local_advertise_explicit_null_cmd, + "label local advertise explicit-null", + "Configure label control and policies\n" + "Configure local label control and policies\n" + "Configure outbound label advertisement control\n" + "Configure explicit-null advertisement\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_explicit_null (vty, args); +} + +DEFUN (ldp_ttl_security_disable, + ldp_ttl_security_disable_cmd, + "ttl-security disable", + "LDP ttl security check\n" + "Disable ttl security\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_ttl_security (vty, args); +} + +DEFUN (ldp_session_holdtime_session_time, + ldp_session_holdtime_session_time_cmd, + "session holdtime (15-65535)", + "Configure session parameters\n" + "Configure session holdtime\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_session_holdtime (vty, args); +} + +DEFUN (ldp_interface_ifname, + ldp_interface_ifname_cmd, + "interface IFNAME", + "Enable LDP on an interface and enter interface submode\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_interface (vty, args); +} + +DEFUN (ldp_discovery_transport_address_ipv4, + ldp_discovery_transport_address_ipv4_cmd, + "discovery transport-address A.B.C.D", + "Configure discovery parameters\n" + "Specify transport address for TCP connection\n" + "IP address to be used as transport address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_trans_addr (vty, args); +} + +DEFUN (ldp_neighbor_ipv4_targeted, + ldp_neighbor_ipv4_targeted_cmd, + "neighbor A.B.C.D targeted", + "Configure neighbor parameters\n" + "IP address of neighbor\n" + "Establish targeted session\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_neighbor_targeted (vty, args); +} + +DEFUN 
(ldp_no_discovery_targeted_hello_accept, + ldp_no_discovery_targeted_hello_accept_cmd, + "no discovery targeted-hello accept", + "Negate a command or set its defaults\n" + "Configure discovery parameters\n" + "LDP Targeted Hellos\n" + "Accept and respond to targeted hellos\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "hello_type", .value = "targeted-hello" }, + NULL + }; + return ldp_vty_targeted_hello_accept (vty, args); +} + +DEFUN (ldp_no_label_local_advertise_explicit_null, + ldp_no_label_local_advertise_explicit_null_cmd, + "no label local advertise explicit-null", + "Negate a command or set its defaults\n" + "Configure label control and policies\n" + "Configure local label control and policies\n" + "Configure outbound label advertisement control\n" + "Configure explicit-null advertisement\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + NULL + }; + return ldp_vty_explicit_null (vty, args); +} + +DEFUN (ldp_no_ttl_security_disable, + ldp_no_ttl_security_disable_cmd, + "no ttl-security disable", + "Negate a command or set its defaults\n" + "LDP ttl security check\n" + "Disable ttl security\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + NULL + }; + return ldp_vty_ttl_security (vty, args); +} + +DEFUN (ldp_no_session_holdtime_session_time, + ldp_no_session_holdtime_session_time_cmd, + "no session holdtime (15-65535)", + "Negate a command or set its defaults\n" + "Configure session parameters\n" + "Configure session holdtime\n" + "Time (seconds)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "seconds", .value = argv[0] }, + NULL + }; + return ldp_vty_session_holdtime (vty, args); +} + +DEFUN (ldp_no_interface_ifname, + ldp_no_interface_ifname_cmd, + "no interface IFNAME", + "Negate a command or set its defaults\n" + "Enable LDP on an interface and enter interface submode\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_interface (vty, args); +} + +DEFUN (ldp_no_discovery_transport_address_ipv4, + ldp_no_discovery_transport_address_ipv4_cmd, + "no discovery transport-address A.B.C.D", + "Negate a command or set its defaults\n" + "Configure discovery parameters\n" + "Specify transport address for TCP connection\n" + "IP address to be used as transport address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_trans_addr (vty, args); +} + +DEFUN (ldp_no_neighbor_ipv4_targeted, + ldp_no_neighbor_ipv4_targeted_cmd, + "no neighbor A.B.C.D targeted", + "Negate a command or set its defaults\n" + "Configure neighbor parameters\n" + "IP address of neighbor\n" + "Establish targeted session\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_neighbor_targeted (vty, args); +} + +DEFUN (ldp_discovery_transport_address_ipv6, + ldp_discovery_transport_address_ipv6_cmd, + "discovery transport-address X:X::X:X", + "Configure discovery parameters\n" + "Specify transport address for TCP connection\n" + "IPv6 address to be used as transport address\n") +{ + struct vty_arg 
*args[] = + { + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_trans_addr (vty, args); +} + +DEFUN (ldp_neighbor_ipv6_targeted, + ldp_neighbor_ipv6_targeted_cmd, + "neighbor X:X::X:X targeted", + "Configure neighbor parameters\n" + "IPv6 address of neighbor\n" + "Establish targeted session\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_neighbor_targeted (vty, args); +} + +DEFUN (ldp_no_discovery_transport_address_ipv6, + ldp_no_discovery_transport_address_ipv6_cmd, + "no discovery transport-address X:X::X:X", + "Negate a command or set its defaults\n" + "Configure discovery parameters\n" + "Specify transport address for TCP connection\n" + "IPv6 address to be used as transport address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_trans_addr (vty, args); +} + +DEFUN (ldp_no_neighbor_ipv6_targeted, + ldp_no_neighbor_ipv6_targeted_cmd, + "no neighbor X:X::X:X targeted", + "Negate a command or set its defaults\n" + "Configure neighbor parameters\n" + "IPv6 address of neighbor\n" + "Establish targeted session\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_neighbor_targeted (vty, args); +} + +DEFUN (ldp_bridge_ifname, + ldp_bridge_ifname_cmd, + "bridge IFNAME", + "Bridge interface\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_bridge (vty, args); +} + +DEFUN (ldp_mtu_mtu, + ldp_mtu_mtu_cmd, + "mtu (1500-9180)", + "set Maximum Transmission Unit\n" + "Maximum Transmission Unit value\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "mtu", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_mtu (vty, args); +} + +DEFUN (ldp_member_interface_ifname, + ldp_member_interface_ifname_cmd, + "member interface IFNAME", + "L2VPN member configuration\n" + "Local interface\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_interface (vty, args); +} + +DEFUN (ldp_member_pseudowire_ifname, + ldp_member_pseudowire_ifname_cmd, + "member pseudowire IFNAME", + "L2VPN member configuration\n" + "Pseudowire interface\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pseudowire (vty, args); +} + +DEFUN (ldp_vc_type_pwtype, + ldp_vc_type_pwtype_cmd, + "vc type ", + "Virtual Circuit options\n" + "Virtual Circuit type to use\n" + "Ethernet (type 5)\n" + "Ethernet-tagged (type 4)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pwtype (vty, args); +} + +DEFUN (ldp_no_bridge_ifname, + ldp_no_bridge_ifname_cmd, + "no bridge IFNAME", + "Negate a command or set its defaults\n" + "Bridge interface\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_bridge (vty, args); +} + +DEFUN (ldp_no_mtu_mtu, + ldp_no_mtu_mtu_cmd, + "no mtu (1500-9180)", + "Negate a command or set its defaults\n" + 
"set Maximum Transmission Unit\n" + "Maximum Transmission Unit value\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "mtu", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_mtu (vty, args); +} + +DEFUN (ldp_no_member_interface_ifname, + ldp_no_member_interface_ifname_cmd, + "no member interface IFNAME", + "Negate a command or set its defaults\n" + "L2VPN member configuration\n" + "Local interface\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_interface (vty, args); +} + +DEFUN (ldp_no_member_pseudowire_ifname, + ldp_no_member_pseudowire_ifname_cmd, + "no member pseudowire IFNAME", + "Negate a command or set its defaults\n" + "L2VPN member configuration\n" + "Pseudowire interface\n" + "Interface's name\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "ifname", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pseudowire (vty, args); +} + +DEFUN (ldp_no_vc_type_pwtype, + ldp_no_vc_type_pwtype_cmd, + "no vc type ", + "Negate a command or set its defaults\n" + "Virtual Circuit options\n" + "Virtual Circuit type to use\n" + "Ethernet (type 5)\n" + "Ethernet-tagged (type 4)\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pwtype (vty, args); +} + +DEFUN (ldp_control_word_cword, + ldp_control_word_cword_cmd, + "control-word ", + "Control-word options\n" + "Exclude control-word in pseudowire packets\n" + "Include control-word in pseudowire packets\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "preference", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_cword (vty, args); +} + +DEFUN (ldp_neighbor_address_addr, + ldp_neighbor_address_addr_cmd, + "neighbor address ", + "Remote endpoint configuration\n" + "Specify the IPv4 or IPv6 address of the remote endpoint\n" + "IPv4 address\n" + "IPv6 address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_nbr_addr (vty, args); +} + +DEFUN (ldp_neighbor_lsr_id_ipv4, + ldp_neighbor_lsr_id_ipv4_cmd, + "neighbor lsr-id A.B.C.D", + "Remote endpoint configuration\n" + "Specify the LSR-ID of the remote endpoint\n" + "IPv4 address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "lsr-id", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_nbr_id (vty, args); +} + +DEFUN (ldp_pw_id_pwid, + ldp_pw_id_pwid_cmd, + "pw-id (1-4294967295)", + "Set the Virtual Circuit ID\n" + "Virtual Circuit ID value\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "pwid", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_pwid (vty, args); +} + +DEFUN (ldp_pw_status_disable, + ldp_pw_status_disable_cmd, + "pw-status disable", + "Configure PW status\n" + "Disable PW status\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_l2vpn_pw_pwstatus (vty, args); +} + +DEFUN (ldp_no_control_word_cword, + ldp_no_control_word_cword_cmd, + "no control-word ", + "Negate a command or set its defaults\n" + "Control-word options\n" + "Exclude control-word in pseudowire packets\n" + "Include control-word in pseudowire packets\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { 
.name = "no", .value = "no" }, + &(struct vty_arg) { .name = "preference", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_cword (vty, args); +} + +DEFUN (ldp_no_neighbor_address_addr, + ldp_no_neighbor_address_addr_cmd, + "no neighbor address ", + "Negate a command or set its defaults\n" + "Remote endpoint configuration\n" + "Specify the IPv4 or IPv6 address of the remote endpoint\n" + "IPv4 address\n" + "IPv6 address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_nbr_addr (vty, args); +} + +DEFUN (ldp_no_neighbor_lsr_id_ipv4, + ldp_no_neighbor_lsr_id_ipv4_cmd, + "no neighbor lsr-id A.B.C.D", + "Negate a command or set its defaults\n" + "Remote endpoint configuration\n" + "Specify the LSR-ID of the remote endpoint\n" + "IPv4 address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "lsr-id", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_nbr_id (vty, args); +} + +DEFUN (ldp_no_pw_id_pwid, + ldp_no_pw_id_pwid_cmd, + "no pw-id (1-4294967295)", + "Negate a command or set its defaults\n" + "Set the Virtual Circuit ID\n" + "Virtual Circuit ID value\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "pwid", .value = argv[0] }, + NULL + }; + return ldp_vty_l2vpn_pw_pwid (vty, args); +} + +DEFUN (ldp_no_pw_status_disable, + ldp_no_pw_status_disable_cmd, + "no pw-status disable", + "Negate a command or set its defaults\n" + "Configure PW status\n" + "Disable PW status\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + NULL + }; + return ldp_vty_l2vpn_pw_pwstatus (vty, args); +} + +DEFUN (ldp_show_mpls_ldp_neighbor, + ldp_show_mpls_ldp_neighbor_cmd, + "show mpls ldp neighbor", + "Show running system information\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Neighbor information\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_show_neighbor (vty, args); +} + +DEFUN (ldp_show_mpls_ldp_binding, + ldp_show_mpls_ldp_binding_cmd, + "show mpls ldp binding", + "Show running system information\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Label Information Base (LIB) information\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_show_binding (vty, args); +} + +DEFUN (ldp_show_mpls_ldp_discovery, + ldp_show_mpls_ldp_discovery_cmd, + "show mpls ldp discovery", + "Show running system information\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Discovery Hello Information\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_show_discovery (vty, args); +} + +DEFUN (ldp_show_mpls_ldp_interface, + ldp_show_mpls_ldp_interface_cmd, + "show mpls ldp interface", + "Show running system information\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "interface information\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_show_interface (vty, args); +} + +DEFUN (ldp_show_mpls_ldp_address_family_binding, + ldp_show_mpls_ldp_address_family_binding_cmd, + "show mpls ldp binding", + "Show running system information\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "IPv4 Address Family\n" + "IPv6 Address Family\n" + "Label Information Base (LIB) information\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "address-family", .value = argv[0] }, + NULL + }; + 
return ldp_vty_show_binding (vty, args); +} + +DEFUN (ldp_show_mpls_ldp_address_family_discovery, + ldp_show_mpls_ldp_address_family_discovery_cmd, + "show mpls ldp discovery", + "Show running system information\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "IPv4 Address Family\n" + "IPv6 Address Family\n" + "Discovery Hello Information\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "address-family", .value = argv[0] }, + NULL + }; + return ldp_vty_show_discovery (vty, args); +} + +DEFUN (ldp_show_mpls_ldp_address_family_interface, + ldp_show_mpls_ldp_address_family_interface_cmd, + "show mpls ldp interface", + "Show running system information\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "IPv4 Address Family\n" + "IPv6 Address Family\n" + "interface information\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "address-family", .value = argv[0] }, + NULL + }; + return ldp_vty_show_interface (vty, args); +} + +DEFUN (ldp_show_l2vpn_atom_binding, + ldp_show_l2vpn_atom_binding_cmd, + "show l2vpn atom binding", + "Show running system information\n" + "Show information about Layer2 VPN\n" + "Show Any Transport over MPLS information\n" + "Show AToM label binding information\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_show_atom_binding (vty, args); +} + +DEFUN (ldp_show_l2vpn_atom_vc, + ldp_show_l2vpn_atom_vc_cmd, + "show l2vpn atom vc", + "Show running system information\n" + "Show information about Layer2 VPN\n" + "Show Any Transport over MPLS information\n" + "Show AToM virtual circuit information\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_show_atom_vc (vty, args); +} + +DEFUN (ldp_show_debugging_mpls_ldp, + ldp_show_debugging_mpls_ldp_cmd, + "show debugging mpls ldp", + "Show running system information\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_show_debugging (vty, args); +} + +DEFUN (ldp_clear_mpls_ldp_neighbor, + ldp_clear_mpls_ldp_neighbor_cmd, + "clear mpls ldp neighbor", + "Reset functions\n" + "Reset MPLS statistical information\n" + "Clear LDP state\n" + "Clear LDP neighbor sessions\n") +{ + struct vty_arg *args[] = { NULL }; + return ldp_vty_clear_nbr (vty, args); +} + +DEFUN (ldp_clear_mpls_ldp_neighbor_addr, + ldp_clear_mpls_ldp_neighbor_addr_cmd, + "clear mpls ldp neighbor ", + "Reset functions\n" + "Reset MPLS statistical information\n" + "Clear LDP state\n" + "Clear LDP neighbor sessions\n" + "IPv4 address\n" + "IPv6 address\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "addr", .value = argv[0] }, + NULL + }; + return ldp_vty_clear_nbr (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_discovery_hello_dir, + ldp_debug_mpls_ldp_discovery_hello_dir_cmd, + "debug mpls ldp discovery hello ", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Discovery messages\n" + "Discovery hello message\n" + "Received messages\n" + "Sent messages\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "discovery" }, + &(struct vty_arg) { .name = "dir", .value = argv[0] }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_errors, + ldp_debug_mpls_ldp_errors_cmd, + "debug mpls ldp errors", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Errors\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "errors" }, + NULL + }; 
+ return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_event, + ldp_debug_mpls_ldp_event_cmd, + "debug mpls ldp event", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "LDP event information\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "event" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_messages_recv, + ldp_debug_mpls_ldp_messages_recv_cmd, + "debug mpls ldp messages recv", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Received messages, excluding periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "recv" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_messages_recv_all, + ldp_debug_mpls_ldp_messages_recv_all_cmd, + "debug mpls ldp messages recv all", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Received messages, excluding periodic Keep Alives\n" + "Received messages, including periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "recv" }, + &(struct vty_arg) { .name = "all", .value = "all" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_messages_sent, + ldp_debug_mpls_ldp_messages_sent_cmd, + "debug mpls ldp messages sent", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Sent messages, excluding periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "sent" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_messages_sent_all, + ldp_debug_mpls_ldp_messages_sent_all_cmd, + "debug mpls ldp messages sent all", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Sent messages, excluding periodic Keep Alives\n" + "Sent messages, including periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "sent" }, + &(struct vty_arg) { .name = "all", .value = "all" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_debug_mpls_ldp_zebra, + ldp_debug_mpls_ldp_zebra_cmd, + "debug mpls ldp zebra", + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "LDP zebra information\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "type", .value = "zebra" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_discovery_hello_dir, + ldp_no_debug_mpls_ldp_discovery_hello_dir_cmd, + "no debug mpls ldp discovery hello ", + "Negate a command or set its defaults\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Discovery messages\n" + "Discovery hello message\n" + "Received messages\n" + "Sent messages\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "discovery" }, + &(struct vty_arg) { .name = "dir", .value = argv[0] }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_errors, + 
ldp_no_debug_mpls_ldp_errors_cmd, + "no debug mpls ldp errors", + "Negate a command or set its defaults\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Errors\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "errors" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_event, + ldp_no_debug_mpls_ldp_event_cmd, + "no debug mpls ldp event", + "Negate a command or set its defaults\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "LDP event information\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "event" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_messages_recv, + ldp_no_debug_mpls_ldp_messages_recv_cmd, + "no debug mpls ldp messages recv", + "Negate a command or set its defaults\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Received messages, excluding periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "recv" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_messages_recv_all, + ldp_no_debug_mpls_ldp_messages_recv_all_cmd, + "no debug mpls ldp messages recv all", + "Negate a command or set its defaults\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Received messages, excluding periodic Keep Alives\n" + "Received messages, including periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "recv" }, + &(struct vty_arg) { .name = "all", .value = "all" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_messages_sent, + ldp_no_debug_mpls_ldp_messages_sent_cmd, + "no debug mpls ldp messages sent", + "Negate a command or set its defaults\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Sent messages, excluding periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "sent" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_messages_sent_all, + ldp_no_debug_mpls_ldp_messages_sent_all_cmd, + "no debug mpls ldp messages sent all", + "Negate a command or set its defaults\n" + "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "Messages\n" + "Sent messages, excluding periodic Keep Alives\n" + "Sent messages, including periodic Keep Alives\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "messages" }, + &(struct vty_arg) { .name = "dir", .value = "sent" }, + &(struct vty_arg) { .name = "all", .value = "all" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +DEFUN (ldp_no_debug_mpls_ldp_zebra, + ldp_no_debug_mpls_ldp_zebra_cmd, + "no debug mpls ldp zebra", + "Negate a command or set its defaults\n" 
+ "Debugging functions\n" + "MPLS information\n" + "Label Distribution Protocol\n" + "LDP zebra information\n") +{ + struct vty_arg *args[] = + { + &(struct vty_arg) { .name = "no", .value = "no" }, + &(struct vty_arg) { .name = "type", .value = "zebra" }, + NULL + }; + return ldp_vty_debug (vty, args); +} + +void +ldp_vty_init (void) +{ + install_element (CONFIG_NODE, &ldp_mpls_ldp_cmd); + install_element (CONFIG_NODE, &ldp_l2vpn_word_type_vpls_cmd); + install_element (CONFIG_NODE, &ldp_no_mpls_ldp_cmd); + install_element (CONFIG_NODE, &ldp_no_l2vpn_word_type_vpls_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_discovery_hello_dir_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_errors_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_event_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_messages_recv_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_messages_recv_all_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_messages_sent_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_messages_sent_all_cmd); + install_element (CONFIG_NODE, &ldp_debug_mpls_ldp_zebra_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_discovery_hello_dir_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_errors_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_event_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_messages_recv_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_messages_recv_all_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_messages_sent_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_messages_sent_all_cmd); + install_element (CONFIG_NODE, &ldp_no_debug_mpls_ldp_zebra_cmd); + install_node (&ldp_node, ldp_config_write); + install_default (LDP_NODE); + install_element (LDP_NODE, &ldp_address_family_ipv4_cmd); + install_element (LDP_NODE, &ldp_address_family_ipv6_cmd); + install_element (LDP_NODE, &ldp_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_NODE, &ldp_discovery_hello_interval_disc_time_cmd); + install_element (LDP_NODE, &ldp_discovery_targeted_hello_holdtime_disc_time_cmd); + install_element (LDP_NODE, &ldp_discovery_targeted_hello_interval_disc_time_cmd); + install_element (LDP_NODE, &ldp_dual_stack_transport_connection_prefer_ipv4_cmd); + install_element (LDP_NODE, &ldp_dual_stack_cisco_interop_cmd); + install_element (LDP_NODE, &ldp_neighbor_ipv4_password_word_cmd); + install_element (LDP_NODE, &ldp_neighbor_ipv4_session_holdtime_session_time_cmd); + install_element (LDP_NODE, &ldp_neighbor_ipv4_ttl_security_disable_cmd); + install_element (LDP_NODE, &ldp_neighbor_ipv4_ttl_security_hops_hops_cmd); + install_element (LDP_NODE, &ldp_router_id_ipv4_cmd); + install_element (LDP_NODE, &ldp_no_address_family_ipv4_cmd); + install_element (LDP_NODE, &ldp_no_address_family_ipv6_cmd); + install_element (LDP_NODE, &ldp_no_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_NODE, &ldp_no_discovery_hello_interval_disc_time_cmd); + install_element (LDP_NODE, &ldp_no_discovery_targeted_hello_holdtime_disc_time_cmd); + install_element (LDP_NODE, &ldp_no_discovery_targeted_hello_interval_disc_time_cmd); + install_element (LDP_NODE, &ldp_no_dual_stack_transport_connection_prefer_ipv4_cmd); + install_element (LDP_NODE, &ldp_no_dual_stack_cisco_interop_cmd); + install_element (LDP_NODE, &ldp_no_neighbor_ipv4_password_word_cmd); + install_element (LDP_NODE, &ldp_no_neighbor_ipv4_session_holdtime_session_time_cmd); + install_element 
(LDP_NODE, &ldp_no_neighbor_ipv4_ttl_security_disable_cmd); + install_element (LDP_NODE, &ldp_no_neighbor_ipv4_ttl_security_hops_hops_cmd); + install_element (LDP_NODE, &ldp_no_router_id_ipv4_cmd); + install_node (&ldp_ipv4_node, NULL); + install_default (LDP_IPV4_NODE); + install_element (LDP_IPV4_NODE, &ldp_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_discovery_hello_interval_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_discovery_targeted_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_discovery_targeted_hello_interval_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_discovery_targeted_hello_accept_cmd); + install_element (LDP_IPV4_NODE, &ldp_label_local_advertise_explicit_null_cmd); + install_element (LDP_IPV4_NODE, &ldp_ttl_security_disable_cmd); + install_element (LDP_IPV4_NODE, &ldp_session_holdtime_session_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_interface_ifname_cmd); + install_element (LDP_IPV4_NODE, &ldp_discovery_transport_address_ipv4_cmd); + install_element (LDP_IPV4_NODE, &ldp_neighbor_ipv4_targeted_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_discovery_hello_interval_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_discovery_targeted_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_discovery_targeted_hello_interval_disc_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_discovery_targeted_hello_accept_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_label_local_advertise_explicit_null_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_ttl_security_disable_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_session_holdtime_session_time_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_interface_ifname_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_discovery_transport_address_ipv4_cmd); + install_element (LDP_IPV4_NODE, &ldp_no_neighbor_ipv4_targeted_cmd); + install_node (&ldp_ipv6_node, NULL); + install_default (LDP_IPV6_NODE); + install_element (LDP_IPV6_NODE, &ldp_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_discovery_hello_interval_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_discovery_targeted_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_discovery_targeted_hello_interval_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_discovery_targeted_hello_accept_cmd); + install_element (LDP_IPV6_NODE, &ldp_label_local_advertise_explicit_null_cmd); + install_element (LDP_IPV6_NODE, &ldp_ttl_security_disable_cmd); + install_element (LDP_IPV6_NODE, &ldp_session_holdtime_session_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_interface_ifname_cmd); + install_element (LDP_IPV6_NODE, &ldp_discovery_transport_address_ipv6_cmd); + install_element (LDP_IPV6_NODE, &ldp_neighbor_ipv6_targeted_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_discovery_hello_interval_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_discovery_targeted_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_discovery_targeted_hello_interval_disc_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_discovery_targeted_hello_accept_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_label_local_advertise_explicit_null_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_ttl_security_disable_cmd); + install_element (LDP_IPV6_NODE, 
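The registration block above follows the stock Quagga CLI bootstrap: each prompt is described by a struct cmd_node, registered with install_node() together with an optional config-write callback, given the common exit/end/list commands via install_default(), and then populated with install_element() calls. A condensed, illustrative version of that wiring; example_node and example_ldp_cli_init() are hypothetical, mirroring the real ldp_node defined in ldp_vty_conf.c below:

static struct cmd_node example_node =
{
  LDP_NODE,             /* node identifier      */
  "%s(config-ldp)# ",   /* prompt format string */
  1,                    /* vtysh flag           */
};

static void
example_ldp_cli_init (void)
{
  install_node (&example_node, ldp_config_write); /* callback used by "write" */
  install_default (LDP_NODE);                     /* exit/end/quit/list/...   */
  install_element (LDP_NODE, &ldp_router_id_ipv4_cmd);
}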
&ldp_no_session_holdtime_session_time_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_interface_ifname_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_discovery_transport_address_ipv6_cmd); + install_element (LDP_IPV6_NODE, &ldp_no_neighbor_ipv6_targeted_cmd); + install_node (&ldp_ipv4_iface_node, NULL); + install_default (LDP_IPV4_IFACE_NODE); + install_element (LDP_IPV4_IFACE_NODE, &ldp_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV4_IFACE_NODE, &ldp_discovery_hello_interval_disc_time_cmd); + install_element (LDP_IPV4_IFACE_NODE, &ldp_no_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV4_IFACE_NODE, &ldp_no_discovery_hello_interval_disc_time_cmd); + install_node (&ldp_ipv6_iface_node, NULL); + install_default (LDP_IPV6_IFACE_NODE); + install_element (LDP_IPV6_IFACE_NODE, &ldp_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV6_IFACE_NODE, &ldp_discovery_hello_interval_disc_time_cmd); + install_element (LDP_IPV6_IFACE_NODE, &ldp_no_discovery_hello_holdtime_disc_time_cmd); + install_element (LDP_IPV6_IFACE_NODE, &ldp_no_discovery_hello_interval_disc_time_cmd); + install_node (&ldp_l2vpn_node, ldp_l2vpn_config_write); + install_default (LDP_L2VPN_NODE); + install_element (LDP_L2VPN_NODE, &ldp_bridge_ifname_cmd); + install_element (LDP_L2VPN_NODE, &ldp_mtu_mtu_cmd); + install_element (LDP_L2VPN_NODE, &ldp_member_interface_ifname_cmd); + install_element (LDP_L2VPN_NODE, &ldp_member_pseudowire_ifname_cmd); + install_element (LDP_L2VPN_NODE, &ldp_vc_type_pwtype_cmd); + install_element (LDP_L2VPN_NODE, &ldp_no_bridge_ifname_cmd); + install_element (LDP_L2VPN_NODE, &ldp_no_mtu_mtu_cmd); + install_element (LDP_L2VPN_NODE, &ldp_no_member_interface_ifname_cmd); + install_element (LDP_L2VPN_NODE, &ldp_no_member_pseudowire_ifname_cmd); + install_element (LDP_L2VPN_NODE, &ldp_no_vc_type_pwtype_cmd); + install_node (&ldp_pseudowire_node, NULL); + install_default (LDP_PSEUDOWIRE_NODE); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_control_word_cword_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_neighbor_address_addr_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_neighbor_lsr_id_ipv4_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_pw_id_pwid_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_pw_status_disable_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_no_control_word_cword_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_no_neighbor_address_addr_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_no_neighbor_lsr_id_ipv4_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_no_pw_id_pwid_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &ldp_no_pw_status_disable_cmd); + install_node (&ldp_debug_node, ldp_debug_config_write); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_discovery_hello_dir_cmd); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_errors_cmd); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_event_cmd); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_messages_recv_cmd); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_messages_recv_all_cmd); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_messages_sent_cmd); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_messages_sent_all_cmd); + install_element (ENABLE_NODE, &ldp_debug_mpls_ldp_zebra_cmd); + install_element (ENABLE_NODE, &ldp_no_debug_mpls_ldp_discovery_hello_dir_cmd); + install_element (ENABLE_NODE, &ldp_no_debug_mpls_ldp_errors_cmd); + install_element (ENABLE_NODE, &ldp_no_debug_mpls_ldp_event_cmd); + install_element (ENABLE_NODE, 
&ldp_no_debug_mpls_ldp_messages_recv_cmd); + install_element (ENABLE_NODE, &ldp_no_debug_mpls_ldp_messages_recv_all_cmd); + install_element (ENABLE_NODE, &ldp_no_debug_mpls_ldp_messages_sent_cmd); + install_element (ENABLE_NODE, &ldp_no_debug_mpls_ldp_messages_sent_all_cmd); + install_element (ENABLE_NODE, &ldp_no_debug_mpls_ldp_zebra_cmd); + install_element (VIEW_NODE, &ldp_show_mpls_ldp_neighbor_cmd); + install_element (VIEW_NODE, &ldp_show_mpls_ldp_binding_cmd); + install_element (VIEW_NODE, &ldp_show_mpls_ldp_discovery_cmd); + install_element (VIEW_NODE, &ldp_show_mpls_ldp_interface_cmd); + install_element (VIEW_NODE, &ldp_show_mpls_ldp_address_family_binding_cmd); + install_element (VIEW_NODE, &ldp_show_mpls_ldp_address_family_discovery_cmd); + install_element (VIEW_NODE, &ldp_show_mpls_ldp_address_family_interface_cmd); + install_element (VIEW_NODE, &ldp_show_l2vpn_atom_binding_cmd); + install_element (VIEW_NODE, &ldp_show_l2vpn_atom_vc_cmd); + install_element (VIEW_NODE, &ldp_show_debugging_mpls_ldp_cmd); + install_element (VIEW_NODE, &ldp_clear_mpls_ldp_neighbor_cmd); + install_element (VIEW_NODE, &ldp_clear_mpls_ldp_neighbor_addr_cmd); +} \ No newline at end of file diff --git a/ldpd/ldp_vty_conf.c b/ldpd/ldp_vty_conf.c new file mode 100644 index 0000000000..e5acada180 --- /dev/null +++ b/ldpd/ldp_vty_conf.c @@ -0,0 +1,1803 @@ +/* + * Copyright (C) 2016 by Open Source Routing. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" + +#include "command.h" +#include "vrf.h" +#include "if.h" +#include "vty.h" +#include "ldp_vty.h" + +static int interface_config_write(struct vty *); +static void ldp_af_iface_config_write(struct vty *, int); +static void ldp_af_config_write(struct vty *, int, struct ldpd_conf *, + struct ldpd_af_conf *); +static void ldp_l2vpn_pw_config_write(struct vty *, struct l2vpn_pw *); +static int ldp_vty_get_af(struct vty *); +static int ldp_iface_is_configured(struct ldpd_conf *, const char *); +static int ldp_vty_nbr_session_holdtime(struct vty *, struct vty_arg *[]); +static int ldp_vty_af_session_holdtime(struct vty *, struct vty_arg *[]); + +static struct cmd_node interface_node = +{ + INTERFACE_NODE, + "%s(config-if)# ", + 1 +}; + +struct cmd_node ldp_node = +{ + LDP_NODE, + "%s(config-ldp)# ", + 1, +}; + +struct cmd_node ldp_ipv4_node = +{ + LDP_IPV4_NODE, + "%s(config-ldp-af)# ", + 1, +}; + +struct cmd_node ldp_ipv6_node = +{ + LDP_IPV6_NODE, + "%s(config-ldp-af)# ", + 1, +}; + +struct cmd_node ldp_ipv4_iface_node = +{ + LDP_IPV4_IFACE_NODE, + "%s(config-ldp-af-if)# ", + 1, +}; + +struct cmd_node ldp_ipv6_iface_node = +{ + LDP_IPV6_IFACE_NODE, + "%s(config-ldp-af-if)# ", + 1, +}; + +struct cmd_node ldp_l2vpn_node = +{ + LDP_L2VPN_NODE, + "%s(config-l2vpn)# ", + 1, +}; + +struct cmd_node ldp_pseudowire_node = +{ + LDP_PSEUDOWIRE_NODE, + "%s(config-l2vpn-pw)# ", + 1, +}; + +int +ldp_get_address(const char *str, int *af, union ldpd_addr *addr) +{ + memset(addr, 0, sizeof(*addr)); + + if (inet_pton(AF_INET, str, &addr->v4) == 1) { + *af = AF_INET; + return (0); + } + + if (inet_pton(AF_INET6, str, &addr->v6) == 1) { + *af = AF_INET6; + return (0); + } + + return (-1); +} + +static int +interface_config_write(struct vty *vty) +{ + struct listnode *node; + struct interface *ifp; + int write = 0; + + for (ALL_LIST_ELEMENTS_RO(vrf_iflist (VRF_DEFAULT), node, ifp)) { + vty_out(vty, "!%s", VTY_NEWLINE); + vty_out(vty, "interface %s%s", ifp->name, VTY_NEWLINE); + if (ifp->desc) + vty_out(vty, " description %s%s", ifp->desc, + VTY_NEWLINE); + + write++; + } + + return (write); +} + +static void +ldp_af_iface_config_write(struct vty *vty, int af) +{ + struct iface *iface; + struct iface_af *ia; + + LIST_FOREACH(iface, &ldpd_conf->iface_list, entry) { + ia = iface_af_get(iface, af); + if (!ia->enabled) + continue; + + vty_out(vty, " !%s", VTY_NEWLINE); + vty_out(vty, " interface %s%s", iface->name, VTY_NEWLINE); + + if (ia->hello_holdtime != LINK_DFLT_HOLDTIME && + ia->hello_holdtime != 0) + vty_out(vty, " discovery hello holdtime %u%s", + ia->hello_holdtime, VTY_NEWLINE); + if (ia->hello_interval != DEFAULT_HELLO_INTERVAL && + ia->hello_interval != 0) + vty_out(vty, " discovery hello interval %u%s", + ia->hello_interval, VTY_NEWLINE); + } +} + +static void +ldp_af_config_write(struct vty *vty, int af, struct ldpd_conf *conf, + struct ldpd_af_conf *af_conf) +{ + struct tnbr *tnbr; + + if (!(af_conf->flags & F_LDPD_AF_ENABLED)) + return; + + vty_out(vty, " !%s", VTY_NEWLINE); + vty_out(vty, " address-family %s%s", af_name(af), VTY_NEWLINE); + + if (af_conf->lhello_holdtime != LINK_DFLT_HOLDTIME && + af_conf->lhello_holdtime != 0 ) + vty_out(vty, " discovery hello holdtime %u%s", + af_conf->lhello_holdtime, VTY_NEWLINE); + if (af_conf->lhello_interval != DEFAULT_HELLO_INTERVAL && + af_conf->lhello_interval != 0) + vty_out(vty, " discovery hello interval %u%s", + af_conf->lhello_interval, VTY_NEWLINE); + + if 
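ldp_get_address() above probes the string with inet_pton() for IPv4 first and IPv6 second, reporting the detected family through *af. A small usage sketch under those assumptions; the wrapper function is hypothetical:

static int
example_parse_addr (struct vty *vty, const char *str)
{
  int af;
  union ldpd_addr addr;

  if (ldp_get_address (str, &af, &addr) == -1) {
    vty_out (vty, "%% Malformed address%s", VTY_NEWLINE);
    return CMD_WARNING;
  }

  /* af is now AF_INET or AF_INET6; the value sits in addr.v4 or addr.v6. */
  return CMD_SUCCESS;
}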
(af_conf->flags & F_LDPD_AF_THELLO_ACCEPT) + vty_out(vty, " discovery targeted-hello accept%s", + VTY_NEWLINE); + + if (af_conf->thello_holdtime != TARGETED_DFLT_HOLDTIME && + af_conf->thello_holdtime != 0) + vty_out(vty, " discovery targeted-hello holdtime %u%s", + af_conf->thello_holdtime, VTY_NEWLINE); + if (af_conf->thello_interval != DEFAULT_HELLO_INTERVAL && + af_conf->thello_interval != 0) + vty_out(vty, " discovery targeted-hello interval %u%s", + af_conf->thello_interval, VTY_NEWLINE); + + if (ldp_addrisset(af, &af_conf->trans_addr)) + vty_out(vty, " discovery transport-address %s%s", + log_addr(af, &af_conf->trans_addr), VTY_NEWLINE); + else + vty_out(vty, " ! Incomplete config, specify a discovery " + "transport-address%s", VTY_NEWLINE); + + if (af_conf->flags & F_LDPD_AF_EXPNULL) + vty_out(vty, " label local advertise explicit-null%s", + VTY_NEWLINE); + + if (af_conf->flags & F_LDPD_AF_NO_GTSM) + vty_out(vty, " ttl-security disable%s", VTY_NEWLINE); + + if (af_conf->keepalive != DEFAULT_KEEPALIVE) + vty_out(vty, " session holdtime %u%s", af_conf->keepalive, + VTY_NEWLINE); + + LIST_FOREACH(tnbr, &ldpd_conf->tnbr_list, entry) { + if (tnbr->af == af) { + vty_out(vty, " !%s", VTY_NEWLINE); + vty_out(vty, " neighbor %s targeted%s", + log_addr(tnbr->af, &tnbr->addr), VTY_NEWLINE); + } + } + + ldp_af_iface_config_write(vty, af); + + vty_out(vty, " !%s", VTY_NEWLINE); +} + +int +ldp_config_write(struct vty *vty) +{ + struct nbr_params *nbrp; + + if (!(ldpd_conf->flags & F_LDPD_ENABLED)) + return (0); + + vty_out(vty, "mpls ldp%s", VTY_NEWLINE); + + if (ldpd_conf->rtr_id.s_addr != 0) + vty_out(vty, " router-id %s%s", + inet_ntoa(ldpd_conf->rtr_id), VTY_NEWLINE); + + if (ldpd_conf->lhello_holdtime != LINK_DFLT_HOLDTIME && + ldpd_conf->lhello_holdtime != 0) + vty_out(vty, " discovery hello holdtime %u%s", + ldpd_conf->lhello_holdtime, VTY_NEWLINE); + if (ldpd_conf->lhello_interval != DEFAULT_HELLO_INTERVAL && + ldpd_conf->lhello_interval != 0) + vty_out(vty, " discovery hello interval %u%s", + ldpd_conf->lhello_interval, VTY_NEWLINE); + + if (ldpd_conf->thello_holdtime != TARGETED_DFLT_HOLDTIME && + ldpd_conf->thello_holdtime != 0) + vty_out(vty, " discovery targeted-hello holdtime %u%s", + ldpd_conf->thello_holdtime, VTY_NEWLINE); + if (ldpd_conf->thello_interval != DEFAULT_HELLO_INTERVAL && + ldpd_conf->thello_interval != 0) + vty_out(vty, " discovery targeted-hello interval %u%s", + ldpd_conf->thello_interval, VTY_NEWLINE); + + if (ldpd_conf->trans_pref == DUAL_STACK_LDPOV4) + vty_out(vty, " dual-stack transport-connection prefer ipv4%s", + VTY_NEWLINE); + + if (ldpd_conf->flags & F_LDPD_DS_CISCO_INTEROP) + vty_out(vty, " dual-stack cisco-interop%s", VTY_NEWLINE); + + LIST_FOREACH(nbrp, &ldpd_conf->nbrp_list, entry) { + if (nbrp->flags & F_NBRP_KEEPALIVE) + vty_out(vty, " neighbor %s session holdtime %u%s", + inet_ntoa(nbrp->lsr_id), nbrp->keepalive, + VTY_NEWLINE); + + if (nbrp->flags & F_NBRP_GTSM) { + if (nbrp->gtsm_enabled) + vty_out(vty, " neighbor %s ttl-security hops " + "%u%s", inet_ntoa(nbrp->lsr_id), + nbrp->gtsm_hops, VTY_NEWLINE); + else + vty_out(vty, " neighbor %s ttl-security " + "disable%s", inet_ntoa(nbrp->lsr_id), + VTY_NEWLINE); + } + + if (nbrp->auth.method == AUTH_MD5SIG) + vty_out(vty, " neighbor %s password %s%s", + inet_ntoa(nbrp->lsr_id), nbrp->auth.md5key, + VTY_NEWLINE); + } + + ldp_af_config_write(vty, AF_INET, ldpd_conf, &ldpd_conf->ipv4); + ldp_af_config_write(vty, AF_INET6, ldpd_conf, &ldpd_conf->ipv6); + vty_out(vty, " !%s", VTY_NEWLINE); + vty_out(vty, 
"!%s", VTY_NEWLINE); + + return (1); +} + +static void +ldp_l2vpn_pw_config_write(struct vty *vty, struct l2vpn_pw *pw) +{ + int missing_lsrid = 0; + int missing_pwid = 0; + + vty_out(vty, " !%s", VTY_NEWLINE); + vty_out(vty, " member pseudowire %s%s", pw->ifname, VTY_NEWLINE); + + if (pw->lsr_id.s_addr != INADDR_ANY) + vty_out(vty, " neighbor lsr-id %s%s", inet_ntoa(pw->lsr_id), + VTY_NEWLINE); + else + missing_lsrid = 1; + + if (pw->flags & F_PW_STATIC_NBR_ADDR) + vty_out(vty, " neighbor address %s%s", log_addr(pw->af, + &pw->addr), VTY_NEWLINE); + + if (pw->pwid != 0) + vty_out(vty, " pw-id %u%s", pw->pwid, VTY_NEWLINE); + else + missing_pwid = 1; + + if (!(pw->flags & F_PW_CWORD_CONF)) + vty_out(vty, " control-word exclude%s", VTY_NEWLINE); + + if (!(pw->flags & F_PW_STATUSTLV_CONF)) + vty_out(vty, " pw-status disable%s", VTY_NEWLINE); + + if (missing_lsrid) + vty_out(vty, " ! Incomplete config, specify a neighbor " + "lsr-id%s", VTY_NEWLINE); + if (missing_pwid) + vty_out(vty, " ! Incomplete config, specify a pw-id%s", + VTY_NEWLINE); +} + +int +ldp_l2vpn_config_write(struct vty *vty) +{ + struct l2vpn *l2vpn; + struct l2vpn_if *lif; + struct l2vpn_pw *pw; + + LIST_FOREACH(l2vpn, &ldpd_conf->l2vpn_list, entry) { + vty_out(vty, "l2vpn %s type vpls%s", l2vpn->name, VTY_NEWLINE); + + if (l2vpn->pw_type != DEFAULT_PW_TYPE) + vty_out(vty, " vc type ethernet-tagged%s", VTY_NEWLINE); + + if (l2vpn->mtu != DEFAULT_L2VPN_MTU) + vty_out(vty, " mtu %u%s", l2vpn->mtu, VTY_NEWLINE); + + if (l2vpn->br_ifname[0] != '\0') + vty_out(vty, " bridge %s%s", l2vpn->br_ifname, + VTY_NEWLINE); + + LIST_FOREACH(lif, &l2vpn->if_list, entry) + vty_out(vty, " member interface %s%s", lif->ifname, + VTY_NEWLINE); + + LIST_FOREACH(pw, &l2vpn->pw_list, entry) + ldp_l2vpn_pw_config_write(vty, pw); + LIST_FOREACH(pw, &l2vpn->pw_inactive_list, entry) + ldp_l2vpn_pw_config_write(vty, pw); + + vty_out(vty, " !%s", VTY_NEWLINE); + vty_out(vty, "!%s", VTY_NEWLINE); + } + + return (0); +} + +static int +ldp_vty_get_af(struct vty *vty) +{ + switch (vty->node) { + case LDP_IPV4_NODE: + case LDP_IPV4_IFACE_NODE: + return (AF_INET); + case LDP_IPV6_NODE: + case LDP_IPV6_IFACE_NODE: + return (AF_INET6); + default: + fatalx("ldp_vty_get_af: unexpected node"); + } +} + +static int +ldp_iface_is_configured(struct ldpd_conf *xconf, const char *ifname) +{ + struct l2vpn *l2vpn; + + if (if_lookup_name(xconf, ifname)) + return (1); + + LIST_FOREACH(l2vpn, &xconf->l2vpn_list, entry) { + if (l2vpn_if_find_name(l2vpn, ifname)) + return (1); + if (l2vpn_pw_find_name(l2vpn, ifname)) + return (1); + } + + return (0); +} + +int +ldp_vty_mpls_ldp(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + int disable; + + vty_conf = ldp_dup_config(ldpd_conf); + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + + if (disable) + vty_conf->flags &= ~F_LDPD_ENABLED; + else { + vty->node = LDP_NODE; + vty_conf->flags |= F_LDPD_ENABLED; + } + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +int +ldp_vty_address_family(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + int af; + const char *af_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
1 : 0; + af_str = vty_get_arg_value(args, "address-family"); + + vty_conf = ldp_dup_config(ldpd_conf); + if (strcmp(af_str, "ipv4") == 0) { + af = AF_INET; + af_conf = &vty_conf->ipv4; + } else if (strcmp(af_str, "ipv6") == 0) { + af = AF_INET6; + af_conf = &vty_conf->ipv6; + } else + return (CMD_WARNING); + + if (disable) { + af_conf->flags &= ~F_LDPD_AF_ENABLED; + ldp_reload(vty_conf); + return (CMD_SUCCESS); + } + + switch (af) { + case AF_INET: + vty->node = LDP_IPV4_NODE; + break; + case AF_INET6: + vty->node = LDP_IPV6_NODE; + break; + default: + fatalx("ldp_vty_address_family: unknown af"); + } + af_conf->flags |= F_LDPD_AF_ENABLED; + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +int +ldp_vty_disc_holdtime(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + struct iface *iface; + struct iface_af *ia; + int af; + char *ep; + long int secs; + enum hello_type hello_type; + const char *seconds_str; + const char *hello_type_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + seconds_str = vty_get_arg_value(args, "seconds"); + hello_type_str = vty_get_arg_value(args, "hello_type"); + + secs = strtol(seconds_str, &ep, 10); + if (*ep != '\0' || secs < MIN_HOLDTIME || secs > MAX_HOLDTIME) { + vty_out(vty, "%% Invalid holdtime%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + if (hello_type_str[0] == 'h') + hello_type = HELLO_LINK; + else + hello_type = HELLO_TARGETED; + + switch (vty->node) { + case LDP_NODE: + vty_conf = ldp_dup_config(ldpd_conf); + if (disable) { + switch (hello_type) { + case HELLO_LINK: + vty_conf->lhello_holdtime = LINK_DFLT_HOLDTIME; + break; + case HELLO_TARGETED: + vty_conf->thello_holdtime = + TARGETED_DFLT_HOLDTIME; + break; + } + } else { + switch (hello_type) { + case HELLO_LINK: + vty_conf->lhello_holdtime = secs; + break; + case HELLO_TARGETED: + vty_conf->thello_holdtime = secs; + break; + } + } + ldp_reload(vty_conf); + break; + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + vty_conf = ldp_dup_config(ldpd_conf); + af = ldp_vty_get_af(vty); + af_conf = ldp_af_conf_get(vty_conf, af); + + if (disable) { + switch (hello_type) { + case HELLO_LINK: + af_conf->lhello_holdtime = 0; + break; + case HELLO_TARGETED: + af_conf->thello_holdtime = 0; + break; + } + } else { + switch (hello_type) { + case HELLO_LINK: + af_conf->lhello_holdtime = secs; + break; + case HELLO_TARGETED: + af_conf->thello_holdtime = secs; + break; + } + } + ldp_reload(vty_conf); + break; + case LDP_IPV4_IFACE_NODE: + case LDP_IPV6_IFACE_NODE: + af = ldp_vty_get_af(vty); + iface = VTY_GET_CONTEXT(iface); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&iface); + + ia = iface_af_get(iface, af); + if (disable) + ia->hello_holdtime = 0; + else + ia->hello_holdtime = secs; + ldp_reload_ref(vty_conf, (void **)&iface); + break; + default: + fatalx("ldp_vty_disc_holdtime: unexpected node"); + } + + return (CMD_SUCCESS); +} + +int +ldp_vty_disc_interval(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + struct iface *iface; + struct iface_af *ia; + int af; + char *ep; + long int secs; + enum hello_type hello_type; + const char *seconds_str; + const char *hello_type_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
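Every configuration handler in this file follows the same transaction shape: duplicate the running configuration with ldp_dup_config(), mutate the copy, and hand it back with ldp_reload(); when validation fails after the copy was made, the copy is discarded with ldp_clear_config() instead. A stripped-down sketch of that lifecycle, using one of the flags from this file purely as an example:

static int
example_toggle (struct vty *vty, struct vty_arg *args[])
{
  struct ldpd_conf *vty_conf;
  int disable = vty_get_arg_value (args, "no") ? 1 : 0;

  vty_conf = ldp_dup_config (ldpd_conf);     /* work on a private copy */

  if (disable)
    vty_conf->flags &= ~F_LDPD_DS_CISCO_INTEROP;
  else
    vty_conf->flags |= F_LDPD_DS_CISCO_INTEROP;

  ldp_reload (vty_conf);                     /* apply the modified copy */
  return CMD_SUCCESS;
}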
1 : 0; + seconds_str = vty_get_arg_value(args, "seconds"); + hello_type_str = vty_get_arg_value(args, "hello_type"); + + secs = strtol(seconds_str, &ep, 10); + if (*ep != '\0' || secs < MIN_HELLO_INTERVAL || + secs > MAX_HELLO_INTERVAL) { + vty_out(vty, "%% Invalid interval%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + if (hello_type_str[0] == 'h') + hello_type = HELLO_LINK; + else + hello_type = HELLO_TARGETED; + + switch (vty->node) { + case LDP_NODE: + vty_conf = ldp_dup_config(ldpd_conf); + if (disable) { + switch (hello_type) { + case HELLO_LINK: + vty_conf->lhello_interval = LINK_DFLT_HOLDTIME; + break; + case HELLO_TARGETED: + vty_conf->thello_interval = + TARGETED_DFLT_HOLDTIME; + break; + } + } else { + switch (hello_type) { + case HELLO_LINK: + vty_conf->lhello_interval = secs; + break; + case HELLO_TARGETED: + vty_conf->thello_interval = secs; + break; + } + } + ldp_reload(vty_conf); + break; + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + vty_conf = ldp_dup_config(ldpd_conf); + af = ldp_vty_get_af(vty); + af_conf = ldp_af_conf_get(vty_conf, af); + + if (disable) { + switch (hello_type) { + case HELLO_LINK: + af_conf->lhello_interval = 0; + break; + case HELLO_TARGETED: + af_conf->thello_interval = 0; + break; + } + } else { + switch (hello_type) { + case HELLO_LINK: + af_conf->lhello_interval = secs; + break; + case HELLO_TARGETED: + af_conf->thello_interval = secs; + break; + } + } + ldp_reload(vty_conf); + break; + case LDP_IPV4_IFACE_NODE: + case LDP_IPV6_IFACE_NODE: + af = ldp_vty_get_af(vty); + iface = VTY_GET_CONTEXT(iface); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&iface); + + ia = iface_af_get(iface, af); + if (disable) + ia->hello_interval = 0; + else + ia->hello_interval = secs; + ldp_reload_ref(vty_conf, (void **)&iface); + break; + default: + fatalx("ldp_vty_disc_interval: unexpected node"); + } + + return (CMD_SUCCESS); +} + +int +ldp_vty_targeted_hello_accept(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + int af; + int disable; + + vty_conf = ldp_dup_config(ldpd_conf); + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + + af = ldp_vty_get_af(vty); + af_conf = ldp_af_conf_get(vty_conf, af); + + if (disable) + af_conf->flags &= ~F_LDPD_AF_THELLO_ACCEPT; + else + af_conf->flags |= F_LDPD_AF_THELLO_ACCEPT; + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +static int +ldp_vty_nbr_session_holdtime(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + char *ep; + long int secs; + struct in_addr lsr_id; + struct nbr_params *nbrp; + const char *seconds_str; + const char *lsr_id_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
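Numeric arguments are validated the same way throughout: strtol() with an end-pointer check to reject trailing garbage, followed by a range check against the protocol limits (MIN/MAX_HOLDTIME, MIN/MAX_HELLO_INTERVAL, and so on). A hypothetical helper capturing that idiom:

static int
example_parse_secs (struct vty *vty, const char *str, long min, long max,
    long *out)
{
  char *ep;
  long secs;

  secs = strtol (str, &ep, 10);
  if (*ep != '\0' || secs < min || secs > max) {
    vty_out (vty, "%% Invalid value%s", VTY_NEWLINE);
    return -1;
  }

  *out = secs;
  return 0;
}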
1 : 0; + seconds_str = vty_get_arg_value(args, "seconds"); + lsr_id_str = vty_get_arg_value(args, "lsr_id"); + + if (inet_pton(AF_INET, lsr_id_str, &lsr_id) != 1 || + bad_addr_v4(lsr_id)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + vty_conf = ldp_dup_config(ldpd_conf); + nbrp = nbr_params_find(vty_conf, lsr_id); + + secs = strtol(seconds_str, &ep, 10); + if (*ep != '\0' || secs < MIN_KEEPALIVE || secs > MAX_KEEPALIVE) { + vty_out(vty, "%% Invalid holdtime%s", VTY_NEWLINE); + goto cancel; + } + + if (disable) { + if (nbrp == NULL) + goto cancel; + + nbrp->keepalive = 0; + nbrp->flags &= ~F_NBRP_KEEPALIVE; + } else { + if (nbrp == NULL) { + nbrp = nbr_params_new(lsr_id); + LIST_INSERT_HEAD(&vty_conf->nbrp_list, nbrp, entry); + } else if (nbrp->keepalive == secs) + goto cancel; + + nbrp->keepalive = secs; + nbrp->flags |= F_NBRP_KEEPALIVE; + } + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +static int +ldp_vty_af_session_holdtime(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + int af; + char *ep; + long int secs; + const char *seconds_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + seconds_str = vty_get_arg_value(args, "seconds"); + + secs = strtol(seconds_str, &ep, 10); + if (*ep != '\0' || secs < MIN_KEEPALIVE || secs > MAX_KEEPALIVE) { + vty_out(vty, "%% Invalid holdtime%s", VTY_NEWLINE); + return (CMD_SUCCESS); + } + + vty_conf = ldp_dup_config(ldpd_conf); + af = ldp_vty_get_af(vty); + af_conf = ldp_af_conf_get(vty_conf, af); + + if (disable) + af_conf->keepalive = DEFAULT_KEEPALIVE; + else + af_conf->keepalive = secs; + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +int +ldp_vty_session_holdtime(struct vty *vty, struct vty_arg *args[]) +{ + switch (vty->node) { + case LDP_NODE: + return (ldp_vty_nbr_session_holdtime(vty, args)); + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + return (ldp_vty_af_session_holdtime(vty, args)); + default: + fatalx("ldp_vty_session_holdtime: unexpected node"); + } +} + +int +ldp_vty_interface(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + int af; + struct iface *iface; + struct iface_af *ia; + struct interface *ifp; + struct kif kif; + const char *ifname; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
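Per-neighbor settings (session holdtime, password, TTL security) all share a find-or-create step: look the LSR-ID up with nbr_params_find(), allocate with nbr_params_new() and link it into the list only if it does not exist yet, then mark which attribute is explicitly configured in nbrp->flags so that ldp_config_write() knows to emit it. The shared skeleton, isolated as a fragment with error handling elided:

nbrp = nbr_params_find (vty_conf, lsr_id);
if (nbrp == NULL) {
  nbrp = nbr_params_new (lsr_id);
  LIST_INSERT_HEAD (&vty_conf->nbrp_list, nbrp, entry);
}
nbrp->keepalive = secs;            /* the attribute being configured   */
nbrp->flags |= F_NBRP_KEEPALIVE;   /* emitted later by ldp_config_write */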
1 : 0; + ifname = vty_get_arg_value(args, "ifname"); + + vty_conf = ldp_dup_config(ldpd_conf); + af = ldp_vty_get_af(vty); + iface = if_lookup_name(vty_conf, ifname); + + if (disable) { + if (iface == NULL) + goto cancel; + + ia = iface_af_get(iface, af); + if (ia->enabled == 0) + goto cancel; + + ia->enabled = 0; + ia->hello_holdtime = 0; + ia->hello_interval = 0; + ldp_reload(vty_conf); + return (CMD_SUCCESS); + } + + if (iface == NULL) { + if (ldp_iface_is_configured(vty_conf, ifname)) { + vty_out(vty, "%% Interface is already in use%s", + VTY_NEWLINE); + goto cancel; + } + + ifp = if_lookup_by_name(ifname); + memset(&kif, 0, sizeof(kif)); + strlcpy(kif.ifname, ifname, sizeof(kif.ifname)); + if (ifp) { + kif.ifindex = ifp->ifindex; + kif.flags = ifp->flags; + } + iface = if_new(&kif); + + ia = iface_af_get(iface, af); + ia->enabled = 1; + LIST_INSERT_HEAD(&vty_conf->iface_list, iface, entry); + ldp_reload_ref(vty_conf, (void **)&iface); + } else { + memset(&kif, 0, sizeof(kif)); + strlcpy(kif.ifname, ifname, sizeof(kif.ifname)); + + ia = iface_af_get(iface, af); + if (!ia->enabled) { + ia->enabled = 1; + ldp_reload_ref(vty_conf, (void **)&iface); + } + } + + switch (af) { + case AF_INET: + VTY_PUSH_CONTEXT(LDP_IPV4_IFACE_NODE, iface); + break; + case AF_INET6: + VTY_PUSH_CONTEXT(LDP_IPV6_IFACE_NODE, iface); + break; + default: + break; + } + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_trans_addr(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + int af; + const char *addr_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + addr_str = vty_get_arg_value(args, "addr"); + + vty_conf = ldp_dup_config(ldpd_conf); + af = ldp_vty_get_af(vty); + af_conf = ldp_af_conf_get(vty_conf, af); + + if (disable) + memset(&af_conf->trans_addr, 0, sizeof(af_conf->trans_addr)); + else { + if (inet_pton(af, addr_str, &af_conf->trans_addr) != 1 || + bad_addr(af, &af_conf->trans_addr)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + goto cancel; + } + } + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_neighbor_targeted(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + int af; + union ldpd_addr addr; + struct tnbr *tnbr; + const char *addr_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
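Handlers that run inside a sub-node (interface, l2vpn, pseudowire) retrieve their object with VTY_GET_CONTEXT() and use the _ref variants of ldp_dup_config()/ldp_reload(), which also carry the object pointer across the configuration copy so it stays meaningful. Conversely, after creating a new object the handler enters its node with VTY_PUSH_CONTEXT(). A compressed sketch of both directions, reusing the names from the code above:

/* entering the per-interface node after "interface IFNAME" */
VTY_PUSH_CONTEXT (LDP_IPV4_IFACE_NODE, iface);

/* inside that node: fetch the object and keep it valid across the copy */
iface = VTY_GET_CONTEXT (iface);
vty_conf = ldp_dup_config_ref (ldpd_conf, (void **)&iface);
ia = iface_af_get (iface, af);
ia->hello_holdtime = secs;
ldp_reload_ref (vty_conf, (void **)&iface);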
1 : 0; + addr_str = vty_get_arg_value(args, "addr"); + + af = ldp_vty_get_af(vty); + + if (inet_pton(af, addr_str, &addr) != 1 || + bad_addr(af, &addr)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + return (CMD_WARNING); + } + if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr.v6)) { + vty_out(vty, "%% Address can not be link-local%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + vty_conf = ldp_dup_config(ldpd_conf); + tnbr = tnbr_find(vty_conf, af, &addr); + + if (disable) { + if (tnbr == NULL) + goto cancel; + + LIST_REMOVE(tnbr, entry); + free(tnbr); + ldp_reload(vty_conf); + return (CMD_SUCCESS); + } + + if (tnbr) + goto cancel; + + tnbr = tnbr_new(af, &addr); + tnbr->flags |= F_TNBR_CONFIGURED; + LIST_INSERT_HEAD(&vty_conf->tnbr_list, tnbr, entry); + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_explicit_null(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + int af; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + + vty_conf = ldp_dup_config(ldpd_conf); + af = ldp_vty_get_af(vty); + af_conf = ldp_af_conf_get(vty_conf, af); + + if (disable) + af_conf->flags &= ~F_LDPD_AF_EXPNULL; + else + af_conf->flags |= F_LDPD_AF_EXPNULL; + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +int +ldp_vty_ttl_security(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct ldpd_af_conf *af_conf; + int af; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + + vty_conf = ldp_dup_config(ldpd_conf); + af = ldp_vty_get_af(vty); + af_conf = ldp_af_conf_get(vty_conf, af); + + if (disable) + af_conf->flags &= ~F_LDPD_AF_NO_GTSM; + else + af_conf->flags |= F_LDPD_AF_NO_GTSM; + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +int +ldp_vty_router_id(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + const char *addr_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + addr_str = vty_get_arg_value(args, "addr"); + + vty_conf = ldp_dup_config(ldpd_conf); + + if (disable) + vty_conf->rtr_id.s_addr = INADDR_ANY; + else { + if (inet_pton(AF_INET, addr_str, &vty_conf->rtr_id) != 1 || + bad_addr_v4(vty_conf->rtr_id)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + goto cancel; + } + } + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_ds_cisco_interop(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + + vty_conf = ldp_dup_config(ldpd_conf); + + if (disable) + vty_conf->flags &= ~F_LDPD_DS_CISCO_INTEROP; + else + vty_conf->flags |= F_LDPD_DS_CISCO_INTEROP; + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +int +ldp_vty_trans_pref_ipv4(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
1 : 0; + + vty_conf = ldp_dup_config(ldpd_conf); + + if (disable) + vty_conf->trans_pref = DUAL_STACK_LDPOV6; + else + vty_conf->trans_pref = DUAL_STACK_LDPOV4; + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); +} + +int +ldp_vty_neighbor_password(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct in_addr lsr_id; + size_t password_len; + struct nbr_params *nbrp; + const char *lsr_id_str; + const char *password_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + lsr_id_str = vty_get_arg_value(args, "lsr_id"); + password_str = vty_get_arg_value(args, "password"); + + if (inet_pton(AF_INET, lsr_id_str, &lsr_id) != 1 || + bad_addr_v4(lsr_id)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + vty_conf = ldp_dup_config(ldpd_conf); + nbrp = nbr_params_find(vty_conf, lsr_id); + + if (disable) { + if (nbrp == NULL) + goto cancel; + + memset(&nbrp->auth, 0, sizeof(nbrp->auth)); + nbrp->auth.method = AUTH_NONE; + } else { + if (nbrp == NULL) { + nbrp = nbr_params_new(lsr_id); + LIST_INSERT_HEAD(&vty_conf->nbrp_list, nbrp, entry); + } else if (nbrp->auth.method == AUTH_MD5SIG && + strcmp(nbrp->auth.md5key, password_str) == 0) + goto cancel; + + password_len = strlcpy(nbrp->auth.md5key, password_str, + sizeof(nbrp->auth.md5key)); + if (password_len >= sizeof(nbrp->auth.md5key)) + vty_out(vty, "%% password has been truncated to %zu " + "characters.", sizeof(nbrp->auth.md5key) - 1); + nbrp->auth.md5key_len = password_len; + nbrp->auth.method = AUTH_MD5SIG; + } + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_neighbor_ttl_security(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct in_addr lsr_id; + struct nbr_params *nbrp; + long int hops = 0; + char *ep; + const char *lsr_id_str; + const char *hops_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + lsr_id_str = vty_get_arg_value(args, "lsr_id"); + hops_str = vty_get_arg_value(args, "hops"); + + if (inet_pton(AF_INET, lsr_id_str, &lsr_id) != 1 || + bad_addr_v4(lsr_id)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + if (hops_str) { + hops = strtol(hops_str, &ep, 10); + if (*ep != '\0' || hops < 1 || hops > 254) { + vty_out(vty, "%% Invalid hop count%s", VTY_NEWLINE); + return (CMD_SUCCESS); + } + } + + vty_conf = ldp_dup_config(ldpd_conf); + nbrp = nbr_params_find(vty_conf, lsr_id); + + if (disable) { + if (nbrp == NULL) + goto cancel; + + nbrp->flags &= ~(F_NBRP_GTSM|F_NBRP_GTSM_HOPS); + nbrp->gtsm_enabled = 0; + nbrp->gtsm_hops = 0; + } else { + if (nbrp == NULL) { + nbrp = nbr_params_new(lsr_id); + LIST_INSERT_HEAD(&vty_conf->nbrp_list, nbrp, entry); + } + + nbrp->flags |= F_NBRP_GTSM; + nbrp->flags &= ~F_NBRP_GTSM_HOPS; + if (hops_str) { + nbrp->gtsm_enabled = 1; + nbrp->gtsm_hops = hops; + nbrp->flags |= F_NBRP_GTSM_HOPS; + } else + nbrp->gtsm_enabled = 0; + } + + ldp_reload(vty_conf); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn *l2vpn; + const char *name_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
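The password handler above relies on strlcpy()'s return value, which is the length of the source string, to detect truncation against the fixed-size md5key buffer. The same check in isolation; the buffer size here is illustrative only:

char md5key[16];                       /* illustrative size only */
size_t len = strlcpy (md5key, password_str, sizeof(md5key));
if (len >= sizeof(md5key))
  vty_out (vty, "%% password has been truncated to %zu characters.",
           sizeof(md5key) - 1);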
1 : 0; + name_str = vty_get_arg_value(args, "name"); + + vty_conf = ldp_dup_config(ldpd_conf); + l2vpn = l2vpn_find(vty_conf, name_str); + + if (disable) { + if (l2vpn == NULL) + goto cancel; + + LIST_REMOVE(l2vpn, entry); + l2vpn_del(l2vpn); + ldp_reload(vty_conf); + return (CMD_SUCCESS); + } + + if (l2vpn) { + VTY_PUSH_CONTEXT(LDP_L2VPN_NODE, l2vpn); + goto cancel; + } + + l2vpn = l2vpn_new(name_str); + l2vpn->type = L2VPN_TYPE_VPLS; + LIST_INSERT_HEAD(&vty_conf->l2vpn_list, l2vpn, entry); + + ldp_reload(vty_conf); + VTY_PUSH_CONTEXT(LDP_L2VPN_NODE, l2vpn); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_bridge(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn *l2vpn; + const char *ifname; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + ifname = vty_get_arg_value(args, "ifname"); + + l2vpn = VTY_GET_CONTEXT(l2vpn); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&l2vpn); + + if (disable) + memset(l2vpn->br_ifname, 0, sizeof(l2vpn->br_ifname)); + else + strlcpy(l2vpn->br_ifname, ifname, sizeof(l2vpn->br_ifname)); + + ldp_reload_ref(vty_conf, (void **)&l2vpn); + + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_mtu(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn *l2vpn; + char *ep; + int mtu; + const char *mtu_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + mtu_str = vty_get_arg_value(args, "mtu"); + + mtu = strtol(mtu_str, &ep, 10); + if (*ep != '\0' || mtu < MIN_L2VPN_MTU || mtu > MAX_L2VPN_MTU) { + vty_out(vty, "%% Invalid MTU%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + l2vpn = VTY_GET_CONTEXT(l2vpn); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&l2vpn); + + if (disable) + l2vpn->mtu = DEFAULT_L2VPN_MTU; + else + l2vpn->mtu = mtu; + + ldp_reload_ref(vty_conf, (void **)&l2vpn); + + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_pwtype(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn *l2vpn; + int pw_type; + const char *type_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + type_str = vty_get_arg_value(args, "type"); + + if (strcmp(type_str, "ethernet") == 0) + pw_type = PW_TYPE_ETHERNET; + else + pw_type = PW_TYPE_ETHERNET_TAGGED; + + l2vpn = VTY_GET_CONTEXT(l2vpn); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&l2vpn); + + if (disable) + l2vpn->pw_type = DEFAULT_PW_TYPE; + else + l2vpn->pw_type = pw_type; + + ldp_reload_ref(vty_conf, (void **)&l2vpn); + + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_interface(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn *l2vpn; + struct l2vpn_if *lif; + struct interface *ifp; + struct kif kif; + const char *ifname; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
1 : 0; + ifname = vty_get_arg_value(args, "ifname"); + + l2vpn = VTY_GET_CONTEXT(l2vpn); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&l2vpn); + l2vpn = l2vpn_find(vty_conf, l2vpn->name); + lif = l2vpn_if_find_name(l2vpn, ifname); + + if (disable) { + if (lif == NULL) + goto cancel; + + LIST_REMOVE(lif, entry); + free(lif); + ldp_reload(vty_conf); + return (CMD_SUCCESS); + } + + if (lif) + goto cancel; + + if (ldp_iface_is_configured(vty_conf, ifname)) { + vty_out(vty, "%% Interface is already in use%s", VTY_NEWLINE); + goto cancel; + } + + ifp = if_lookup_by_name(ifname); + memset(&kif, 0, sizeof(kif)); + strlcpy(kif.ifname, ifname, sizeof(kif.ifname)); + if (ifp) { + kif.ifindex = ifp->ifindex; + kif.flags = ifp->flags; + } + + lif = l2vpn_if_new(l2vpn, &kif); + LIST_INSERT_HEAD(&l2vpn->if_list, lif, entry); + + ldp_reload_ref(vty_conf, (void **)&l2vpn); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_pseudowire(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn *l2vpn; + struct l2vpn_pw *pw; + struct interface *ifp; + struct kif kif; + const char *ifname; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + ifname = vty_get_arg_value(args, "ifname"); + + l2vpn = VTY_GET_CONTEXT(l2vpn); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&l2vpn); + pw = l2vpn_pw_find_name(l2vpn, ifname); + + if (disable) { + if (pw == NULL) + goto cancel; + + LIST_REMOVE(pw, entry); + free(pw); + ldp_reload(vty_conf); + return (CMD_SUCCESS); + } + + if (pw) { + VTY_PUSH_CONTEXT(LDP_PSEUDOWIRE_NODE, pw); + goto cancel; + } + + if (ldp_iface_is_configured(vty_conf, ifname)) { + vty_out(vty, "%% Interface is already in use%s", VTY_NEWLINE); + goto cancel; + } + + ifp = if_lookup_by_name(ifname); + memset(&kif, 0, sizeof(kif)); + strlcpy(kif.ifname, ifname, sizeof(kif.ifname)); + if (ifp) { + kif.ifindex = ifp->ifindex; + kif.flags = ifp->flags; + } + + pw = l2vpn_pw_new(l2vpn, &kif); + pw->flags = F_PW_STATUSTLV_CONF|F_PW_CWORD_CONF; + LIST_INSERT_HEAD(&l2vpn->pw_inactive_list, pw, entry); + + ldp_reload_ref(vty_conf, (void **)&pw); + VTY_PUSH_CONTEXT(LDP_PSEUDOWIRE_NODE, pw); + + return (CMD_SUCCESS); + +cancel: + ldp_clear_config(vty_conf); + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_pw_cword(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn_pw *pw; + const char *preference_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + preference_str = vty_get_arg_value(args, "preference"); + + pw = VTY_GET_CONTEXT(l2vpn_pw); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&pw); + + if (disable) + pw->flags |= F_PW_CWORD_CONF; + else { + if (preference_str[0] == 'e') + pw->flags &= ~F_PW_CWORD_CONF; + else + pw->flags |= F_PW_CWORD_CONF; + } + + ldp_reload_ref(vty_conf, (void **)&pw); + + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_pw_nbr_addr(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn_pw *pw; + int af; + union ldpd_addr addr; + const char *addr_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 
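New interfaces, l2vpn member interfaces and pseudowires are all seeded from a struct kif snapshot of the kernel interface: the name is always copied, while ifindex and flags are filled in only when the interface is already known (if_lookup_by_name() may return NULL for a name that does not exist yet). The recurring prologue, shown on its own:

struct interface *ifp;
struct kif kif;

memset (&kif, 0, sizeof(kif));
strlcpy (kif.ifname, ifname, sizeof(kif.ifname));
ifp = if_lookup_by_name (ifname);
if (ifp) {
  kif.ifindex = ifp->ifindex;
  kif.flags = ifp->flags;
}
/* kif can now seed if_new(), l2vpn_if_new() or l2vpn_pw_new() */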
1 : 0; + addr_str = vty_get_arg_value(args, "addr"); + + if (ldp_get_address(addr_str, &af, &addr) == -1 || + bad_addr(af, &addr)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + pw = VTY_GET_CONTEXT(l2vpn_pw); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&pw); + + if (disable) { + pw->af = AF_UNSPEC; + memset(&pw->addr, 0, sizeof(pw->addr)); + pw->flags &= ~F_PW_STATIC_NBR_ADDR; + } else { + pw->af = af; + pw->addr = addr; + pw->flags |= F_PW_STATIC_NBR_ADDR; + } + + ldp_reload_ref(vty_conf, (void **)&pw); + + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_pw_nbr_id(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn_pw *pw; + struct in_addr lsr_id; + const char *lsr_id_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + lsr_id_str = vty_get_arg_value(args, "lsr-id"); + + if (inet_pton(AF_INET, lsr_id_str, &lsr_id) != 1 || + bad_addr_v4(lsr_id)) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + pw = VTY_GET_CONTEXT(l2vpn_pw); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&pw); + + if (disable) + pw->lsr_id.s_addr = INADDR_ANY; + else + pw->lsr_id = lsr_id; + + ldp_reload_ref(vty_conf, (void **)&pw); + + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_pw_pwid(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn_pw *pw; + char *ep; + uint32_t pwid; + const char *pwid_str; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + pwid_str = vty_get_arg_value(args, "pwid"); + + pwid = strtol(pwid_str, &ep, 10); + if (*ep != '\0' || pwid < MIN_PWID_ID || pwid > MAX_PWID_ID) { + vty_out(vty, "%% Invalid pw-id%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + pw = VTY_GET_CONTEXT(l2vpn_pw); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&pw); + + if (disable) + pw->pwid = 0; + else + pw->pwid = pwid; + + ldp_reload_ref(vty_conf, (void **)&pw); + + return (CMD_SUCCESS); +} + +int +ldp_vty_l2vpn_pw_pwstatus(struct vty *vty, struct vty_arg *args[]) +{ + struct ldpd_conf *vty_conf; + struct l2vpn_pw *pw; + int disable; + + disable = (vty_get_arg_value(args, "no")) ? 1 : 0; + + pw = VTY_GET_CONTEXT(l2vpn_pw); + vty_conf = ldp_dup_config_ref(ldpd_conf, (void **)&pw); + + if (disable) + pw->flags |= F_PW_STATUSTLV_CONF; + else + pw->flags &= ~F_PW_STATUSTLV_CONF; + + ldp_reload_ref(vty_conf, (void **)&pw); + + return (CMD_SUCCESS); +} + +void +ldp_vty_if_init(void) +{ + /* Install interface node. */ + install_node (&interface_node, interface_config_write); + + install_element(CONFIG_NODE, &interface_cmd); + install_element(CONFIG_NODE, &no_interface_cmd); + install_default(INTERFACE_NODE); + + /* "description" commands. 
*/ + install_element(INTERFACE_NODE, &interface_desc_cmd); + install_element(INTERFACE_NODE, &no_interface_desc_cmd); +} + +struct iface * +iface_new_api(struct ldpd_conf *conf, const char *name) +{ + const char *ifname = name; + struct iface *iface; + struct interface *ifp; + struct kif kif; + + if (ldp_iface_is_configured(conf, ifname)) + return NULL; + + memset(&kif, 0, sizeof(kif)); + strlcpy(kif.ifname, ifname, sizeof(kif.ifname)); + ifp = if_lookup_by_name(ifname); + if (ifp) { + kif.ifindex = ifp->ifindex; + kif.flags = ifp->flags; + } + + iface = if_new(&kif); + LIST_INSERT_HEAD(&conf->iface_list, iface, entry); + return (iface); +} + +void +iface_del_api(struct iface *iface) +{ + LIST_REMOVE(iface, entry); + free(iface); +} + +struct tnbr * +tnbr_new_api(struct ldpd_conf *conf, int af, union ldpd_addr *addr) +{ + struct tnbr *tnbr; + + if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6)) + return (NULL); + + if (tnbr_find(conf, af, addr)) + return (NULL); + + tnbr = tnbr_new(af, addr); + tnbr->flags |= F_TNBR_CONFIGURED; + LIST_INSERT_HEAD(&conf->tnbr_list, tnbr, entry); + return (tnbr); +} + +void +tnbr_del_api(struct tnbr *tnbr) +{ + LIST_REMOVE(tnbr, entry); + free(tnbr); +} + +struct nbr_params * +nbrp_new_api(struct ldpd_conf *conf, struct in_addr lsr_id) +{ + struct nbr_params *nbrp; + + if (nbr_params_find(conf, lsr_id)) + return (NULL); + + nbrp = nbr_params_new(lsr_id); + LIST_INSERT_HEAD(&conf->nbrp_list, nbrp, entry); + return (nbrp); +} + +void +nbrp_del_api(struct nbr_params *nbrp) +{ + LIST_REMOVE(nbrp, entry); + free(nbrp); +} + +struct l2vpn * +l2vpn_new_api(struct ldpd_conf *conf, const char *name) +{ + struct l2vpn *l2vpn; + + if (l2vpn_find(conf, name)) + return (NULL); + + l2vpn = l2vpn_new(name); + l2vpn->type = L2VPN_TYPE_VPLS; + LIST_INSERT_HEAD(&conf->l2vpn_list, l2vpn, entry); + return (l2vpn); +} + +void +l2vpn_del_api(struct l2vpn *l2vpn) +{ + struct l2vpn_if *lif; + struct l2vpn_pw *pw; + + while ((lif = LIST_FIRST(&l2vpn->if_list)) != NULL) { + LIST_REMOVE(lif, entry); + free(lif); + } + while ((pw = LIST_FIRST(&l2vpn->pw_list)) != NULL) { + LIST_REMOVE(pw, entry); + free(pw); + } + while ((pw = LIST_FIRST(&l2vpn->pw_inactive_list)) != NULL) { + LIST_REMOVE(pw, entry); + free(pw); + } + LIST_REMOVE(l2vpn, entry); + free(l2vpn); +} + +struct l2vpn_if * +l2vpn_if_new_api(struct ldpd_conf *conf, struct l2vpn *l2vpn, + const char *ifname) +{ + struct l2vpn_if *lif; + struct interface *ifp; + struct kif kif; + + if (ldp_iface_is_configured(conf, ifname)) + return (NULL); + + memset(&kif, 0, sizeof(kif)); + strlcpy(kif.ifname, ifname, sizeof(kif.ifname)); + ifp = if_lookup_by_name(ifname); + if (ifp) { + kif.ifindex = ifp->ifindex; + kif.flags = ifp->flags; + } + + lif = l2vpn_if_new(l2vpn, &kif); + LIST_INSERT_HEAD(&l2vpn->if_list, lif, entry); + return (lif); +} + +void +l2vpn_if_del_api(struct l2vpn_if *lif) +{ + LIST_REMOVE(lif, entry); + free(lif); +} + +struct l2vpn_pw * +l2vpn_pw_new_api(struct ldpd_conf *conf, struct l2vpn *l2vpn, + const char *ifname) +{ + struct l2vpn_pw *pw; + struct interface *ifp; + struct kif kif; + + if (ldp_iface_is_configured(conf, ifname)) + return (NULL); + + memset(&kif, 0, sizeof(kif)); + strlcpy(kif.ifname, ifname, sizeof(kif.ifname)); + ifp = if_lookup_by_name(ifname); + if (ifp) { + kif.ifindex = ifp->ifindex; + kif.flags = ifp->flags; + } + + pw = l2vpn_pw_new(l2vpn, &kif); + pw->flags = F_PW_STATUSTLV_CONF|F_PW_CWORD_CONF; + LIST_INSERT_HEAD(&l2vpn->pw_inactive_list, pw, entry); + return (pw); +} + +void 
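The *_new_api()/*_del_api() helpers below give external callers symmetric create/remove entry points; membership in the configuration is expressed purely through BSD queue(3) LIST macros, so removal is LIST_REMOVE() plus free(), and tearing down a whole l2vpn drains each sub-list with LIST_FIRST() until empty. A tiny usage sketch under those assumptions; conf is assumed to be an in-scope struct ldpd_conf pointer:

struct tnbr *tnbr;
union ldpd_addr addr;
int af;

/* hypothetical caller: add, then remove, a configured targeted neighbor */
if (ldp_get_address ("192.0.2.1", &af, &addr) == 0 &&
    (tnbr = tnbr_new_api (conf, af, &addr)) != NULL) {
  /* ... later ... */
  tnbr_del_api (tnbr);     /* LIST_REMOVE() + free() */
}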
+l2vpn_pw_del_api(struct l2vpn_pw *pw) +{ + LIST_REMOVE(pw, entry); + free(pw); +} diff --git a/ldpd/ldp_vty_exec.c b/ldpd/ldp_vty_exec.c new file mode 100644 index 0000000000..a57cf3c3f6 --- /dev/null +++ b/ldpd/ldp_vty_exec.c @@ -0,0 +1,667 @@ +/* + * Copyright (C) 2016 by Open Source Routing. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" +#include "ldp_vty.h" + +#include "command.h" +#include "vty.h" +#include "mpls.h" + +enum show_command { + SHOW_DISC, + SHOW_IFACE, + SHOW_NBR, + SHOW_LIB, + SHOW_L2VPN_PW, + SHOW_L2VPN_BINDING +}; + +struct show_filter { + int family; + union ldpd_addr addr; + uint8_t prefixlen; +}; + +#define LDPBUFSIZ 65535 + +static int show_interface_msg(struct vty *, struct imsg *, + struct show_filter *); +static void show_discovery_adj(struct vty *, char *, + struct ctl_adj *); +static int show_discovery_msg(struct vty *, struct imsg *, + struct show_filter *); +static void show_nbr_adj(struct vty *, char *, struct ctl_adj *); +static int show_nbr_msg(struct vty *, struct imsg *, + struct show_filter *); +static int show_lib_msg(struct vty *, struct imsg *, + struct show_filter *); +static int show_l2vpn_binding_msg(struct vty *, struct imsg *); +static int show_l2vpn_pw_msg(struct vty *, struct imsg *); +static int ldp_vty_connect(struct imsgbuf *); +static int ldp_vty_dispatch(struct vty *, struct imsgbuf *, + enum show_command, struct show_filter *); +static int ldp_vty_get_af(const char *, int *); + +static int +show_interface_msg(struct vty *vty, struct imsg *imsg, + struct show_filter *filter) +{ + struct ctl_iface *iface; + char timers[BUFSIZ]; + + switch (imsg->hdr.type) { + case IMSG_CTL_SHOW_INTERFACE: + iface = imsg->data; + + if (filter->family != AF_UNSPEC && filter->family != iface->af) + break; + + snprintf(timers, sizeof(timers), "%u/%u", + iface->hello_interval, iface->hello_holdtime); + + vty_out(vty, "%-4s %-11s %-6s %-8s %-12s %3u%s", + af_name(iface->af), iface->name, + if_state_name(iface->state), iface->uptime == 0 ? 
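ldp_vty_exec.c implements the show commands on top of the imsg control channel: each received struct imsg is handed to one of the show_*_msg() callbacks, which print whatever matches the filter and return 1 once the IMSG_CTL_END sentinel arrives so the dispatch loop can stop. Skeleton of such a callback; the output format is a placeholder, only the message types and fields come from the code above:

static int
example_show_msg (struct vty *vty, struct imsg *imsg,
    struct show_filter *filter)
{
  struct ctl_iface *iface;

  switch (imsg->hdr.type) {
  case IMSG_CTL_SHOW_INTERFACE:
    iface = imsg->data;
    if (filter->family != AF_UNSPEC && filter->family != iface->af)
      break;                       /* filtered out, keep reading       */
    vty_out (vty, "%s%s", iface->name, VTY_NEWLINE);
    break;
  case IMSG_CTL_END:
    return (1);                    /* tells the dispatcher we are done */
  default:
    break;
  }
  return (0);
}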
+ "00:00:00" : log_time(iface->uptime), timers, + iface->adj_cnt, VTY_NEWLINE); + break; + case IMSG_CTL_END: + vty_out(vty, "%s", VTY_NEWLINE); + return (1); + default: + break; + } + + return (0); +} + +static void +show_discovery_adj(struct vty *vty, char *buffer, struct ctl_adj *adj) +{ + size_t buflen = strlen(buffer); + + snprintf(buffer + buflen, LDPBUFSIZ - buflen, + " LDP Id: %s:0, Transport address: %s%s", + inet_ntoa(adj->id), log_addr(adj->af, + &adj->trans_addr), VTY_NEWLINE); + buflen = strlen(buffer); + snprintf(buffer + buflen, LDPBUFSIZ - buflen, + " Hold time: %u sec%s", adj->holdtime, VTY_NEWLINE); +} + +static int +show_discovery_msg(struct vty *vty, struct imsg *imsg, + struct show_filter *filter) +{ + struct ctl_adj *adj; + struct ctl_disc_if *iface; + struct ctl_disc_tnbr *tnbr; + struct in_addr rtr_id; + union ldpd_addr *trans_addr; + size_t buflen; + static char ifaces_buffer[LDPBUFSIZ]; + static char tnbrs_buffer[LDPBUFSIZ]; + + switch (imsg->hdr.type) { + case IMSG_CTL_SHOW_DISCOVERY: + ifaces_buffer[0] = '\0'; + tnbrs_buffer[0] = '\0'; + break; + case IMSG_CTL_SHOW_DISC_IFACE: + iface = imsg->data; + + if (filter->family != AF_UNSPEC && + ((filter->family == AF_INET && !iface->active_v4) || + (filter->family == AF_INET6 && !iface->active_v6))) + break; + + buflen = strlen(ifaces_buffer); + snprintf(ifaces_buffer + buflen, LDPBUFSIZ - buflen, + " %s: %s%s", iface->name, (iface->no_adj) ? + "xmit" : "xmit/recv", VTY_NEWLINE); + break; + case IMSG_CTL_SHOW_DISC_TNBR: + tnbr = imsg->data; + + if (filter->family != AF_UNSPEC && filter->family != tnbr->af) + break; + + trans_addr = &(ldp_af_conf_get(ldpd_conf, + tnbr->af))->trans_addr; + buflen = strlen(tnbrs_buffer); + snprintf(tnbrs_buffer + buflen, LDPBUFSIZ - buflen, + " %s -> %s: %s%s", log_addr(tnbr->af, trans_addr), + log_addr(tnbr->af, &tnbr->addr), (tnbr->no_adj) ? 
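show_discovery_adj() and the discovery handler accumulate per-adjacency lines into fixed static buffers instead of printing immediately, because the interface and targeted-hello summaries are only flushed, in separate sections, when IMSG_CTL_END arrives. The append idiom, isolated as a hypothetical helper:

static char ifaces_buffer[LDPBUFSIZ];

static void
example_append (const char *name)
{
  size_t buflen = strlen (ifaces_buffer);

  /* always write at the current end, never past LDPBUFSIZ */
  snprintf (ifaces_buffer + buflen, LDPBUFSIZ - buflen,
      "      %s: xmit/recv%s", name, VTY_NEWLINE);
}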
"xmit" : + "xmit/recv", VTY_NEWLINE); + break; + case IMSG_CTL_SHOW_DISC_ADJ: + adj = imsg->data; + + if (filter->family != AF_UNSPEC && filter->family != adj->af) + break; + + switch(adj->type) { + case HELLO_LINK: + show_discovery_adj(vty, ifaces_buffer, adj); + break; + case HELLO_TARGETED: + show_discovery_adj(vty, tnbrs_buffer, adj); + break; + } + break; + case IMSG_CTL_END: + rtr_id.s_addr = ldp_rtr_id_get(ldpd_conf); + vty_out(vty, "Local LDP Identifier: %s:0%s", inet_ntoa(rtr_id), + VTY_NEWLINE); + vty_out(vty, "Discovery Sources:%s", VTY_NEWLINE); + vty_out(vty, " Interfaces:%s", VTY_NEWLINE); + vty_out(vty, "%s", ifaces_buffer); + vty_out(vty, " Targeted Hellos:%s", VTY_NEWLINE); + vty_out(vty, "%s", tnbrs_buffer); + vty_out(vty, "%s", VTY_NEWLINE); + return (1); + default: + break; + } + + return (0); +} + +static void +show_nbr_adj(struct vty *vty, char *buffer, struct ctl_adj *adj) +{ + size_t buflen = strlen(buffer); + + switch (adj->type) { + case HELLO_LINK: + snprintf(buffer + buflen, LDPBUFSIZ - buflen, + " Interface: %s%s", adj->ifname, VTY_NEWLINE); + break; + case HELLO_TARGETED: + snprintf(buffer + buflen, LDPBUFSIZ - buflen, + " Targeted Hello: %s%s", log_addr(adj->af, + &adj->src_addr), VTY_NEWLINE); + break; + } +} + +static int +show_nbr_msg(struct vty *vty, struct imsg *imsg, struct show_filter *filter) +{ + struct ctl_adj *adj; + struct ctl_nbr *nbr; + static char v4adjs_buffer[LDPBUFSIZ]; + static char v6adjs_buffer[LDPBUFSIZ]; + + switch (imsg->hdr.type) { + case IMSG_CTL_SHOW_NBR: + nbr = imsg->data; + + v4adjs_buffer[0] = '\0'; + v6adjs_buffer[0] = '\0'; + vty_out(vty, "Peer LDP Identifier: %s:0%s", inet_ntoa(nbr->id), + VTY_NEWLINE); + vty_out(vty, " TCP connection: %s:%u - %s:%u%s", + log_addr(nbr->af, &nbr->laddr), ntohs(nbr->lport), + log_addr(nbr->af, &nbr->raddr), ntohs(nbr->rport), + VTY_NEWLINE); + vty_out(vty, " Session Holdtime: %u sec%s", nbr->holdtime, + VTY_NEWLINE); + vty_out(vty, " State: %s; Downstream-Unsolicited%s", + nbr_state_name(nbr->nbr_state), VTY_NEWLINE); + vty_out(vty, " Up time: %s%s", log_time(nbr->uptime), + VTY_NEWLINE); + break; + case IMSG_CTL_SHOW_NBR_DISC: + adj = imsg->data; + + switch (adj->af) { + case AF_INET: + show_nbr_adj(vty, v4adjs_buffer, adj); + break; + case AF_INET6: + show_nbr_adj(vty, v6adjs_buffer, adj); + break; + default: + fatalx("show_nbr_msg: unknown af"); + } + break; + case IMSG_CTL_SHOW_NBR_END: + vty_out(vty, " LDP Discovery Sources:%s", VTY_NEWLINE); + if (v4adjs_buffer[0] != '\0') { + vty_out(vty, " IPv4:%s", VTY_NEWLINE); + vty_out(vty, "%s", v4adjs_buffer); + } + if (v6adjs_buffer[0] != '\0') { + vty_out(vty, " IPv6:%s", VTY_NEWLINE); + vty_out(vty, "%s", v6adjs_buffer); + } + vty_out(vty, "%s", VTY_NEWLINE); + break; + case IMSG_CTL_END: + return (1); + default: + break; + } + + return (0); +} + +static int +show_lib_msg(struct vty *vty, struct imsg *imsg, struct show_filter *filter) +{ + struct ctl_rt *rt; + char dstnet[BUFSIZ]; + + switch (imsg->hdr.type) { + case IMSG_CTL_SHOW_LIB: + rt = imsg->data; + + if (filter->family != AF_UNSPEC && filter->family != rt->af) + break; + + snprintf(dstnet, sizeof(dstnet), "%s/%d", + log_addr(rt->af, &rt->prefix), rt->prefixlen); + + if (rt->first) { + vty_out(vty, "%s%s", dstnet, VTY_NEWLINE); + vty_out(vty, "%-8sLocal binding: label: %s%s", "", + log_label(rt->local_label), VTY_NEWLINE); + + if (rt->remote_label != NO_LABEL) { + vty_out(vty, "%-8sRemote bindings:%s", "", + VTY_NEWLINE); + vty_out(vty, "%-12sPeer Label%s", + "", VTY_NEWLINE); + 
vty_out(vty, "%-12s----------------- " + "---------%s", "", VTY_NEWLINE); + } else + vty_out(vty, "%-8sNo remote bindings%s", "", + VTY_NEWLINE); + } + if (rt->remote_label != NO_LABEL) + vty_out(vty, "%12s%-20s%s%s", "", inet_ntoa(rt->nexthop), + log_label(rt->remote_label), VTY_NEWLINE); + break; + case IMSG_CTL_END: + vty_out(vty, "%s", VTY_NEWLINE); + return (1); + default: + break; + } + + return (0); +} + +static int +show_l2vpn_binding_msg(struct vty *vty, struct imsg *imsg) +{ + struct ctl_pw *pw; + + switch (imsg->hdr.type) { + case IMSG_CTL_SHOW_L2VPN_BINDING: + pw = imsg->data; + + vty_out(vty, " Destination Address: %s, VC ID: %u%s", + inet_ntoa(pw->lsr_id), pw->pwid, VTY_NEWLINE); + + /* local binding */ + if (pw->local_label != NO_LABEL) { + vty_out(vty, " Local Label: %u%s", pw->local_label, + VTY_NEWLINE); + vty_out(vty, "%-8sCbit: %u, VC Type: %s, " + "GroupID: %u%s", "", pw->local_cword, + pw_type_name(pw->type), pw->local_gid, + VTY_NEWLINE); + vty_out(vty, "%-8sMTU: %u%s", "", pw->local_ifmtu, + VTY_NEWLINE); + } else + vty_out(vty, " Local Label: unassigned%s", + VTY_NEWLINE); + + /* remote binding */ + if (pw->remote_label != NO_LABEL) { + vty_out(vty, " Remote Label: %u%s", + pw->remote_label, VTY_NEWLINE); + vty_out(vty, "%-8sCbit: %u, VC Type: %s, " + "GroupID: %u%s", "", pw->remote_cword, + pw_type_name(pw->type), pw->remote_gid, + VTY_NEWLINE); + vty_out(vty, "%-8sMTU: %u%s", "", pw->remote_ifmtu, + VTY_NEWLINE); + } else + vty_out(vty, " Remote Label: unassigned%s", + VTY_NEWLINE); + break; + case IMSG_CTL_END: + vty_out(vty, "%s", VTY_NEWLINE); + return (1); + default: + break; + } + + return (0); +} + +static int +show_l2vpn_pw_msg(struct vty *vty, struct imsg *imsg) +{ + struct ctl_pw *pw; + + switch (imsg->hdr.type) { + case IMSG_CTL_SHOW_L2VPN_PW: + pw = imsg->data; + + vty_out(vty, "%-9s %-15s %-10u %-16s %-10s%s", pw->ifname, + inet_ntoa(pw->lsr_id), pw->pwid, pw->l2vpn_name, + (pw->status ? 
"UP" : "DOWN"), VTY_NEWLINE); + break; + case IMSG_CTL_END: + vty_out(vty, "%s", VTY_NEWLINE); + return (1); + default: + break; + } + + return (0); +} + +static int +ldp_vty_connect(struct imsgbuf *ibuf) +{ + struct sockaddr_un s_un; + int ctl_sock; + + /* connect to ldpd control socket */ + if ((ctl_sock = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { + log_warn("%s: socket", __func__); + return (-1); + } + + memset(&s_un, 0, sizeof(s_un)); + s_un.sun_family = AF_UNIX; + strlcpy(s_un.sun_path, LDPD_SOCKET, sizeof(s_un.sun_path)); + if (connect(ctl_sock, (struct sockaddr *)&s_un, sizeof(s_un)) == -1) { + log_warn("%s: connect: %s", __func__, LDPD_SOCKET); + close(ctl_sock); + return (-1); + } + + imsg_init(ibuf, ctl_sock); + + return (0); +} + +static int +ldp_vty_dispatch(struct vty *vty, struct imsgbuf *ibuf, enum show_command cmd, + struct show_filter *filter) +{ + struct imsg imsg; + int n, done = 0; + + while (ibuf->w.queued) + if (msgbuf_write(&ibuf->w) <= 0 && errno != EAGAIN) { + log_warn("write error"); + close(ibuf->fd); + return (CMD_WARNING); + } + + while (!done) { + if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) { + log_warnx("imsg_read error"); + close(ibuf->fd); + return (CMD_WARNING); + } + if (n == 0) { + log_warnx("pipe closed"); + close(ibuf->fd); + return (CMD_WARNING); + } + + while (!done) { + if ((n = imsg_get(ibuf, &imsg)) == -1) { + log_warnx("imsg_get error"); + close(ibuf->fd); + return (CMD_WARNING); + } + if (n == 0) + break; + switch (cmd) { + case SHOW_IFACE: + done = show_interface_msg(vty, &imsg, filter); + break; + case SHOW_DISC: + done = show_discovery_msg(vty, &imsg, filter); + break; + case SHOW_NBR: + done = show_nbr_msg(vty, &imsg, filter); + break; + case SHOW_LIB: + done = show_lib_msg(vty, &imsg, filter); + break; + case SHOW_L2VPN_PW: + done = show_l2vpn_pw_msg(vty, &imsg); + break; + case SHOW_L2VPN_BINDING: + done = show_l2vpn_binding_msg(vty, &imsg); + break; + default: + break; + } + imsg_free(&imsg); + } + } + + close(ibuf->fd); + + return (CMD_SUCCESS); +} + +static int +ldp_vty_get_af(const char *str, int *af) +{ + if (str == NULL) { + *af = AF_UNSPEC; + return (0); + } else if (strcmp(str, "ipv4") == 0) { + *af = AF_INET; + return (0); + } else if (strcmp(str, "ipv6") == 0) { + *af = AF_INET6; + return (0); + } + + return (-1); +} + +int +ldp_vty_show_binding(struct vty *vty, struct vty_arg *args[]) +{ + struct imsgbuf ibuf; + struct show_filter filter; + const char *af_str; + int af; + + if (ldp_vty_connect(&ibuf) < 0) + return (CMD_WARNING); + + imsg_compose(&ibuf, IMSG_CTL_SHOW_LIB, 0, 0, -1, NULL, 0); + + af_str = vty_get_arg_value(args, "address-family"); + if (ldp_vty_get_af(af_str, &af) < 0) + return (CMD_ERR_NO_MATCH); + + memset(&filter, 0, sizeof(filter)); + filter.family = af; + + return (ldp_vty_dispatch(vty, &ibuf, SHOW_LIB, &filter)); +} + +int +ldp_vty_show_discovery(struct vty *vty, struct vty_arg *args[]) +{ + struct imsgbuf ibuf; + struct show_filter filter; + const char *af_str; + int af; + + if (ldp_vty_connect(&ibuf) < 0) + return (CMD_WARNING); + + imsg_compose(&ibuf, IMSG_CTL_SHOW_DISCOVERY, 0, 0, -1, NULL, 0); + + af_str = vty_get_arg_value(args, "address-family"); + if (ldp_vty_get_af(af_str, &af) < 0) + return (CMD_ERR_NO_MATCH); + + memset(&filter, 0, sizeof(filter)); + filter.family = af; + + return (ldp_vty_dispatch(vty, &ibuf, SHOW_DISC, &filter)); +} + +int +ldp_vty_show_interface(struct vty *vty, struct vty_arg *args[]) +{ + struct imsgbuf ibuf; + struct show_filter filter; + unsigned int ifidx = 0; + 
const char *af_str; + int af; + + if (ldp_vty_connect(&ibuf) < 0) + return (CMD_WARNING); + + imsg_compose(&ibuf, IMSG_CTL_SHOW_INTERFACE, 0, 0, -1, &ifidx, + sizeof(ifidx)); + + af_str = vty_get_arg_value(args, "address-family"); + if (ldp_vty_get_af(af_str, &af) < 0) + return (CMD_ERR_NO_MATCH); + + memset(&filter, 0, sizeof(filter)); + filter.family = af; + + /* header */ + vty_out(vty, "%-4s %-11s %-6s %-8s %-12s %3s%s", "AF", + "Interface", "State", "Uptime", "Hello Timers", "ac", VTY_NEWLINE); + + return (ldp_vty_dispatch(vty, &ibuf, SHOW_IFACE, &filter)); +} + +int +ldp_vty_show_neighbor(struct vty *vty, struct vty_arg *args[]) +{ + struct imsgbuf ibuf; + struct show_filter filter; + + if (ldp_vty_connect(&ibuf) < 0) + return (CMD_WARNING); + + imsg_compose(&ibuf, IMSG_CTL_SHOW_NBR, 0, 0, -1, NULL, 0); + + /* not used */ + memset(&filter, 0, sizeof(filter)); + + return (ldp_vty_dispatch(vty, &ibuf, SHOW_NBR, &filter)); +} + +int +ldp_vty_show_atom_binding(struct vty *vty, struct vty_arg *args[]) +{ + struct imsgbuf ibuf; + struct show_filter filter; + + if (ldp_vty_connect(&ibuf) < 0) + return (CMD_WARNING); + + imsg_compose(&ibuf, IMSG_CTL_SHOW_L2VPN_BINDING, 0, 0, -1, NULL, 0); + + /* not used */ + memset(&filter, 0, sizeof(filter)); + + return (ldp_vty_dispatch(vty, &ibuf, SHOW_L2VPN_BINDING, &filter)); +} + +int +ldp_vty_show_atom_vc(struct vty *vty, struct vty_arg *args[]) +{ + struct imsgbuf ibuf; + struct show_filter filter; + + if (ldp_vty_connect(&ibuf) < 0) + return (CMD_WARNING); + + imsg_compose(&ibuf, IMSG_CTL_SHOW_L2VPN_PW, 0, 0, -1, NULL, 0); + + /* not used */ + memset(&filter, 0, sizeof(filter)); + + /* header */ + vty_out(vty, "%-9s %-15s %-10s %-16s %-10s%s", + "Interface", "Peer ID", "VC ID", "Name", "Status", VTY_NEWLINE); + vty_out(vty, "%-9s %-15s %-10s %-16s %-10s%s", + "---------", "---------------", "----------", + "----------------", "----------", VTY_NEWLINE); + + return (ldp_vty_dispatch(vty, &ibuf, SHOW_L2VPN_PW, &filter)); +} + +int +ldp_vty_clear_nbr(struct vty *vty, struct vty_arg *args[]) +{ + struct imsgbuf ibuf; + const char *addr_str; + struct ctl_nbr nbr; + + addr_str = vty_get_arg_value(args, "addr"); + + memset(&nbr, 0, sizeof(nbr)); + if (addr_str && + (ldp_get_address(addr_str, &nbr.af, &nbr.raddr) == -1 || + bad_addr(nbr.af, &nbr.raddr))) { + vty_out(vty, "%% Malformed address%s", VTY_NEWLINE); + return (CMD_WARNING); + } + + if (ldp_vty_connect(&ibuf) < 0) + return (CMD_WARNING); + + imsg_compose(&ibuf, IMSG_CTL_CLEAR_NBR, 0, 0, -1, &nbr, sizeof(nbr)); + + while (ibuf.w.queued) + if (msgbuf_write(&ibuf.w) <= 0 && errno != EAGAIN) { + log_warn("write error"); + close(ibuf.fd); + return (CMD_WARNING); + } + + close(ibuf.fd); + + return (CMD_SUCCESS); +} diff --git a/ldpd/ldp_zebra.c b/ldpd/ldp_zebra.c new file mode 100644 index 0000000000..b796b6f6d6 --- /dev/null +++ b/ldpd/ldp_zebra.c @@ -0,0 +1,504 @@ +/* + * Copyright (C) 2016 by Open Source Routing. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include + +#include "prefix.h" +#include "stream.h" +#include "memory.h" +#include "zclient.h" +#include "command.h" +#include "network.h" +#include "linklist.h" +#include "mpls.h" + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" +#include "ldp_debug.h" + +static void ifp2kif(struct interface *, struct kif *); +static void ifc2kaddr(struct interface *, struct connected *, + struct kaddr *); +static int zebra_send_mpls_labels(int, struct kroute *); +static int ldp_router_id_update(int, struct zclient *, zebra_size_t, + vrf_id_t); +static int ldp_interface_add(int, struct zclient *, zebra_size_t, + vrf_id_t); +static int ldp_interface_delete(int, struct zclient *, zebra_size_t, + vrf_id_t); +static int ldp_interface_status_change(int command, struct zclient *, + zebra_size_t, vrf_id_t); +static int ldp_interface_address_add(int, struct zclient *, zebra_size_t, + vrf_id_t); +static int ldp_interface_address_delete(int, struct zclient *, + zebra_size_t, vrf_id_t); +static int ldp_zebra_read_route(int, struct zclient *, zebra_size_t, + vrf_id_t); +static void ldp_zebra_connected(struct zclient *); + +static struct zclient *zclient; + +static void +ifp2kif(struct interface *ifp, struct kif *kif) +{ + memset(kif, 0, sizeof(*kif)); + strlcpy(kif->ifname, ifp->name, sizeof(kif->ifname)); + kif->ifindex = ifp->ifindex; + kif->flags = ifp->flags; +} + +static void +ifc2kaddr(struct interface *ifp, struct connected *ifc, struct kaddr *ka) +{ + memset(ka, 0, sizeof(*ka)); + ka->ifindex = ifp->ifindex; + ka->af = ifc->address->family; + ka->prefixlen = ifc->address->prefixlen; + + switch (ka->af) { + case AF_INET: + ka->addr.v4 = ifc->address->u.prefix4; + if (ifc->destination) + ka->dstbrd.v4 = ifc->destination->u.prefix4; + break; + case AF_INET6: + ka->addr.v6 = ifc->address->u.prefix6; + if (ifc->destination) + ka->dstbrd.v6 = ifc->destination->u.prefix6; + break; + default: + break; + } +} + +static int +zebra_send_mpls_labels(int cmd, struct kroute *kr) +{ + struct stream *s; + + if (kr->local_label < MPLS_LABEL_RESERVED_MAX || + kr->remote_label == NO_LABEL) + return (0); + + debug_zebra_out("prefix %s/%u nexthop %s labels %s/%s (%s)", + log_addr(kr->af, &kr->prefix), kr->prefixlen, + log_addr(kr->af, &kr->nexthop), log_label(kr->local_label), + log_label(kr->remote_label), + (cmd == ZEBRA_MPLS_LABELS_ADD) ? "add" : "delete"); + + /* Reset stream. */ + s = zclient->obuf; + stream_reset(s); + + zclient_create_header(s, cmd, VRF_DEFAULT); + stream_putc(s, ZEBRA_LSP_LDP); + stream_putl(s, kr->af); + switch (kr->af) { + case AF_INET: + stream_put_in_addr(s, &kr->prefix.v4); + stream_putc(s, kr->prefixlen); + stream_put_in_addr(s, &kr->nexthop.v4); + break; + case AF_INET6: + stream_write(s, (u_char *)&kr->prefix.v6, 16); + stream_putc(s, kr->prefixlen); + stream_write(s, (u_char *)&kr->nexthop.v6, 16); + break; + default: + fatalx("kr_change: unknown af"); + } + stream_putc(s, kr->priority); + stream_putl(s, kr->local_label); + stream_putl(s, kr->remote_label); + + /* Put length at the first point of the stream. 
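	 * The ZAPI header written by zclient_create_header() carries the
	 * total message length in its first two bytes; now that the body is
	 * complete, stream_putw_at() patches the real length (the current
	 * end pointer of the stream) back into offset 0 before the message
	 * is handed to zclient_send_message().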
*/ + stream_putw_at(s, 0, stream_get_endp(s)); + + return (zclient_send_message(zclient)); +} + +int +kr_change(struct kroute *kr) +{ + return (zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_ADD, kr)); +} + +int +kr_delete(struct kroute *kr) +{ + return (zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_DELETE, kr)); +} + +int +kmpw_set(struct kpw *kpw) +{ + /* TODO */ + return (0); +} + +int +kmpw_unset(struct kpw *kpw) +{ + /* TODO */ + return (0); +} + +void +kif_redistribute(const char *ifname) +{ + struct listnode *node, *cnode; + struct interface *ifp; + struct connected *ifc; + struct kif kif; + struct kaddr ka; + + for (ALL_LIST_ELEMENTS_RO(vrf_iflist(VRF_DEFAULT), node, ifp)) { + if (ifname && strcmp(ifname, ifp->name) != 0) + continue; + + ifp2kif(ifp, &kif); + main_imsg_compose_ldpe(IMSG_IFSTATUS, 0, &kif, sizeof(kif)); + + for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, ifc)) { + ifc2kaddr(ifp, ifc, &ka); + main_imsg_compose_ldpe(IMSG_NEWADDR, 0, &ka, + sizeof(ka)); + } + } +} + +static int +ldp_router_id_update(int command, struct zclient *zclient, zebra_size_t length, + vrf_id_t vrf_id) +{ + struct prefix router_id; + + zebra_router_id_update_read(zclient->ibuf, &router_id); + + if (bad_addr_v4(router_id.u.prefix4)) + return (0); + + debug_zebra_in("router-id update %s", inet_ntoa(router_id.u.prefix4)); + + global.rtr_id.s_addr = router_id.u.prefix4.s_addr; + main_imsg_compose_ldpe(IMSG_RTRID_UPDATE, 0, &global.rtr_id, + sizeof(global.rtr_id)); + + return (0); +} + +static int +ldp_interface_add(int command, struct zclient *zclient, zebra_size_t length, + vrf_id_t vrf_id) +{ + struct interface *ifp; + struct kif kif; + + ifp = zebra_interface_add_read(zclient->ibuf, vrf_id); + debug_zebra_in("interface add %s index %d mtu %d", ifp->name, + ifp->ifindex, ifp->mtu); + + ifp2kif(ifp, &kif); + main_imsg_compose_ldpe(IMSG_IFSTATUS, 0, &kif, sizeof(kif)); + + return (0); +} + +static int +ldp_interface_delete(int command, struct zclient *zclient, zebra_size_t length, + vrf_id_t vrf_id) +{ + struct interface *ifp; + + /* zebra_interface_state_read() updates interface structure in iflist */ + ifp = zebra_interface_state_read(zclient->ibuf, vrf_id); + if (ifp == NULL) + return (0); + + debug_zebra_in("interface delete %s index %d mtu %d", ifp->name, + ifp->ifindex, ifp->mtu); + + /* To support pseudo interface do not free interface structure. */ + /* if_delete(ifp); */ + ifp->ifindex = IFINDEX_INTERNAL; + + return (0); +} + +static int +ldp_interface_status_change(int command, struct zclient *zclient, + zebra_size_t length, vrf_id_t vrf_id) +{ + struct interface *ifp; + struct listnode *node; + struct connected *ifc; + struct kif kif; + struct kaddr ka; + int link_new; + + /* + * zebra_interface_state_read() updates interface structure in + * iflist. 
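	 * The resulting up/down transition is forwarded to the ldpe child as
	 * an IMSG_IFSTATUS update, followed by one IMSG_NEWADDR (link up) or
	 * IMSG_DELADDR (link down) message per connected address.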
+ */ + ifp = zebra_interface_state_read(zclient->ibuf, vrf_id); + if (ifp == NULL) + return (0); + + debug_zebra_in("interface %s state update", ifp->name); + + ifp2kif(ifp, &kif); + main_imsg_compose_ldpe(IMSG_IFSTATUS, 0, &kif, sizeof(kif)); + + link_new = (ifp->flags & IFF_UP) && (ifp->flags & IFF_RUNNING); + if (link_new) { + for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) { + ifc2kaddr(ifp, ifc, &ka); + main_imsg_compose_ldpe(IMSG_NEWADDR, 0, &ka, + sizeof(ka)); + } + } else { + for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) { + ifc2kaddr(ifp, ifc, &ka); + main_imsg_compose_ldpe(IMSG_DELADDR, 0, &ka, + sizeof(ka)); + } + } + + return (0); +} + +static int +ldp_interface_address_add(int command, struct zclient *zclient, + zebra_size_t length, vrf_id_t vrf_id) +{ + struct connected *ifc; + struct interface *ifp; + struct kaddr ka; + + ifc = zebra_interface_address_read(command, zclient->ibuf, vrf_id); + if (ifc == NULL) + return (0); + + ifp = ifc->ifp; + ifc2kaddr(ifp, ifc, &ka); + + /* Filter invalid addresses. */ + if (bad_addr(ka.af, &ka.addr)) + return (0); + + debug_zebra_in("address add %s/%u", log_addr(ka.af, &ka.addr), + ka.prefixlen); + + /* notify ldpe about new address */ + main_imsg_compose_ldpe(IMSG_NEWADDR, 0, &ka, sizeof(ka)); + + return (0); +} + +static int +ldp_interface_address_delete(int command, struct zclient *zclient, + zebra_size_t length, vrf_id_t vrf_id) +{ + struct connected *ifc; + struct interface *ifp; + struct kaddr ka; + + ifc = zebra_interface_address_read(command, zclient->ibuf, vrf_id); + if (ifc == NULL) + return (0); + + ifp = ifc->ifp; + ifc2kaddr(ifp, ifc, &ka); + connected_free(ifc); + + /* Filter invalid addresses. */ + if (bad_addr(ka.af, &ka.addr)) + return (0); + + debug_zebra_in("address delete %s/%u", log_addr(ka.af, &ka.addr), + ka.prefixlen); + + /* notify ldpe about removed address */ + main_imsg_compose_ldpe(IMSG_DELADDR, 0, &ka, sizeof(ka)); + + return (0); +} + +static int +ldp_zebra_read_route(int command, struct zclient *zclient, zebra_size_t length, + vrf_id_t vrf_id) +{ + struct stream *s; + u_char type; + u_char message_flags; + struct kroute kr; + int nhnum, nhlen; + size_t nhmark; + + memset(&kr, 0, sizeof(kr)); + s = zclient->ibuf; + + type = stream_getc(s); + if (type == ZEBRA_ROUTE_CONNECT) + kr.flags |= F_CONNECTED; + stream_getl(s); /* flags, unused */ + stream_getw(s); /* instance, unused */ + message_flags = stream_getc(s); + if (!CHECK_FLAG(message_flags, ZAPI_MESSAGE_NEXTHOP)) + return (0); + + switch (command) { + case ZEBRA_REDISTRIBUTE_IPV4_ADD: + case ZEBRA_REDISTRIBUTE_IPV4_DEL: + kr.af = AF_INET; + nhlen = sizeof(struct in_addr); + break; + case ZEBRA_REDISTRIBUTE_IPV6_ADD: + case ZEBRA_REDISTRIBUTE_IPV6_DEL: + kr.af = AF_INET6; + nhlen = sizeof(struct in6_addr); + break; + default: + fatalx("ldp_zebra_read_route: unknown command"); + } + kr.prefixlen = stream_getc(s); + stream_get(&kr.prefix, s, PSIZE(kr.prefixlen)); + + if (bad_addr(kr.af, &kr.prefix) || + (kr.af == AF_INET6 && IN6_IS_SCOPE_EMBED(&kr.prefix.v6))) + return (0); + + nhnum = stream_getc(s); + nhmark = stream_get_getp(s); + stream_set_getp(s, nhmark + nhnum * (nhlen + 5)); + + if (CHECK_FLAG(message_flags, ZAPI_MESSAGE_DISTANCE)) + kr.priority = stream_getc(s); + if (CHECK_FLAG(message_flags, ZAPI_MESSAGE_METRIC)) + stream_getl(s); /* metric, not used */ + + stream_set_getp(s, nhmark); + + /* loop through all the nexthops */ + for (; nhnum > 0; nhnum--) { + switch (kr.af) { + case AF_INET: + kr.nexthop.v4.s_addr = stream_get_ipv4(s); + 
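			/*
			 * Each nexthop record in this ZAPI message is the
			 * nexthop address (nhlen bytes: 4 for IPv4, 16 for
			 * IPv6) followed by a one-byte ifindex count
			 * ("ifindex_num") and a four-byte ifindex, which is
			 * why the code above skipped nhnum * (nhlen + 5)
			 * bytes to reach the optional distance/metric fields
			 * before rewinding to this loop.
			 */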
break; + case AF_INET6: + stream_get(&kr.nexthop.v6, s, sizeof(kr.nexthop.v6)); + break; + default: + break; + } + stream_getc(s); /* ifindex_num, unused. */ + kr.ifindex = stream_getl(s); + + switch (command) { + case ZEBRA_REDISTRIBUTE_IPV4_ADD: + case ZEBRA_REDISTRIBUTE_IPV6_ADD: + debug_zebra_in("route add %s/%d nexthop %s (%s)", + log_addr(kr.af, &kr.prefix), kr.prefixlen, + log_addr(kr.af, &kr.nexthop), + zebra_route_string(type)); + main_imsg_compose_lde(IMSG_NETWORK_ADD, 0, &kr, + sizeof(kr)); + break; + case ZEBRA_REDISTRIBUTE_IPV4_DEL: + case ZEBRA_REDISTRIBUTE_IPV6_DEL: + debug_zebra_in("route delete %s/%d nexthop %s (%s)", + log_addr(kr.af, &kr.prefix), kr.prefixlen, + log_addr(kr.af, &kr.nexthop), + zebra_route_string(type)); + main_imsg_compose_lde(IMSG_NETWORK_DEL, 0, &kr, + sizeof(kr)); + break; + default: + fatalx("ldp_zebra_read_route: unknown command"); + } + } + + if (command == ZEBRA_REDISTRIBUTE_IPV4_ADD || + command == ZEBRA_REDISTRIBUTE_IPV6_ADD) + main_imsg_compose_lde(IMSG_NETWORK_ADD_END, 0, &kr, sizeof(kr)); + + return (0); +} + +static void +ldp_zebra_connected(struct zclient *zclient) +{ + int i; + + zclient_send_reg_requests(zclient, VRF_DEFAULT); + + for (i = 0; i < ZEBRA_ROUTE_MAX; i++) { + switch (i) { + case ZEBRA_ROUTE_KERNEL: + case ZEBRA_ROUTE_CONNECT: + case ZEBRA_ROUTE_STATIC: + case ZEBRA_ROUTE_ISIS: + zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, + AFI_IP, i, 0, VRF_DEFAULT); + zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, + AFI_IP6, i, 0, VRF_DEFAULT); + break; + case ZEBRA_ROUTE_RIP: + case ZEBRA_ROUTE_OSPF: + zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, + AFI_IP, i, 0, VRF_DEFAULT); + break; + case ZEBRA_ROUTE_RIPNG: + case ZEBRA_ROUTE_OSPF6: + zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, + AFI_IP6, i, 0, VRF_DEFAULT); + break; + case ZEBRA_ROUTE_BGP: + /* LDP should follow the IGP and ignore BGP routes */ + default: + break; + } + } +} + +void +ldp_zebra_init(struct thread_master *master) +{ + /* Set default values. */ + zclient = zclient_new(master); + zclient_init(zclient, ZEBRA_ROUTE_LDP, 0); + + /* set callbacks */ + zclient->zebra_connected = ldp_zebra_connected; + zclient->router_id_update = ldp_router_id_update; + zclient->interface_add = ldp_interface_add; + zclient->interface_delete = ldp_interface_delete; + zclient->interface_up = ldp_interface_status_change; + zclient->interface_down = ldp_interface_status_change; + zclient->interface_address_add = ldp_interface_address_add; + zclient->interface_address_delete = ldp_interface_address_delete; + zclient->redistribute_route_ipv4_add = ldp_zebra_read_route; + zclient->redistribute_route_ipv4_del = ldp_zebra_read_route; + zclient->redistribute_route_ipv6_add = ldp_zebra_read_route; + zclient->redistribute_route_ipv6_del = ldp_zebra_read_route; +} diff --git a/ldpd/ldpd.c b/ldpd/ldpd.c new file mode 100644 index 0000000000..aa1dc57a7b --- /dev/null +++ b/ldpd/ldpd.c @@ -0,0 +1,1802 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2005 Claudio Jeker + * Copyright (c) 2004, 2008 Esben Norby + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" +#include "ldp_vty.h" +#include "ldp_debug.h" + +#include +#include +#include "getopt.h" +#include "vty.h" +#include "command.h" +#include "memory.h" +#include "privs.h" +#include "sigevent.h" +#include "zclient.h" +#include "vrf.h" +#include "qobj.h" + +static void ldpd_shutdown(void); +static pid_t start_child(enum ldpd_process, char *, int, + const char *, const char *); +static int main_dispatch_ldpe(struct thread *); +static int main_dispatch_lde(struct thread *); +static int main_imsg_send_ipc_sockets(struct imsgbuf *, + struct imsgbuf *); +static void main_imsg_send_net_sockets(int); +static void main_imsg_send_net_socket(int, enum socket_type); +static int main_imsg_send_config(struct ldpd_conf *); +static void ldp_config_normalize(struct ldpd_conf *, void **); +static void ldp_config_reset_main(struct ldpd_conf *, void **); +static void ldp_config_reset_af(struct ldpd_conf *, int, void **); +static void merge_config_ref(struct ldpd_conf *, struct ldpd_conf *, void **); +static void merge_global(struct ldpd_conf *, struct ldpd_conf *); +static void merge_af(int, struct ldpd_af_conf *, + struct ldpd_af_conf *); +static void merge_ifaces(struct ldpd_conf *, struct ldpd_conf *, void **); +static void merge_iface_af(struct iface_af *, struct iface_af *); +static void merge_tnbrs(struct ldpd_conf *, struct ldpd_conf *, void **); +static void merge_nbrps(struct ldpd_conf *, struct ldpd_conf *, void **); +static void merge_l2vpns(struct ldpd_conf *, struct ldpd_conf *, void **); +static void merge_l2vpn(struct ldpd_conf *, struct l2vpn *, + struct l2vpn *, void **); + +DEFINE_QOBJ_TYPE(iface) +DEFINE_QOBJ_TYPE(tnbr) +DEFINE_QOBJ_TYPE(nbr_params) +DEFINE_QOBJ_TYPE(l2vpn_if) +DEFINE_QOBJ_TYPE(l2vpn_pw) +DEFINE_QOBJ_TYPE(l2vpn) +DEFINE_QOBJ_TYPE(ldpd_conf) + +struct ldpd_global global; +struct ldpd_conf *ldpd_conf; + +static struct imsgev *iev_ldpe; +static struct imsgev *iev_lde; +static pid_t ldpe_pid; +static pid_t lde_pid; + +#define LDP_DEFAULT_CONFIG "ldpd.conf" +#define LDP_VTY_PORT 2612 + +/* Master of threads. */ +struct thread_master *master; + +/* Process ID saved for use by init system */ +static const char *pid_file = PATH_LDPD_PID; + +/* Configuration filename and directory. */ +static char config_default[] = SYSCONFDIR LDP_DEFAULT_CONFIG; + +/* ldpd privileges */ +static zebra_capabilities_t _caps_p [] = +{ + ZCAP_BIND, + ZCAP_NET_ADMIN +}; + +struct zebra_privs_t ldpd_privs = +{ +#if defined(QUAGGA_USER) && defined(QUAGGA_GROUP) + .user = QUAGGA_USER, + .group = QUAGGA_GROUP, +#endif +#if defined(VTY_GROUP) + .vty_group = VTY_GROUP, +#endif + .caps_p = _caps_p, + .cap_num_p = array_size(_caps_p), + .cap_num_i = 0 +}; + +/* LDPd options. 
*/ +static struct option longopts[] = +{ + { "daemon", no_argument, NULL, 'd'}, + { "config_file", required_argument, NULL, 'f'}, + { "pid_file", required_argument, NULL, 'i'}, + { "socket", required_argument, NULL, 'z'}, + { "dryrun", no_argument, NULL, 'C'}, + { "help", no_argument, NULL, 'h'}, + { "vty_addr", required_argument, NULL, 'A'}, + { "vty_port", required_argument, NULL, 'P'}, + { "user", required_argument, NULL, 'u'}, + { "group", required_argument, NULL, 'g'}, + { "version", no_argument, NULL, 'v'}, + { 0 } +}; + +/* Help information display. */ +static void __attribute__ ((noreturn)) +usage(char *progname, int status) +{ + if (status != 0) + fprintf(stderr, "Try `%s --help' for more information.\n", + progname); + else { + printf("Usage : %s [OPTION...]\n\ +Daemon which manages LDP.\n\n\ +-d, --daemon Runs in daemon mode\n\ +-f, --config_file Set configuration file name\n\ +-i, --pid_file Set process identifier file name\n\ +-z, --socket Set path of zebra socket\n\ +-A, --vty_addr Set vty's bind address\n\ +-P, --vty_port Set vty's port number\n\ +-u, --user User to run as\n\ +-g, --group Group to run as\n\ +-v, --version Print program version\n\ +-C, --dryrun Check configuration for validity and exit\n\ +-h, --help Display this help and exit\n\ +\n\ +Report bugs to %s\n", progname, ZEBRA_BUG_ADDRESS); + } + + exit(status); +} + +/* SIGHUP handler. */ +static void +sighup(void) +{ + log_info("SIGHUP received"); +} + +/* SIGINT / SIGTERM handler. */ +static void +sigint(void) +{ + log_info("SIGINT received"); + ldpd_shutdown(); +} + +/* SIGUSR1 handler. */ +static void +sigusr1(void) +{ + zlog_rotate(NULL); +} + +static struct quagga_signal_t ldp_signals[] = +{ + { + .signal = SIGHUP, + .handler = &sighup, + }, + { + .signal = SIGINT, + .handler = &sigint, + }, + { + .signal = SIGTERM, + .handler = &sigint, + }, + { + .signal = SIGUSR1, + .handler = &sigusr1, + } +}; + +int +main(int argc, char *argv[]) +{ + char *saved_argv0; + int lflag = 0, eflag = 0; + int pipe_parent2ldpe[2]; + int pipe_parent2lde[2]; + char *p; + char *vty_addr = NULL; + int vty_port = LDP_VTY_PORT; + int daemon_mode = 0; + const char *user = NULL; + const char *group = NULL; + char *config_file = NULL; + char *progname; + struct thread thread; + int dryrun = 0; + + ldpd_process = PROC_MAIN; + + /* Set umask before anything for security */ + umask(0027); + + /* get program name */ + progname = ((p = strrchr(argv[0], '/')) ? ++p : argv[0]); + + saved_argv0 = argv[0]; + if (saved_argv0 == NULL) + saved_argv0 = (char *)"ldpd"; + + while (1) { + int opt; + + opt = getopt_long(argc, argv, "df:i:z:hA:P:u:g:vCLE", + longopts, 0); + + if (opt == EOF) + break; + + switch (opt) { + case 0: + break; + case 'd': + daemon_mode = 1; + break; + case 'f': + config_file = optarg; + break; + case 'A': + vty_addr = optarg; + break; + case 'i': + pid_file = optarg; + break; + case 'z': + zclient_serv_path_set(optarg); + break; + case 'P': + /* + * Deal with atoi() returning 0 on failure, and ldpd + * not listening on ldpd port. 
+ */ + if (strcmp(optarg, "0") == 0) { + vty_port = 0; + break; + } + vty_port = atoi(optarg); + if (vty_port <= 0 || vty_port > 0xffff) + vty_port = LDP_VTY_PORT; + break; + case 'u': + user = optarg; + break; + case 'g': + group = optarg; + break; + case 'v': + print_version(progname); + exit(0); + break; + case 'C': + dryrun = 1; + break; + case 'h': + usage(progname, 0); + break; + case 'L': + lflag = 1; + break; + case 'E': + eflag = 1; + break; + default: + usage(progname, 1); + break; + } + } + + argc -= optind; + argv += optind; + if (argc > 0 || (lflag && eflag)) + usage(progname, 1); + + /* check for root privileges */ + if (geteuid() != 0) { + errno = EPERM; + perror(progname); + exit(1); + } + + zlog_default = openzlog(progname, ZLOG_LDP, 0, + LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON); + + if (lflag) + lde(user, group); + else if (eflag) + ldpe(user, group); + + master = thread_master_create(); + + cmd_init(1); + vty_config_lockless (); + vty_init(master); + vrf_init(); + ldp_vty_init(); + ldp_vty_if_init(); + + /* Get configuration file. */ + ldpd_conf = config_new_empty(); + ldp_config_reset_main(ldpd_conf, NULL); + vty_read_config(config_file, config_default); + + /* Start execution only if not in dry-run mode */ + if (dryrun) + exit(0); + + QOBJ_REG (ldpd_conf, ldpd_conf); + + if (daemon_mode && daemon(0, 0) < 0) { + log_warn("LDPd daemon failed"); + exit(1); + } + + if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, pipe_parent2ldpe) == -1) + fatal("socketpair"); + if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, pipe_parent2lde) == -1) + fatal("socketpair"); + sock_set_nonblock(pipe_parent2ldpe[0]); + sock_set_cloexec(pipe_parent2ldpe[0]); + sock_set_nonblock(pipe_parent2ldpe[1]); + sock_set_cloexec(pipe_parent2ldpe[1]); + sock_set_nonblock(pipe_parent2lde[0]); + sock_set_cloexec(pipe_parent2lde[0]); + sock_set_nonblock(pipe_parent2lde[1]); + sock_set_cloexec(pipe_parent2lde[1]); + + /* start children */ + lde_pid = start_child(PROC_LDE_ENGINE, saved_argv0, + pipe_parent2lde[1], user, group); + ldpe_pid = start_child(PROC_LDP_ENGINE, saved_argv0, + pipe_parent2ldpe[1], user, group); + + /* drop privileges */ + if (user) + ldpd_privs.user = user; + if (group) + ldpd_privs.group = group; + zprivs_init(&ldpd_privs); + + /* setup signal handler */ + signal_init(master, array_size(ldp_signals), ldp_signals); + + /* library inits */ + ldp_zebra_init(master); + + /* setup pipes to children */ + if ((iev_ldpe = malloc(sizeof(struct imsgev))) == NULL || + (iev_lde = malloc(sizeof(struct imsgev))) == NULL) + fatal(NULL); + imsg_init(&iev_ldpe->ibuf, pipe_parent2ldpe[0]); + iev_ldpe->handler_read = main_dispatch_ldpe; + iev_ldpe->ev_read = thread_add_read(master, iev_ldpe->handler_read, + iev_ldpe, iev_ldpe->ibuf.fd); + iev_ldpe->handler_write = ldp_write_handler; + iev_ldpe->ev_write = NULL; + + imsg_init(&iev_lde->ibuf, pipe_parent2lde[0]); + iev_lde->handler_read = main_dispatch_lde; + iev_lde->ev_read = thread_add_read(master, iev_lde->handler_read, + iev_lde, iev_lde->ibuf.fd); + iev_lde->handler_write = ldp_write_handler; + iev_lde->ev_write = NULL; + + if (main_imsg_send_ipc_sockets(&iev_ldpe->ibuf, &iev_lde->ibuf)) + fatal("could not establish imsg links"); + main_imsg_compose_both(IMSG_DEBUG_UPDATE, &ldp_debug, + sizeof(ldp_debug)); + main_imsg_send_config(ldpd_conf); + + if (ldpd_conf->ipv4.flags & F_LDPD_AF_ENABLED) + main_imsg_send_net_sockets(AF_INET); + if (ldpd_conf->ipv6.flags & F_LDPD_AF_ENABLED) + main_imsg_send_net_sockets(AF_INET6); + + /* Process id file create. 
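	 * By this point main() has forked the lde and ldpe children, dropped
	 * privileges, wired up the imsg pipes, pushed the parsed
	 * configuration to both children and handed the LDP network sockets
	 * for each enabled address-family to ldpe; all that remains is to
	 * write the pid file, open the vty socket and enter the thread loop.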
*/ + pid_output(pid_file); + + /* Create VTY socket */ + vty_serv_sock(vty_addr, vty_port, LDP_VTYSH_PATH); + + /* Print banner. */ + log_notice("LDPd %s starting: vty@%d", QUAGGA_VERSION, vty_port); + + /* Fetch next active thread. */ + while (thread_fetch(master, &thread)) + thread_call(&thread); + + /* NOTREACHED */ + return (0); +} + +static void +ldpd_shutdown(void) +{ + pid_t pid; + int status; + + /* close pipes */ + msgbuf_clear(&iev_ldpe->ibuf.w); + close(iev_ldpe->ibuf.fd); + msgbuf_clear(&iev_lde->ibuf.w); + close(iev_lde->ibuf.fd); + + config_clear(ldpd_conf); + + log_debug("waiting for children to terminate"); + do { + pid = wait(&status); + if (pid == -1) { + if (errno != EINTR && errno != ECHILD) + fatal("wait"); + } else if (WIFSIGNALED(status)) + log_warnx("%s terminated; signal %d", + (pid == lde_pid) ? "label decision engine" : + "ldp engine", WTERMSIG(status)); + } while (pid != -1 || (pid == -1 && errno == EINTR)); + + free(iev_ldpe); + free(iev_lde); + + log_info("terminating"); + exit(0); +} + +static pid_t +start_child(enum ldpd_process p, char *argv0, int fd, const char *user, + const char *group) +{ + char *argv[7]; + int argc = 0; + pid_t pid; + + switch (pid = fork()) { + case -1: + fatal("cannot fork"); + case 0: + break; + default: + close(fd); + return (pid); + } + + if (dup2(fd, 3) == -1) + fatal("cannot setup imsg fd"); + + argv[argc++] = argv0; + switch (p) { + case PROC_MAIN: + fatalx("Can not start main process"); + case PROC_LDE_ENGINE: + argv[argc++] = (char *)"-L"; + break; + case PROC_LDP_ENGINE: + argv[argc++] = (char *)"-E"; + break; + } + if (user) { + argv[argc++] = (char *)"-u"; + argv[argc++] = (char *)user; + } + if (group) { + argv[argc++] = (char *)"-g"; + argv[argc++] = (char *)group; + } + argv[argc++] = NULL; + + execvp(argv0, argv); + fatal("execvp"); +} + +/* imsg handling */ +/* ARGSUSED */ +static int +main_dispatch_ldpe(struct thread *thread) +{ + struct imsgev *iev = THREAD_ARG(thread); + struct imsgbuf *ibuf = &iev->ibuf; + struct imsg imsg; + int af; + ssize_t n; + int shut = 0; + + iev->ev_read = NULL; + + if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) + fatal("imsg_read error"); + if (n == 0) /* connection closed */ + shut = 1; + + for (;;) { + if ((n = imsg_get(ibuf, &imsg)) == -1) + fatal("imsg_get"); + + if (n == 0) + break; + + switch (imsg.hdr.type) { + case IMSG_LOG: + logit(imsg.hdr.pid, "%s", (const char *)imsg.data); + break; + case IMSG_REQUEST_SOCKETS: + af = imsg.hdr.pid; + main_imsg_send_net_sockets(af); + break; + default: + log_debug("%s: error handling imsg %d", __func__, + imsg.hdr.type); + break; + } + imsg_free(&imsg); + } + if (!shut) + imsg_event_add(iev); + else { + /* this pipe is dead, so remove the event handlers and exit */ + THREAD_READ_OFF(iev->ev_read); + THREAD_WRITE_OFF(iev->ev_write); + ldpe_pid = 0; + if (lde_pid == 0) + ldpd_shutdown(); + else + kill(lde_pid, SIGTERM); + } + + return (0); +} + +/* ARGSUSED */ +static int +main_dispatch_lde(struct thread *thread) +{ + struct imsgev *iev = THREAD_ARG(thread); + struct imsgbuf *ibuf = &iev->ibuf; + struct imsg imsg; + ssize_t n; + int shut = 0; + + iev->ev_read = NULL; + + if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) + fatal("imsg_read error"); + if (n == 0) /* connection closed */ + shut = 1; + + for (;;) { + if ((n = imsg_get(ibuf, &imsg)) == -1) + fatal("imsg_get"); + + if (n == 0) + break; + + switch (imsg.hdr.type) { + case IMSG_LOG: + logit(imsg.hdr.pid, "%s", (const char *)imsg.data); + break; + case IMSG_KLABEL_CHANGE: + if 
(imsg.hdr.len - IMSG_HEADER_SIZE != + sizeof(struct kroute)) + fatalx("invalid size of IMSG_KLABEL_CHANGE"); + if (kr_change(imsg.data)) + log_warnx("%s: error changing route", __func__); + break; + case IMSG_KLABEL_DELETE: + if (imsg.hdr.len - IMSG_HEADER_SIZE != + sizeof(struct kroute)) + fatalx("invalid size of IMSG_KLABEL_DELETE"); + if (kr_delete(imsg.data)) + log_warnx("%s: error deleting route", __func__); + break; + case IMSG_KPWLABEL_CHANGE: + if (imsg.hdr.len - IMSG_HEADER_SIZE != + sizeof(struct kpw)) + fatalx("invalid size of IMSG_KPWLABEL_CHANGE"); + if (kmpw_set(imsg.data)) + log_warnx("%s: error changing pseudowire", + __func__); + break; + case IMSG_KPWLABEL_DELETE: + if (imsg.hdr.len - IMSG_HEADER_SIZE != + sizeof(struct kpw)) + fatalx("invalid size of IMSG_KPWLABEL_DELETE"); + if (kmpw_unset(imsg.data)) + log_warnx("%s: error unsetting pseudowire", + __func__); + break; + default: + log_debug("%s: error handling imsg %d", __func__, + imsg.hdr.type); + break; + } + imsg_free(&imsg); + } + if (!shut) + imsg_event_add(iev); + else { + /* this pipe is dead, so remove the event handlers and exit */ + THREAD_READ_OFF(iev->ev_read); + THREAD_WRITE_OFF(iev->ev_write); + lde_pid = 0; + if (ldpe_pid == 0) + ldpd_shutdown(); + else + kill(ldpe_pid, SIGTERM); + } + + return (0); +} + +/* ARGSUSED */ +int +ldp_write_handler(struct thread *thread) +{ + struct imsgev *iev = THREAD_ARG(thread); + struct imsgbuf *ibuf = &iev->ibuf; + ssize_t n; + + iev->ev_write = NULL; + + if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN) + fatal("msgbuf_write"); + if (n == 0) { + /* this pipe is dead, so remove the event handlers */ + THREAD_READ_OFF(iev->ev_read); + THREAD_WRITE_OFF(iev->ev_write); + return (0); + } + + imsg_event_add(iev); + + return (0); +} + +void +main_imsg_compose_ldpe(int type, pid_t pid, void *data, uint16_t datalen) +{ + if (iev_ldpe == NULL) + return; + imsg_compose_event(iev_ldpe, type, 0, pid, -1, data, datalen); +} + +void +main_imsg_compose_lde(int type, pid_t pid, void *data, uint16_t datalen) +{ + imsg_compose_event(iev_lde, type, 0, pid, -1, data, datalen); +} + +int +main_imsg_compose_both(enum imsg_type type, void *buf, uint16_t len) +{ + if (iev_ldpe == NULL || iev_lde == NULL) + return (0); + if (imsg_compose_event(iev_ldpe, type, 0, 0, -1, buf, len) == -1) + return (-1); + if (imsg_compose_event(iev_lde, type, 0, 0, -1, buf, len) == -1) + return (-1); + return (0); +} + +void +imsg_event_add(struct imsgev *iev) +{ + THREAD_READ_ON(master, iev->ev_read, iev->handler_read, iev, + iev->ibuf.fd); + + if (iev->ibuf.w.queued) + THREAD_WRITE_ON(master, iev->ev_write, iev->handler_write, iev, + iev->ibuf.fd); +} + +int +imsg_compose_event(struct imsgev *iev, uint16_t type, uint32_t peerid, + pid_t pid, int fd, void *data, uint16_t datalen) +{ + int ret; + + if ((ret = imsg_compose(&iev->ibuf, type, peerid, + pid, fd, data, datalen)) != -1) + imsg_event_add(iev); + return (ret); +} + +void +evbuf_enqueue(struct evbuf *eb, struct ibuf *buf) +{ + ibuf_close(&eb->wbuf, buf); + evbuf_event_add(eb); +} + +void +evbuf_event_add(struct evbuf *eb) +{ + if (eb->wbuf.queued) + THREAD_WRITE_ON(master, eb->ev, eb->handler, eb->arg, + eb->wbuf.fd); +} + +void +evbuf_init(struct evbuf *eb, int fd, int (*handler)(struct thread *), + void *arg) +{ + msgbuf_init(&eb->wbuf); + eb->wbuf.fd = fd; + eb->handler = handler; + eb->arg = arg; +} + +void +evbuf_clear(struct evbuf *eb) +{ + THREAD_WRITE_OFF(eb->ev); + msgbuf_clear(&eb->wbuf); + eb->wbuf.fd = -1; +} + +static int 
+main_imsg_send_ipc_sockets(struct imsgbuf *ldpe_buf, struct imsgbuf *lde_buf) +{ + int pipe_ldpe2lde[2]; + + if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, pipe_ldpe2lde) == -1) + return (-1); + sock_set_nonblock(pipe_ldpe2lde[0]); + sock_set_nonblock(pipe_ldpe2lde[1]); + + if (imsg_compose(ldpe_buf, IMSG_SOCKET_IPC, 0, 0, pipe_ldpe2lde[0], + NULL, 0) == -1) + return (-1); + if (imsg_compose(lde_buf, IMSG_SOCKET_IPC, 0, 0, pipe_ldpe2lde[1], + NULL, 0) == -1) + return (-1); + + return (0); +} + +static void +main_imsg_send_net_sockets(int af) +{ + if (!ldp_addrisset(af, &(ldp_af_conf_get(ldpd_conf, af))->trans_addr)) + return; + + main_imsg_send_net_socket(af, LDP_SOCKET_DISC); + main_imsg_send_net_socket(af, LDP_SOCKET_EDISC); + main_imsg_send_net_socket(af, LDP_SOCKET_SESSION); + imsg_compose_event(iev_ldpe, IMSG_SETUP_SOCKETS, af, 0, -1, NULL, 0); +} + +static void +main_imsg_send_net_socket(int af, enum socket_type type) +{ + int fd; + + fd = ldp_create_socket(af, type); + if (fd == -1) { + log_warnx("%s: failed to create %s socket for address-family " + "%s", __func__, socket_name(type), af_name(af)); + return; + } + + imsg_compose_event(iev_ldpe, IMSG_SOCKET_NET, af, 0, fd, &type, + sizeof(type)); +} + +struct ldpd_af_conf * +ldp_af_conf_get(struct ldpd_conf *xconf, int af) +{ + switch (af) { + case AF_INET: + return (&xconf->ipv4); + case AF_INET6: + return (&xconf->ipv6); + default: + fatalx("ldp_af_conf_get: unknown af"); + } +} + +struct ldpd_af_global * +ldp_af_global_get(struct ldpd_global *xglobal, int af) +{ + switch (af) { + case AF_INET: + return (&xglobal->ipv4); + case AF_INET6: + return (&xglobal->ipv6); + default: + fatalx("ldp_af_global_get: unknown af"); + } +} + +int +ldp_is_dual_stack(struct ldpd_conf *xconf) +{ + return ((xconf->ipv4.flags & F_LDPD_AF_ENABLED) && + (xconf->ipv6.flags & F_LDPD_AF_ENABLED)); +} + +in_addr_t +ldp_rtr_id_get(struct ldpd_conf *xconf) +{ + if (xconf->rtr_id.s_addr != INADDR_ANY) + return (xconf->rtr_id.s_addr); + else + return (global.rtr_id.s_addr); +} + +static int +main_imsg_send_config(struct ldpd_conf *xconf) +{ + struct iface *iface; + struct tnbr *tnbr; + struct nbr_params *nbrp; + struct l2vpn *l2vpn; + struct l2vpn_if *lif; + struct l2vpn_pw *pw; + + if (main_imsg_compose_both(IMSG_RECONF_CONF, xconf, + sizeof(*xconf)) == -1) + return (-1); + + LIST_FOREACH(iface, &xconf->iface_list, entry) { + if (main_imsg_compose_both(IMSG_RECONF_IFACE, iface, + sizeof(*iface)) == -1) + return (-1); + } + + LIST_FOREACH(tnbr, &xconf->tnbr_list, entry) { + if (main_imsg_compose_both(IMSG_RECONF_TNBR, tnbr, + sizeof(*tnbr)) == -1) + return (-1); + } + + LIST_FOREACH(nbrp, &xconf->nbrp_list, entry) { + if (main_imsg_compose_both(IMSG_RECONF_NBRP, nbrp, + sizeof(*nbrp)) == -1) + return (-1); + } + + LIST_FOREACH(l2vpn, &xconf->l2vpn_list, entry) { + if (main_imsg_compose_both(IMSG_RECONF_L2VPN, l2vpn, + sizeof(*l2vpn)) == -1) + return (-1); + + LIST_FOREACH(lif, &l2vpn->if_list, entry) { + if (main_imsg_compose_both(IMSG_RECONF_L2VPN_IF, lif, + sizeof(*lif)) == -1) + return (-1); + } + LIST_FOREACH(pw, &l2vpn->pw_list, entry) { + if (main_imsg_compose_both(IMSG_RECONF_L2VPN_PW, pw, + sizeof(*pw)) == -1) + return (-1); + } + LIST_FOREACH(pw, &l2vpn->pw_inactive_list, entry) { + if (main_imsg_compose_both(IMSG_RECONF_L2VPN_IPW, pw, + sizeof(*pw)) == -1) + return (-1); + } + } + + if (main_imsg_compose_both(IMSG_RECONF_END, NULL, 0) == -1) + return (-1); + + return (0); +} + +int +ldp_reload_ref(struct ldpd_conf *xconf, void **ref) +{ + 
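	/*
	 * ldp_reload()/ldp_reload_ref() are the entry points used to apply a
	 * candidate configuration: the candidate is normalized, sent to the
	 * ldpe/lde children, and then merged into the running configuration
	 * (the merge frees the candidate).  A minimal usage sketch follows,
	 * assuming a caller running in the parent process; "candidate" and
	 * the example field change are hypothetical.
	 */
#if 0	/* illustrative usage sketch; not compiled */
	{
		struct ldpd_conf	*candidate;

		candidate = ldp_dup_config(ldpd_conf);	/* clone running config */
		candidate->lhello_interval = 10;	/* example change */
		if (ldp_reload(candidate) == -1)	/* consumes "candidate" */
			log_warnx("configuration reload failed");
	}
#endif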
ldp_config_normalize(xconf, ref); + + if (main_imsg_send_config(xconf) == -1) + return (-1); + + merge_config_ref(ldpd_conf, xconf, ref); + + return (0); +} + +int +ldp_reload(struct ldpd_conf *xconf) +{ + return ldp_reload_ref(xconf, NULL); +} + +static void +ldp_config_normalize(struct ldpd_conf *xconf, void **ref) +{ + struct l2vpn *l2vpn; + struct l2vpn_pw *pw; + + if (!(xconf->flags & F_LDPD_ENABLED)) + ldp_config_reset_main(xconf, ref); + else { + if (!(xconf->ipv4.flags & F_LDPD_AF_ENABLED)) + ldp_config_reset_af(xconf, AF_INET, ref); + if (!(xconf->ipv6.flags & F_LDPD_AF_ENABLED)) + ldp_config_reset_af(xconf, AF_INET6, ref); + } + + LIST_FOREACH(l2vpn, &xconf->l2vpn_list, entry) { + LIST_FOREACH(pw, &l2vpn->pw_list, entry) { + if (pw->flags & F_PW_STATIC_NBR_ADDR) + continue; + + pw->af = AF_INET; + pw->addr.v4 = pw->lsr_id; + } + LIST_FOREACH(pw, &l2vpn->pw_inactive_list, entry) { + if (pw->flags & F_PW_STATIC_NBR_ADDR) + continue; + + pw->af = AF_INET; + pw->addr.v4 = pw->lsr_id; + } + } +} + +static void +ldp_config_reset_main(struct ldpd_conf *conf, void **ref) +{ + struct iface *iface; + struct nbr_params *nbrp; + + while ((iface = LIST_FIRST(&conf->iface_list)) != NULL) { + if (ref && *ref == iface) + *ref = NULL; + LIST_REMOVE(iface, entry); + free(iface); + } + + while ((nbrp = LIST_FIRST(&conf->nbrp_list)) != NULL) { + if (ref && *ref == nbrp) + *ref = NULL; + LIST_REMOVE(nbrp, entry); + free(nbrp); + } + + conf->rtr_id.s_addr = INADDR_ANY; + ldp_config_reset_af(conf, AF_INET, ref); + ldp_config_reset_af(conf, AF_INET6, ref); + conf->lhello_holdtime = LINK_DFLT_HOLDTIME; + conf->lhello_interval = DEFAULT_HELLO_INTERVAL; + conf->thello_holdtime = TARGETED_DFLT_HOLDTIME; + conf->thello_interval = DEFAULT_HELLO_INTERVAL; + conf->trans_pref = DUAL_STACK_LDPOV6; + conf->flags = 0; +} + +static void +ldp_config_reset_af(struct ldpd_conf *conf, int af, void **ref) +{ + struct ldpd_af_conf *af_conf; + struct iface *iface; + struct iface_af *ia; + struct tnbr *tnbr, *ttmp; + + LIST_FOREACH(iface, &conf->iface_list, entry) { + ia = iface_af_get(iface, af); + ia->enabled = 0; + } + + LIST_FOREACH_SAFE(tnbr, &conf->tnbr_list, entry, ttmp) { + if (tnbr->af != af) + continue; + + if (ref && *ref == tnbr) + *ref = NULL; + LIST_REMOVE(tnbr, entry); + free(tnbr); + } + + af_conf = ldp_af_conf_get(conf, af); + af_conf->keepalive = 180; + af_conf->lhello_holdtime = 0; + af_conf->lhello_interval = 0; + af_conf->thello_holdtime = 0; + af_conf->thello_interval = 0; + memset(&af_conf->trans_addr, 0, sizeof(af_conf->trans_addr)); + af_conf->flags = 0; +} + +struct ldpd_conf * +ldp_dup_config_ref(struct ldpd_conf *conf, void **ref) +{ + struct ldpd_conf *xconf; + struct iface *iface, *xi; + struct tnbr *tnbr, *xt; + struct nbr_params *nbrp, *xn; + struct l2vpn *l2vpn, *xl; + struct l2vpn_if *lif, *xf; + struct l2vpn_pw *pw, *xp; + +#define COPY(a, b) do { \ + a = calloc(1, sizeof(*a)); \ + if (a == NULL) \ + fatal(__func__); \ + *a = *b; \ + if (ref && *ref == b) *ref = a; \ + } while (0) + + COPY(xconf, conf); + LIST_INIT(&xconf->iface_list); + LIST_INIT(&xconf->tnbr_list); + LIST_INIT(&xconf->nbrp_list); + LIST_INIT(&xconf->l2vpn_list); + + LIST_FOREACH(iface, &conf->iface_list, entry) { + COPY(xi, iface); + xi->ipv4.iface = xi; + xi->ipv6.iface = xi; + LIST_INSERT_HEAD(&xconf->iface_list, xi, entry); + } + LIST_FOREACH(tnbr, &conf->tnbr_list, entry) { + COPY(xt, tnbr); + LIST_INSERT_HEAD(&xconf->tnbr_list, xt, entry); + } + LIST_FOREACH(nbrp, &conf->nbrp_list, entry) { + COPY(xn, nbrp); + 
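		/*
		 * COPY() allocates the duplicate, copies the structure by
		 * value and, if the caller passed a reference pointer that
		 * points at the original object, retargets it to the copy so
		 * the reference survives the duplication.
		 */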
LIST_INSERT_HEAD(&xconf->nbrp_list, xn, entry); + } + LIST_FOREACH(l2vpn, &conf->l2vpn_list, entry) { + COPY(xl, l2vpn); + LIST_INIT(&xl->if_list); + LIST_INIT(&xl->pw_list); + LIST_INIT(&xl->pw_inactive_list); + LIST_INSERT_HEAD(&xconf->l2vpn_list, xl, entry); + + LIST_FOREACH(lif, &l2vpn->if_list, entry) { + COPY(xf, lif); + xf->l2vpn = xl; + LIST_INSERT_HEAD(&xl->if_list, xf, entry); + } + LIST_FOREACH(pw, &l2vpn->pw_list, entry) { + COPY(xp, pw); + xp->l2vpn = xl; + LIST_INSERT_HEAD(&xl->pw_list, xp, entry); + } + LIST_FOREACH(pw, &l2vpn->pw_inactive_list, entry) { + COPY(xp, pw); + xp->l2vpn = xl; + LIST_INSERT_HEAD(&xl->pw_inactive_list, xp, entry); + } + } +#undef COPY + + return (xconf); +} + +struct ldpd_conf * +ldp_dup_config(struct ldpd_conf *conf) +{ + return ldp_dup_config_ref(conf, NULL); +} + +void +ldp_clear_config(struct ldpd_conf *xconf) +{ + struct iface *iface; + struct tnbr *tnbr; + struct nbr_params *nbrp; + struct l2vpn *l2vpn; + + while ((iface = LIST_FIRST(&xconf->iface_list)) != NULL) { + LIST_REMOVE(iface, entry); + free(iface); + } + while ((tnbr = LIST_FIRST(&xconf->tnbr_list)) != NULL) { + LIST_REMOVE(tnbr, entry); + free(tnbr); + } + while ((nbrp = LIST_FIRST(&xconf->nbrp_list)) != NULL) { + LIST_REMOVE(nbrp, entry); + free(nbrp); + } + while ((l2vpn = LIST_FIRST(&xconf->l2vpn_list)) != NULL) { + LIST_REMOVE(l2vpn, entry); + l2vpn_del(l2vpn); + } + + free(xconf); +} + +static void +merge_config_ref(struct ldpd_conf *conf, struct ldpd_conf *xconf, void **ref) +{ + merge_global(conf, xconf); + merge_af(AF_INET, &conf->ipv4, &xconf->ipv4); + merge_af(AF_INET6, &conf->ipv6, &xconf->ipv6); + merge_ifaces(conf, xconf, ref); + merge_tnbrs(conf, xconf, ref); + merge_nbrps(conf, xconf, ref); + merge_l2vpns(conf, xconf, ref); + if (ref && *ref == xconf) + *ref = conf; + free(xconf); +} + +void +merge_config(struct ldpd_conf *conf, struct ldpd_conf *xconf) +{ + merge_config_ref(conf, xconf, NULL); +} + +static void +merge_global(struct ldpd_conf *conf, struct ldpd_conf *xconf) +{ + /* change of router-id requires resetting all neighborships */ + if (conf->rtr_id.s_addr != xconf->rtr_id.s_addr) { + if (ldpd_process == PROC_LDP_ENGINE) { + ldpe_reset_nbrs(AF_INET); + ldpe_reset_nbrs(AF_INET6); + if (conf->rtr_id.s_addr == INADDR_ANY || + xconf->rtr_id.s_addr == INADDR_ANY) { + if_update_all(AF_UNSPEC); + tnbr_update_all(AF_UNSPEC); + } + } + conf->rtr_id = xconf->rtr_id; + } + + conf->lhello_holdtime = xconf->lhello_holdtime; + conf->lhello_interval = xconf->lhello_interval; + conf->thello_holdtime = xconf->thello_holdtime; + conf->thello_interval = xconf->thello_interval; + + if (conf->trans_pref != xconf->trans_pref) { + if (ldpd_process == PROC_LDP_ENGINE) + ldpe_reset_ds_nbrs(); + conf->trans_pref = xconf->trans_pref; + } + + if ((conf->flags & F_LDPD_DS_CISCO_INTEROP) != + (xconf->flags & F_LDPD_DS_CISCO_INTEROP)) { + if (ldpd_process == PROC_LDP_ENGINE) + ldpe_reset_ds_nbrs(); + } + + conf->flags = xconf->flags; +} + +static void +merge_af(int af, struct ldpd_af_conf *af_conf, struct ldpd_af_conf *xa) +{ + int egress_label_changed = 0; + int update_sockets = 0; + + if (af_conf->keepalive != xa->keepalive) { + af_conf->keepalive = xa->keepalive; + if (ldpd_process == PROC_LDP_ENGINE) + ldpe_stop_init_backoff(af); + } + + af_conf->lhello_holdtime = xa->lhello_holdtime; + af_conf->lhello_interval = xa->lhello_interval; + af_conf->thello_holdtime = xa->thello_holdtime; + af_conf->thello_interval = xa->thello_interval; + + /* update flags */ + if (ldpd_process == 
PROC_LDP_ENGINE && + (af_conf->flags & F_LDPD_AF_THELLO_ACCEPT) && + !(xa->flags & F_LDPD_AF_THELLO_ACCEPT)) + ldpe_remove_dynamic_tnbrs(af); + + if ((af_conf->flags & F_LDPD_AF_NO_GTSM) != + (xa->flags & F_LDPD_AF_NO_GTSM)) { + if (af == AF_INET6) + /* need to set/unset IPV6_MINHOPCOUNT */ + update_sockets = 1; + else if (ldpd_process == PROC_LDP_ENGINE) + /* for LDPv4 just resetting the neighbors is enough */ + ldpe_reset_nbrs(af); + } + + if ((af_conf->flags & F_LDPD_AF_EXPNULL) != + (xa->flags & F_LDPD_AF_EXPNULL)) + egress_label_changed = 1; + + af_conf->flags = xa->flags; + + if (egress_label_changed) { + switch (ldpd_process) { + case PROC_LDE_ENGINE: + lde_change_egress_label(af, af_conf->flags & + F_LDPD_AF_EXPNULL); + break; + default: + break; + } + } + + if (ldp_addrcmp(af, &af_conf->trans_addr, &xa->trans_addr)) { + af_conf->trans_addr = xa->trans_addr; + update_sockets = 1; + } + + if (ldpd_process == PROC_MAIN && iev_ldpe && update_sockets) + imsg_compose_event(iev_ldpe, IMSG_CLOSE_SOCKETS, af, 0, -1, + NULL, 0); +} + +static void +merge_ifaces(struct ldpd_conf *conf, struct ldpd_conf *xconf, void **ref) +{ + struct iface *iface, *itmp, *xi; + + LIST_FOREACH_SAFE(iface, &conf->iface_list, entry, itmp) { + /* find deleted interfaces */ + if ((xi = if_lookup_name(xconf, iface->name)) == NULL) { + LIST_REMOVE(iface, entry); + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + break; + case PROC_LDP_ENGINE: + if_exit(iface); + break; + case PROC_MAIN: + QOBJ_UNREG (iface); + break; + } + free(iface); + } + } + LIST_FOREACH_SAFE(xi, &xconf->iface_list, entry, itmp) { + /* find new interfaces */ + if ((iface = if_lookup_name(conf, xi->name)) == NULL) { + LIST_REMOVE(xi, entry); + LIST_INSERT_HEAD(&conf->iface_list, xi, entry); + + if (ldpd_process == PROC_MAIN) { + QOBJ_REG (xi, iface); + /* resend addresses to activate new interfaces */ + kif_redistribute(xi->name); + } + continue; + } + + /* update existing interfaces */ + merge_iface_af(&iface->ipv4, &xi->ipv4); + merge_iface_af(&iface->ipv6, &xi->ipv6); + LIST_REMOVE(xi, entry); + if (ref && *ref == xi) + *ref = iface; + free(xi); + } +} + +static void +merge_iface_af(struct iface_af *ia, struct iface_af *xi) +{ + if (ia->enabled != xi->enabled) { + ia->enabled = xi->enabled; + if (ldpd_process == PROC_LDP_ENGINE) + if_update(ia->iface, ia->af); + } + ia->hello_holdtime = xi->hello_holdtime; + ia->hello_interval = xi->hello_interval; +} + +static void +merge_tnbrs(struct ldpd_conf *conf, struct ldpd_conf *xconf, void **ref) +{ + struct tnbr *tnbr, *ttmp, *xt; + + LIST_FOREACH_SAFE(tnbr, &conf->tnbr_list, entry, ttmp) { + if (!(tnbr->flags & F_TNBR_CONFIGURED)) + continue; + + /* find deleted tnbrs */ + if ((xt = tnbr_find(xconf, tnbr->af, &tnbr->addr)) == NULL) { + switch (ldpd_process) { + case PROC_LDE_ENGINE: + LIST_REMOVE(tnbr, entry); + free(tnbr); + break; + case PROC_LDP_ENGINE: + tnbr->flags &= ~F_TNBR_CONFIGURED; + tnbr_check(tnbr); + break; + case PROC_MAIN: + LIST_REMOVE(tnbr, entry); + QOBJ_UNREG (tnbr); + free(tnbr); + break; + } + } + } + LIST_FOREACH_SAFE(xt, &xconf->tnbr_list, entry, ttmp) { + /* find new tnbrs */ + if ((tnbr = tnbr_find(conf, xt->af, &xt->addr)) == NULL) { + LIST_REMOVE(xt, entry); + LIST_INSERT_HEAD(&conf->tnbr_list, xt, entry); + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + break; + case PROC_LDP_ENGINE: + tnbr_update(xt); + break; + case PROC_MAIN: + QOBJ_REG (xt, tnbr); + break; + } + continue; + } + + /* update existing tnbrs */ + if (!(tnbr->flags & F_TNBR_CONFIGURED)) + 
tnbr->flags |= F_TNBR_CONFIGURED; + LIST_REMOVE(xt, entry); + if (ref && *ref == xt) + *ref = tnbr; + free(xt); + } +} + +static void +merge_nbrps(struct ldpd_conf *conf, struct ldpd_conf *xconf, void **ref) +{ + struct nbr_params *nbrp, *ntmp, *xn; + struct nbr *nbr; + int nbrp_changed; + + LIST_FOREACH_SAFE(nbrp, &conf->nbrp_list, entry, ntmp) { + /* find deleted nbrps */ + if ((xn = nbr_params_find(xconf, nbrp->lsr_id)) == NULL) { + switch (ldpd_process) { + case PROC_LDE_ENGINE: + break; + case PROC_LDP_ENGINE: + nbr = nbr_find_ldpid(nbrp->lsr_id.s_addr); + if (nbr) { + session_shutdown(nbr, S_SHUTDOWN, 0, 0); +#ifdef __OpenBSD__ + pfkey_remove(nbr); +#else + sock_set_md5sig( + (ldp_af_global_get(&global, + nbr->af))->ldp_session_socket, + nbr->af, &nbr->raddr, NULL); +#endif + if (nbr_session_active_role(nbr)) + nbr_establish_connection(nbr); + } + break; + case PROC_MAIN: + QOBJ_UNREG (nbrp); + break; + } + LIST_REMOVE(nbrp, entry); + free(nbrp); + } + } + LIST_FOREACH_SAFE(xn, &xconf->nbrp_list, entry, ntmp) { + /* find new nbrps */ + if ((nbrp = nbr_params_find(conf, xn->lsr_id)) == NULL) { + LIST_REMOVE(xn, entry); + LIST_INSERT_HEAD(&conf->nbrp_list, xn, entry); + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + break; + case PROC_LDP_ENGINE: + nbr = nbr_find_ldpid(xn->lsr_id.s_addr); + if (nbr) { + session_shutdown(nbr, S_SHUTDOWN, 0, 0); +#ifdef __OpenBSD__ + if (pfkey_establish(nbr, xn) == -1) + fatalx("pfkey setup failed"); +#else + sock_set_md5sig( + (ldp_af_global_get(&global, + nbr->af))->ldp_session_socket, + nbr->af, &nbr->raddr, + xn->auth.md5key); +#endif + if (nbr_session_active_role(nbr)) + nbr_establish_connection(nbr); + } + break; + case PROC_MAIN: + QOBJ_REG (xn, nbr_params); + break; + } + continue; + } + + /* update existing nbrps */ + if (nbrp->flags != xn->flags || + nbrp->keepalive != xn->keepalive || + nbrp->gtsm_enabled != xn->gtsm_enabled || + nbrp->gtsm_hops != xn->gtsm_hops || + nbrp->auth.method != xn->auth.method || + strcmp(nbrp->auth.md5key, xn->auth.md5key) != 0) + nbrp_changed = 1; + else + nbrp_changed = 0; + + nbrp->keepalive = xn->keepalive; + nbrp->gtsm_enabled = xn->gtsm_enabled; + nbrp->gtsm_hops = xn->gtsm_hops; + nbrp->auth.method = xn->auth.method; + strlcpy(nbrp->auth.md5key, xn->auth.md5key, + sizeof(nbrp->auth.md5key)); + nbrp->auth.md5key_len = xn->auth.md5key_len; + nbrp->flags = xn->flags; + + if (ldpd_process == PROC_LDP_ENGINE) { + nbr = nbr_find_ldpid(nbrp->lsr_id.s_addr); + if (nbr && nbrp_changed) { + session_shutdown(nbr, S_SHUTDOWN, 0, 0); +#ifdef __OpenBSD__ + pfkey_remove(nbr); + if (pfkey_establish(nbr, nbrp) == -1) + fatalx("pfkey setup failed"); +#else + sock_set_md5sig((ldp_af_global_get(&global, + nbr->af))->ldp_session_socket, nbr->af, + &nbr->raddr, nbrp->auth.md5key); +#endif + if (nbr_session_active_role(nbr)) + nbr_establish_connection(nbr); + } + } + LIST_REMOVE(xn, entry); + if (ref && *ref == xn) + *ref = nbrp; + free(xn); + } +} + +static void +merge_l2vpns(struct ldpd_conf *conf, struct ldpd_conf *xconf, void **ref) +{ + struct l2vpn *l2vpn, *ltmp, *xl; + struct l2vpn_if *lif; + struct l2vpn_pw *pw; + + LIST_FOREACH_SAFE(l2vpn, &conf->l2vpn_list, entry, ltmp) { + /* find deleted l2vpns */ + if ((xl = l2vpn_find(xconf, l2vpn->name)) == NULL) { + LIST_REMOVE(l2vpn, entry); + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + l2vpn_exit(l2vpn); + break; + case PROC_LDP_ENGINE: + ldpe_l2vpn_exit(l2vpn); + break; + case PROC_MAIN: + LIST_FOREACH(lif, &l2vpn->if_list, entry) + QOBJ_UNREG (lif); + 
LIST_FOREACH(pw, &l2vpn->pw_list, entry) + QOBJ_UNREG (pw); + LIST_FOREACH(pw, &l2vpn->pw_inactive_list, entry) + QOBJ_UNREG (pw); + QOBJ_UNREG (l2vpn); + break; + } + l2vpn_del(l2vpn); + } + } + LIST_FOREACH_SAFE(xl, &xconf->l2vpn_list, entry, ltmp) { + /* find new l2vpns */ + if ((l2vpn = l2vpn_find(conf, xl->name)) == NULL) { + LIST_REMOVE(xl, entry); + LIST_INSERT_HEAD(&conf->l2vpn_list, xl, entry); + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + l2vpn_init(xl); + break; + case PROC_LDP_ENGINE: + ldpe_l2vpn_init(xl); + break; + case PROC_MAIN: + QOBJ_REG (xl, l2vpn); + break; + } + continue; + } + + /* update existing l2vpns */ + merge_l2vpn(conf, l2vpn, xl, ref); + LIST_REMOVE(xl, entry); + if (ref && *ref == xl) + *ref = l2vpn; + free(xl); + } +} + +static void +merge_l2vpn(struct ldpd_conf *xconf, struct l2vpn *l2vpn, struct l2vpn *xl, void **ref) +{ + struct l2vpn_if *lif, *ftmp, *xf; + struct l2vpn_pw *pw, *ptmp, *xp; + struct nbr *nbr; + int reset_nbr, reinstall_pwfec, reinstall_tnbr; + LIST_HEAD(, l2vpn_pw) pw_aux_list; + int previous_pw_type, previous_mtu; + + previous_pw_type = l2vpn->pw_type; + previous_mtu = l2vpn->mtu; + + /* merge intefaces */ + LIST_FOREACH_SAFE(lif, &l2vpn->if_list, entry, ftmp) { + /* find deleted interfaces */ + if ((xf = l2vpn_if_find_name(xl, lif->ifname)) == NULL) { + if (ldpd_process == PROC_MAIN) + QOBJ_UNREG (lif); + LIST_REMOVE(lif, entry); + free(lif); + } + } + LIST_FOREACH_SAFE(xf, &xl->if_list, entry, ftmp) { + /* find new interfaces */ + if ((lif = l2vpn_if_find_name(l2vpn, xf->ifname)) == NULL) { + LIST_REMOVE(xf, entry); + LIST_INSERT_HEAD(&l2vpn->if_list, xf, entry); + xf->l2vpn = l2vpn; + if (ldpd_process == PROC_MAIN) + QOBJ_REG (xf, l2vpn_if); + continue; + } + + LIST_REMOVE(xf, entry); + if (ref && *ref == xf) + *ref = lif; + free(xf); + } + + /* merge active pseudowires */ + LIST_INIT(&pw_aux_list); + LIST_FOREACH_SAFE(pw, &l2vpn->pw_list, entry, ptmp) { + /* find deleted active pseudowires */ + if ((xp = l2vpn_pw_find_name(xl, pw->ifname)) == NULL) { + switch (ldpd_process) { + case PROC_LDE_ENGINE: + l2vpn_pw_exit(pw); + break; + case PROC_LDP_ENGINE: + ldpe_l2vpn_pw_exit(pw); + break; + case PROC_MAIN: + QOBJ_UNREG (pw); + break; + } + + LIST_REMOVE(pw, entry); + free(pw); + } + } + LIST_FOREACH_SAFE(xp, &xl->pw_list, entry, ptmp) { + /* find new active pseudowires */ + if ((pw = l2vpn_pw_find_name(l2vpn, xp->ifname)) == NULL) { + LIST_REMOVE(xp, entry); + LIST_INSERT_HEAD(&l2vpn->pw_list, xp, entry); + xp->l2vpn = l2vpn; + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + l2vpn_pw_init(xp); + break; + case PROC_LDP_ENGINE: + ldpe_l2vpn_pw_init(xp); + break; + case PROC_MAIN: + QOBJ_REG (xp, l2vpn_pw); + break; + } + continue; + } + + /* update existing active pseudowire */ + if (pw->af != xp->af || + ldp_addrcmp(pw->af, &pw->addr, &xp->addr)) + reinstall_tnbr = 1; + else + reinstall_tnbr = 0; + + /* changes that require a session restart */ + if ((pw->flags & (F_PW_STATUSTLV_CONF|F_PW_CWORD_CONF)) != + (xp->flags & (F_PW_STATUSTLV_CONF|F_PW_CWORD_CONF))) + reset_nbr = 1; + else + reset_nbr = 0; + + if (l2vpn->pw_type != xl->pw_type || l2vpn->mtu != xl->mtu || + pw->pwid != xp->pwid || reinstall_tnbr || reset_nbr || + pw->lsr_id.s_addr != xp->lsr_id.s_addr) + reinstall_pwfec = 1; + else + reinstall_pwfec = 0; + + /* check if the pseudowire should be disabled */ + if (xp->lsr_id.s_addr == INADDR_ANY || xp->pwid == 0) { + reinstall_tnbr = 0; + reset_nbr = 0; + reinstall_pwfec = 0; + + switch (ldpd_process) { + case 
PROC_LDE_ENGINE: + l2vpn_pw_exit(pw); + break; + case PROC_LDP_ENGINE: + ldpe_l2vpn_pw_exit(pw); + break; + case PROC_MAIN: + break; + } + + /* remove from active list */ + LIST_REMOVE(pw, entry); + LIST_INSERT_HEAD(&pw_aux_list, pw, entry); + } + + if (ldpd_process == PROC_LDP_ENGINE) { + if (reinstall_tnbr) + ldpe_l2vpn_pw_exit(pw); + if (reset_nbr) { + nbr = nbr_find_ldpid(pw->lsr_id.s_addr); + if (nbr && nbr->state == NBR_STA_OPER) + session_shutdown(nbr, S_SHUTDOWN, 0, 0); + } + } + if (ldpd_process == PROC_LDE_ENGINE && + !reset_nbr && reinstall_pwfec) + l2vpn_pw_exit(pw); + pw->lsr_id = xp->lsr_id; + pw->af = xp->af; + pw->addr = xp->addr; + pw->pwid = xp->pwid; + strlcpy(pw->ifname, xp->ifname, sizeof(pw->ifname)); + pw->ifindex = xp->ifindex; + if (xp->flags & F_PW_CWORD_CONF) + pw->flags |= F_PW_CWORD_CONF; + else + pw->flags &= ~F_PW_CWORD_CONF; + if (xp->flags & F_PW_STATUSTLV_CONF) + pw->flags |= F_PW_STATUSTLV_CONF; + else + pw->flags &= ~F_PW_STATUSTLV_CONF; + if (xp->flags & F_PW_STATIC_NBR_ADDR) + pw->flags |= F_PW_STATIC_NBR_ADDR; + else + pw->flags &= ~F_PW_STATIC_NBR_ADDR; + if (ldpd_process == PROC_LDP_ENGINE && reinstall_tnbr) + ldpe_l2vpn_pw_init(pw); + if (ldpd_process == PROC_LDE_ENGINE && + !reset_nbr && reinstall_pwfec) { + l2vpn->pw_type = xl->pw_type; + l2vpn->mtu = xl->mtu; + l2vpn_pw_init(pw); + l2vpn->pw_type = previous_pw_type; + l2vpn->mtu = previous_mtu; + } + + LIST_REMOVE(xp, entry); + if (ref && *ref == xp) + *ref = pw; + free(xp); + } + + /* merge inactive pseudowires */ + LIST_FOREACH_SAFE(pw, &l2vpn->pw_inactive_list, entry, ptmp) { + /* find deleted inactive pseudowires */ + if ((xp = l2vpn_pw_find_name(xl, pw->ifname)) == NULL) { + LIST_REMOVE(pw, entry); + if (ldpd_process == PROC_MAIN) + QOBJ_UNREG (pw); + free(pw); + } + } + LIST_FOREACH_SAFE(xp, &xl->pw_inactive_list, entry, ptmp) { + /* find new inactive pseudowires */ + if ((pw = l2vpn_pw_find_name(l2vpn, xp->ifname)) == NULL) { + LIST_REMOVE(xp, entry); + LIST_INSERT_HEAD(&l2vpn->pw_inactive_list, xp, entry); + xp->l2vpn = l2vpn; + if (ldpd_process == PROC_MAIN) + QOBJ_REG (xp, l2vpn_pw); + continue; + } + + /* update existing inactive pseudowire */ + pw->lsr_id.s_addr = xp->lsr_id.s_addr; + pw->af = xp->af; + pw->addr = xp->addr; + pw->pwid = xp->pwid; + strlcpy(pw->ifname, xp->ifname, sizeof(pw->ifname)); + pw->ifindex = xp->ifindex; + pw->flags = xp->flags; + + /* check if the pseudowire should be activated */ + if (pw->lsr_id.s_addr != INADDR_ANY && pw->pwid != 0) { + /* remove from inactive list */ + LIST_REMOVE(pw, entry); + LIST_INSERT_HEAD(&l2vpn->pw_list, pw, entry); + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + l2vpn_pw_init(pw); + break; + case PROC_LDP_ENGINE: + ldpe_l2vpn_pw_init(pw); + break; + case PROC_MAIN: + break; + } + } + + LIST_REMOVE(xp, entry); + if (ref && *ref == xp) + *ref = pw; + free(xp); + } + + /* insert pseudowires that were disabled in the inactive list */ + LIST_FOREACH_SAFE(pw, &pw_aux_list, entry, ptmp) { + LIST_REMOVE(pw, entry); + LIST_INSERT_HEAD(&l2vpn->pw_inactive_list, pw, entry); + } + + l2vpn->pw_type = xl->pw_type; + l2vpn->mtu = xl->mtu; + strlcpy(l2vpn->br_ifname, xl->br_ifname, sizeof(l2vpn->br_ifname)); + l2vpn->br_ifindex = xl->br_ifindex; +} + +struct ldpd_conf * +config_new_empty(void) +{ + struct ldpd_conf *xconf; + + xconf = calloc(1, sizeof(*xconf)); + if (xconf == NULL) + fatal(NULL); + + LIST_INIT(&xconf->iface_list); + LIST_INIT(&xconf->tnbr_list); + LIST_INIT(&xconf->nbrp_list); + LIST_INIT(&xconf->l2vpn_list); + + return 
(xconf); +} + +void +config_clear(struct ldpd_conf *conf) +{ + struct ldpd_conf *xconf; + + /* + * Merge current config with an empty config, this will deactivate + * and deallocate all the interfaces, pseudowires and so on. Before + * merging, copy the router-id and other variables to avoid some + * unnecessary operations, like trying to reset the neighborships. + */ + xconf = config_new_empty(); + xconf->ipv4 = conf->ipv4; + xconf->ipv6 = conf->ipv6; + xconf->rtr_id = conf->rtr_id; + xconf->trans_pref = conf->trans_pref; + xconf->flags = conf->flags; + merge_config(conf, xconf); + if (ldpd_process == PROC_MAIN) + QOBJ_UNREG (conf); + free(conf); +} diff --git a/ldpd/ldpd.conf.sample b/ldpd/ldpd.conf.sample new file mode 100644 index 0000000000..49da35c284 --- /dev/null +++ b/ldpd/ldpd.conf.sample @@ -0,0 +1,46 @@ +! -*- ldp -*- +! +! LDPd sample configuration file +! +hostname ldpd +password zebra +log stdout +! +interface eth0 +! +interface eth1 +! +interface lo +! +mpls ldp + dual-stack cisco-interop + neighbor 10.0.1.5 password opensourcerouting + neighbor 172.16.0.1 password opensourcerouting + ! + address-family ipv4 + discovery transport-address 10.0.1.1 + label local advertise explicit-null + ! + interface eth0 + ! + interface eth1 + ! + ! + address-family ipv6 + discovery transport-address 2001:db8::1 + ! + interface eth1 + ! + ! +! +l2vpn ENG type vpls + bridge br0 + member interface eth2 + ! + member pseudowire mpw0 + neighbor lsr-id 1.1.1.1 + pw-id 100 + ! +! +line vty +! diff --git a/ldpd/ldpd.h b/ldpd/ldpd.h new file mode 100644 index 0000000000..630b192489 --- /dev/null +++ b/ldpd/ldpd.h @@ -0,0 +1,688 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2004 Esben Norby + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
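
The config_new_empty()/merge_config() pair above is also how the daemon clears and reloads its configuration: config_clear() merges the running config against an empty one, while ldp_reload() (declared further down in ldpd.h, next to parse_config()) merges it against a freshly parsed candidate. A minimal sketch of that reload path, assuming only what the prototypes in this patch state (the file path, helper name and error handling are illustrative):

#include "ldpd.h"

/* Illustrative sketch: parse a candidate configuration and let
 * ldp_reload() merge it into the running one. */
static int
reload_from_file(char *path)
{
	struct ldpd_conf	*xconf;

	if ((xconf = parse_config(path)) == NULL)
		return (-1);

	/* on success the candidate is merged into the running config */
	return (ldp_reload(xconf));
}
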
+ */ + +#ifndef _LDPD_H_ +#define _LDPD_H_ + +#include "openbsd-queue.h" +#include "openbsd-tree.h" +#include "imsg.h" +#include "thread.h" +#include "qobj.h" + +#include "ldp.h" + +#define CONF_FILE "/etc/ldpd.conf" +#define LDPD_USER "_ldpd" + +#define LDPD_OPT_VERBOSE 0x00000001 +#define LDPD_OPT_VERBOSE2 0x00000002 +#define LDPD_OPT_NOACTION 0x00000004 + +#define TCP_MD5_KEY_LEN 80 +#define L2VPN_NAME_LEN 32 + +#define RT_BUF_SIZE 16384 +#define MAX_RTSOCK_BUF 128 * 1024 +#define LDP_BACKLOG 128 + +#define F_LDPD_INSERTED 0x0001 +#define F_CONNECTED 0x0002 +#define F_STATIC 0x0004 +#define F_DYNAMIC 0x0008 +#define F_REJECT 0x0010 +#define F_BLACKHOLE 0x0020 +#define F_REDISTRIBUTED 0x0040 + +struct evbuf { + struct msgbuf wbuf; + struct thread *ev; + int (*handler)(struct thread *); + void *arg; +}; + +struct imsgev { + struct imsgbuf ibuf; + int (*handler_write)(struct thread *); + struct thread *ev_write; + int (*handler_read)(struct thread *); + struct thread *ev_read; +}; + +enum imsg_type { + IMSG_NONE, + IMSG_CTL_RELOAD, + IMSG_CTL_SHOW_INTERFACE, + IMSG_CTL_SHOW_DISCOVERY, + IMSG_CTL_SHOW_DISC_IFACE, + IMSG_CTL_SHOW_DISC_TNBR, + IMSG_CTL_SHOW_DISC_ADJ, + IMSG_CTL_SHOW_NBR, + IMSG_CTL_SHOW_NBR_DISC, + IMSG_CTL_SHOW_NBR_END, + IMSG_CTL_SHOW_LIB, + IMSG_CTL_SHOW_L2VPN_PW, + IMSG_CTL_SHOW_L2VPN_BINDING, + IMSG_CTL_CLEAR_NBR, + IMSG_CTL_FIB_COUPLE, + IMSG_CTL_FIB_DECOUPLE, + IMSG_CTL_KROUTE, + IMSG_CTL_KROUTE_ADDR, + IMSG_CTL_IFINFO, + IMSG_CTL_END, + IMSG_CTL_LOG_VERBOSE, + IMSG_KLABEL_CHANGE, + IMSG_KLABEL_DELETE, + IMSG_KPWLABEL_CHANGE, + IMSG_KPWLABEL_DELETE, + IMSG_IFSTATUS, + IMSG_NEWADDR, + IMSG_DELADDR, + IMSG_RTRID_UPDATE, + IMSG_LABEL_MAPPING, + IMSG_LABEL_MAPPING_FULL, + IMSG_LABEL_REQUEST, + IMSG_LABEL_RELEASE, + IMSG_LABEL_WITHDRAW, + IMSG_LABEL_ABORT, + IMSG_REQUEST_ADD, + IMSG_REQUEST_ADD_END, + IMSG_MAPPING_ADD, + IMSG_MAPPING_ADD_END, + IMSG_RELEASE_ADD, + IMSG_RELEASE_ADD_END, + IMSG_WITHDRAW_ADD, + IMSG_WITHDRAW_ADD_END, + IMSG_ADDRESS_ADD, + IMSG_ADDRESS_DEL, + IMSG_NOTIFICATION, + IMSG_NOTIFICATION_SEND, + IMSG_NEIGHBOR_UP, + IMSG_NEIGHBOR_DOWN, + IMSG_NETWORK_ADD, + IMSG_NETWORK_ADD_END, + IMSG_NETWORK_DEL, + IMSG_SOCKET_IPC, + IMSG_SOCKET_NET, + IMSG_CLOSE_SOCKETS, + IMSG_REQUEST_SOCKETS, + IMSG_SETUP_SOCKETS, + IMSG_RECONF_CONF, + IMSG_RECONF_IFACE, + IMSG_RECONF_TNBR, + IMSG_RECONF_NBRP, + IMSG_RECONF_L2VPN, + IMSG_RECONF_L2VPN_IF, + IMSG_RECONF_L2VPN_PW, + IMSG_RECONF_L2VPN_IPW, + IMSG_RECONF_END, + IMSG_DEBUG_UPDATE, + IMSG_LOG +}; + +union ldpd_addr { + struct in_addr v4; + struct in6_addr v6; +}; + +#define IN6_IS_SCOPE_EMBED(a) \ + ((IN6_IS_ADDR_LINKLOCAL(a)) || \ + (IN6_IS_ADDR_MC_LINKLOCAL(a)) || \ + (IN6_IS_ADDR_MC_INTFACELOCAL(a))) + +/* interface states */ +#define IF_STA_DOWN 0x01 +#define IF_STA_ACTIVE 0x02 + +/* targeted neighbor states */ +#define TNBR_STA_DOWN 0x01 +#define TNBR_STA_ACTIVE 0x02 + +/* interface types */ +enum iface_type { + IF_TYPE_POINTOPOINT, + IF_TYPE_BROADCAST +}; + +/* neighbor states */ +#define NBR_STA_PRESENT 0x0001 +#define NBR_STA_INITIAL 0x0002 +#define NBR_STA_OPENREC 0x0004 +#define NBR_STA_OPENSENT 0x0008 +#define NBR_STA_OPER 0x0010 +#define NBR_STA_SESSION (NBR_STA_INITIAL | NBR_STA_OPENREC | \ + NBR_STA_OPENSENT | NBR_STA_OPER) + +/* neighbor events */ +enum nbr_event { + NBR_EVT_NOTHING, + NBR_EVT_MATCH_ADJ, + NBR_EVT_CONNECT_UP, + NBR_EVT_CLOSE_SESSION, + NBR_EVT_INIT_RCVD, + NBR_EVT_KEEPALIVE_RCVD, + NBR_EVT_PDU_RCVD, + NBR_EVT_PDU_SENT, + NBR_EVT_INIT_SENT +}; + +/* neighbor actions */ +enum nbr_action { + 
NBR_ACT_NOTHING, + NBR_ACT_RST_KTIMEOUT, + NBR_ACT_SESSION_EST, + NBR_ACT_RST_KTIMER, + NBR_ACT_CONNECT_SETUP, + NBR_ACT_PASSIVE_INIT, + NBR_ACT_KEEPALIVE_SEND, + NBR_ACT_CLOSE_SESSION +}; + +TAILQ_HEAD(mapping_head, mapping_entry); + +struct map { + uint8_t type; + uint32_t msg_id; + union { + struct { + uint16_t af; + union ldpd_addr prefix; + uint8_t prefixlen; + } prefix; + struct { + uint16_t type; + uint32_t pwid; + uint32_t group_id; + uint16_t ifmtu; + } pwid; + } fec; + struct { + uint32_t status_code; + uint32_t msg_id; + uint16_t msg_type; + } st; + uint32_t label; + uint32_t requestid; + uint32_t pw_status; + uint8_t flags; +}; +#define F_MAP_REQ_ID 0x01 /* optional request message id present */ +#define F_MAP_STATUS 0x02 /* status */ +#define F_MAP_PW_CWORD 0x04 /* pseudowire control word */ +#define F_MAP_PW_ID 0x08 /* pseudowire connection id */ +#define F_MAP_PW_IFMTU 0x10 /* pseudowire interface parameter */ +#define F_MAP_PW_STATUS 0x20 /* pseudowire status */ + +struct notify_msg { + uint32_t status_code; + uint32_t msg_id; /* network byte order */ + uint16_t msg_type; /* network byte order */ + uint32_t pw_status; + struct map fec; + uint8_t flags; +}; +#define F_NOTIF_PW_STATUS 0x01 /* pseudowire status tlv present */ +#define F_NOTIF_FEC 0x02 /* fec tlv present */ + +struct if_addr { + LIST_ENTRY(if_addr) entry; + int af; + union ldpd_addr addr; + uint8_t prefixlen; + union ldpd_addr dstbrd; +}; +LIST_HEAD(if_addr_head, if_addr); + +struct iface_af { + struct iface *iface; + int af; + int enabled; + int state; + LIST_HEAD(, adj) adj_list; + time_t uptime; + struct thread *hello_timer; + uint16_t hello_holdtime; + uint16_t hello_interval; +}; + +struct iface { + LIST_ENTRY(iface) entry; + char name[IF_NAMESIZE]; + unsigned int ifindex; + struct if_addr_head addr_list; + struct in6_addr linklocal; + enum iface_type type; + uint16_t flags; + struct iface_af ipv4; + struct iface_af ipv6; + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(iface) + +/* source of targeted hellos */ +struct tnbr { + LIST_ENTRY(tnbr) entry; + struct thread *hello_timer; + struct adj *adj; + int af; + union ldpd_addr addr; + int state; + uint16_t pw_count; + uint8_t flags; + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(tnbr) +#define F_TNBR_CONFIGURED 0x01 +#define F_TNBR_DYNAMIC 0x02 + +enum auth_method { + AUTH_NONE, + AUTH_MD5SIG +}; + +/* neighbor specific parameters */ +struct nbr_params { + LIST_ENTRY(nbr_params) entry; + struct in_addr lsr_id; + uint16_t keepalive; + int gtsm_enabled; + uint8_t gtsm_hops; + struct { + enum auth_method method; + char md5key[TCP_MD5_KEY_LEN]; + uint8_t md5key_len; + } auth; + uint8_t flags; + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(nbr_params) +#define F_NBRP_KEEPALIVE 0x01 +#define F_NBRP_GTSM 0x02 +#define F_NBRP_GTSM_HOPS 0x04 + +struct l2vpn_if { + LIST_ENTRY(l2vpn_if) entry; + struct l2vpn *l2vpn; + char ifname[IF_NAMESIZE]; + unsigned int ifindex; + uint16_t flags; + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(l2vpn_if) + +struct l2vpn_pw { + LIST_ENTRY(l2vpn_pw) entry; + struct l2vpn *l2vpn; + struct in_addr lsr_id; + int af; + union ldpd_addr addr; + uint32_t pwid; + char ifname[IF_NAMESIZE]; + unsigned int ifindex; + uint32_t remote_group; + uint16_t remote_mtu; + uint32_t remote_status; + uint8_t flags; + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(l2vpn_pw) +#define F_PW_STATUSTLV_CONF 0x01 /* status tlv configured */ +#define F_PW_STATUSTLV 0x02 /* status tlv negotiated */ +#define F_PW_CWORD_CONF 0x04 /* control word configured */ +#define F_PW_CWORD 0x08 /* control word negotiated */ 
+#define F_PW_STATUS_UP 0x10 /* pseudowire is operational */ +#define F_PW_STATIC_NBR_ADDR 0x20 /* static neighbor address configured */ + +struct l2vpn { + LIST_ENTRY(l2vpn) entry; + char name[L2VPN_NAME_LEN]; + int type; + int pw_type; + int mtu; + char br_ifname[IF_NAMESIZE]; + unsigned int br_ifindex; + LIST_HEAD(, l2vpn_if) if_list; + LIST_HEAD(, l2vpn_pw) pw_list; + LIST_HEAD(, l2vpn_pw) pw_inactive_list; + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(l2vpn) +#define L2VPN_TYPE_VPWS 1 +#define L2VPN_TYPE_VPLS 2 + +/* ldp_conf */ +enum ldpd_process { + PROC_MAIN, + PROC_LDP_ENGINE, + PROC_LDE_ENGINE +} ldpd_process; + +enum socket_type { + LDP_SOCKET_DISC, + LDP_SOCKET_EDISC, + LDP_SOCKET_SESSION +}; + +enum hello_type { + HELLO_LINK, + HELLO_TARGETED +}; + +struct ldpd_af_conf { + uint16_t keepalive; + uint16_t lhello_holdtime; + uint16_t lhello_interval; + uint16_t thello_holdtime; + uint16_t thello_interval; + union ldpd_addr trans_addr; + int flags; +}; +#define F_LDPD_AF_ENABLED 0x0001 +#define F_LDPD_AF_THELLO_ACCEPT 0x0002 +#define F_LDPD_AF_EXPNULL 0x0004 +#define F_LDPD_AF_NO_GTSM 0x0008 + +struct ldpd_conf { + struct in_addr rtr_id; + struct ldpd_af_conf ipv4; + struct ldpd_af_conf ipv6; + LIST_HEAD(, iface) iface_list; + LIST_HEAD(, tnbr) tnbr_list; + LIST_HEAD(, nbr_params) nbrp_list; + LIST_HEAD(, l2vpn) l2vpn_list; + uint16_t lhello_holdtime; + uint16_t lhello_interval; + uint16_t thello_holdtime; + uint16_t thello_interval; + uint16_t trans_pref; + int flags; + QOBJ_FIELDS +}; +DECLARE_QOBJ_TYPE(ldpd_conf) +#define F_LDPD_NO_FIB_UPDATE 0x0001 +#define F_LDPD_DS_CISCO_INTEROP 0x0002 +#define F_LDPD_ENABLED 0x0004 + +struct ldpd_af_global { + struct thread *disc_ev; + struct thread *edisc_ev; + int ldp_disc_socket; + int ldp_edisc_socket; + int ldp_session_socket; +}; + +struct ldpd_global { + int cmd_opts; + time_t uptime; + struct in_addr rtr_id; + struct ldpd_af_global ipv4; + struct ldpd_af_global ipv6; + uint32_t conf_seqnum; + int pfkeysock; + struct if_addr_head addr_list; + LIST_HEAD(, adj) adj_list; + struct in_addr mcast_addr_v4; + struct in6_addr mcast_addr_v6; + TAILQ_HEAD(, pending_conn) pending_conns; +}; + +/* kroute */ +struct kroute { + int af; + union ldpd_addr prefix; + uint8_t prefixlen; + union ldpd_addr nexthop; + uint32_t local_label; + uint32_t remote_label; + unsigned short ifindex; + uint8_t priority; + uint16_t flags; +}; + +struct kpw { + unsigned short ifindex; + int pw_type; + int af; + union ldpd_addr nexthop; + uint32_t local_label; + uint32_t remote_label; + uint8_t flags; +}; + +struct kaddr { + unsigned short ifindex; + int af; + union ldpd_addr addr; + uint8_t prefixlen; + union ldpd_addr dstbrd; +}; + +struct kif { + char ifname[IF_NAMESIZE]; + unsigned short ifindex; + int flags; + int mtu; +}; + +/* control data structures */ +struct ctl_iface { + int af; + char name[IF_NAMESIZE]; + unsigned int ifindex; + int state; + uint16_t flags; + enum iface_type type; + uint16_t hello_holdtime; + uint16_t hello_interval; + time_t uptime; + uint16_t adj_cnt; +}; + +struct ctl_disc_if { + char name[IF_NAMESIZE]; + int active_v4; + int active_v6; + int no_adj; +}; + +struct ctl_disc_tnbr { + int af; + union ldpd_addr addr; + int no_adj; +}; + +struct ctl_adj { + int af; + struct in_addr id; + enum hello_type type; + char ifname[IF_NAMESIZE]; + union ldpd_addr src_addr; + uint16_t holdtime; + union ldpd_addr trans_addr; +}; + +struct ctl_nbr { + int af; + struct in_addr id; + union ldpd_addr laddr; + in_port_t lport; + union ldpd_addr raddr; + in_port_t 
rport; + uint16_t holdtime; + time_t uptime; + int nbr_state; +}; + +struct ctl_rt { + int af; + union ldpd_addr prefix; + uint8_t prefixlen; + struct in_addr nexthop; /* lsr-id */ + uint32_t local_label; + uint32_t remote_label; + uint8_t flags; + uint8_t in_use; + int first; +}; + +struct ctl_pw { + uint16_t type; + char l2vpn_name[L2VPN_NAME_LEN]; + char ifname[IF_NAMESIZE]; + uint32_t pwid; + struct in_addr lsr_id; + uint32_t local_label; + uint32_t local_gid; + uint16_t local_ifmtu; + uint8_t local_cword; + uint32_t remote_label; + uint32_t remote_gid; + uint16_t remote_ifmtu; + uint8_t remote_cword; + uint32_t status; +}; + +extern struct ldpd_conf *ldpd_conf; +extern struct ldpd_global global; + +/* parse.y */ +struct ldpd_conf *parse_config(char *); +int cmdline_symset(char *); + +/* kroute.c */ +void kif_redistribute(const char *); +int kr_change(struct kroute *); +int kr_delete(struct kroute *); +int kmpw_set(struct kpw *); +int kmpw_unset(struct kpw *); + +/* util.c */ +uint8_t mask2prefixlen(in_addr_t); +uint8_t mask2prefixlen6(struct sockaddr_in6 *); +in_addr_t prefixlen2mask(uint8_t); +struct in6_addr *prefixlen2mask6(uint8_t); +void ldp_applymask(int, union ldpd_addr *, + const union ldpd_addr *, int); +int ldp_addrcmp(int, const union ldpd_addr *, + const union ldpd_addr *); +int ldp_addrisset(int, const union ldpd_addr *); +int ldp_prefixcmp(int, const union ldpd_addr *, + const union ldpd_addr *, uint8_t); +int bad_addr_v4(struct in_addr); +int bad_addr_v6(struct in6_addr *); +int bad_addr(int, union ldpd_addr *); +void embedscope(struct sockaddr_in6 *); +void recoverscope(struct sockaddr_in6 *); +void addscope(struct sockaddr_in6 *, uint32_t); +void clearscope(struct in6_addr *); +struct sockaddr *addr2sa(int af, union ldpd_addr *, uint16_t); +void sa2addr(struct sockaddr *, int *, union ldpd_addr *, + in_port_t *); +socklen_t sockaddr_len(struct sockaddr *); + +/* ldpd.c */ +int ldp_write_handler(struct thread *); +void main_imsg_compose_ldpe(int, pid_t, void *, uint16_t); +void main_imsg_compose_lde(int, pid_t, void *, uint16_t); +int main_imsg_compose_both(enum imsg_type, void *, + uint16_t); +void imsg_event_add(struct imsgev *); +int imsg_compose_event(struct imsgev *, uint16_t, uint32_t, + pid_t, int, void *, uint16_t); +void evbuf_enqueue(struct evbuf *, struct ibuf *); +void evbuf_event_add(struct evbuf *); +void evbuf_init(struct evbuf *, int, + int (*)(struct thread *), void *); +void evbuf_clear(struct evbuf *); +struct ldpd_af_conf *ldp_af_conf_get(struct ldpd_conf *, int); +struct ldpd_af_global *ldp_af_global_get(struct ldpd_global *, int); +int ldp_is_dual_stack(struct ldpd_conf *); +in_addr_t ldp_rtr_id_get(struct ldpd_conf *); +int ldp_reload(struct ldpd_conf *); +int ldp_reload_ref(struct ldpd_conf *, void **); +struct ldpd_conf *ldp_dup_config_ref(struct ldpd_conf *, void **ref); +struct ldpd_conf *ldp_dup_config(struct ldpd_conf *); +void ldp_clear_config(struct ldpd_conf *); +void merge_config(struct ldpd_conf *, struct ldpd_conf *); +struct ldpd_conf *config_new_empty(void); +void config_clear(struct ldpd_conf *); + +/* ldp_vty_conf.c */ +/* NOTE: the parameters' names should be preserved because of codegen */ +struct iface *iface_new_api(struct ldpd_conf *cfg, + const char *name); +void iface_del_api(struct iface *iface); +struct tnbr *tnbr_new_api(struct ldpd_conf *cfg, int af, + union ldpd_addr *addr); +void tnbr_del_api(struct tnbr *tnbr); +struct nbr_params *nbrp_new_api(struct ldpd_conf *cfg, + struct in_addr lsr_id); +void nbrp_del_api(struct 
nbr_params *nbrp); +struct l2vpn *l2vpn_new_api(struct ldpd_conf *cfg, const char *name); +void l2vpn_del_api(struct l2vpn *l2vpn); +struct l2vpn_if *l2vpn_if_new_api(struct ldpd_conf *conf, + struct l2vpn *l2vpn, const char *ifname); +void l2vpn_if_del_api(struct l2vpn_if *lif); +struct l2vpn_pw *l2vpn_pw_new_api(struct ldpd_conf *conf, + struct l2vpn *l2vpn, const char *ifname); +void l2vpn_pw_del_api(struct l2vpn_pw *pw); + +/* socket.c */ +int ldp_create_socket(int, enum socket_type); +void sock_set_nonblock(int); +void sock_set_cloexec(int); +void sock_set_recvbuf(int); +int sock_set_reuse(int, int); +int sock_set_bindany(int, int); +int sock_set_md5sig(int, int, union ldpd_addr *, const char *); +int sock_set_ipv4_tos(int, int); +int sock_set_ipv4_pktinfo(int, int); +int sock_set_ipv4_recvdstaddr(int, int); +int sock_set_ipv4_recvif(int, int); +int sock_set_ipv4_minttl(int, int); +int sock_set_ipv4_ucast_ttl(int fd, int); +int sock_set_ipv4_mcast_ttl(int, uint8_t); +int sock_set_ipv4_mcast(struct iface *); +int sock_set_ipv4_mcast_loop(int); +int sock_set_ipv6_dscp(int, int); +int sock_set_ipv6_pktinfo(int, int); +int sock_set_ipv6_minhopcount(int, int); +int sock_set_ipv6_ucast_hops(int, int); +int sock_set_ipv6_mcast_hops(int, int); +int sock_set_ipv6_mcast(struct iface *); +int sock_set_ipv6_mcast_loop(int); + +/* quagga */ +extern struct thread_master *master; + +/* ldp_zebra.c */ +void ldp_zebra_init(struct thread_master *); + +/* compatibility */ +#ifndef __OpenBSD__ +#define __IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) +#define __IPV6_ADDR_SCOPE_INTFACELOCAL 0x01 +#define IN6_IS_ADDR_MC_INTFACELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_INTFACELOCAL)) +#endif + +#endif /* _LDPD_H_ */ diff --git a/ldpd/ldpe.c b/ldpd/ldpe.c new file mode 100644 index 0000000000..aef33c8e37 --- /dev/null +++ b/ldpd/ldpe.c @@ -0,0 +1,906 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2005 Claudio Jeker + * Copyright (c) 2004, 2008 Esben Norby + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
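
The ldp_vty_conf.c helpers declared just above (iface_new_api(), tnbr_new_api(), l2vpn_new_api() and friends) operate on a candidate struct ldpd_conf rather than directly on the running one. A hypothetical caller, assuming the duplicate-edit-reload pattern implied by ldp_dup_config() and ldp_reload(); the interface name and neighbor address are made-up example values:

#include <string.h>
#include <arpa/inet.h>

#include "ldpd.h"

/* Hypothetical sketch: edit a copy of the running config through the
 * *_new_api() helpers, then hand it back to ldp_reload(). */
static int
ldp_enable_iface_and_tnbr(struct ldpd_conf *running)
{
	struct ldpd_conf	*copy;
	union ldpd_addr		 addr;

	copy = ldp_dup_config(running);

	/* link discovery on one interface */
	iface_new_api(copy, "eth0");

	/* one targeted neighbor (example address) */
	memset(&addr, 0, sizeof(addr));
	inet_pton(AF_INET, "10.0.1.5", &addr.v4);
	tnbr_new_api(copy, AF_INET, &addr);

	return (ldp_reload(copy));
}
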
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "control.h" +#include "log.h" +#include "ldp_debug.h" + +#include +#include "memory.h" +#include "privs.h" +#include "sigevent.h" + +static void ldpe_shutdown(void); +static int ldpe_dispatch_main(struct thread *); +static int ldpe_dispatch_lde(struct thread *); +#ifdef __OpenBSD__ +static int ldpe_dispatch_pfkey(struct thread *); +#endif +static void ldpe_setup_sockets(int, int, int, int); +static void ldpe_close_sockets(int); +static void ldpe_iface_af_ctl(struct ctl_conn *, int, unsigned int); + +struct ldpd_conf *leconf; +#ifdef __OpenBSD__ +struct ldpd_sysdep sysdep; +#endif + +static struct imsgev *iev_main; +static struct imsgev *iev_lde; +#ifdef __OpenBSD__ +static struct thread *pfkey_ev; +#endif + +/* Master of threads. */ +struct thread_master *master; + +/* ldpe privileges */ +static zebra_capabilities_t _caps_p [] = +{ + ZCAP_BIND, + ZCAP_NET_ADMIN +}; + +struct zebra_privs_t ldpe_privs = +{ +#if defined(QUAGGA_USER) && defined(QUAGGA_GROUP) + .user = QUAGGA_USER, + .group = QUAGGA_GROUP, +#endif +#if defined(VTY_GROUP) + .vty_group = VTY_GROUP, +#endif + .caps_p = _caps_p, + .cap_num_p = array_size(_caps_p), + .cap_num_i = 0 +}; + +/* SIGINT / SIGTERM handler. */ +static void +sigint(void) +{ + ldpe_shutdown(); +} + +static struct quagga_signal_t ldpe_signals[] = +{ + { + .signal = SIGINT, + .handler = &sigint, + }, + { + .signal = SIGTERM, + .handler = &sigint, + }, +}; + +/* label distribution protocol engine */ +void +ldpe(const char *user, const char *group) +{ + struct thread thread; + + leconf = config_new_empty(); + +#ifdef HAVE_SETPROCTITLE + setproctitle("ldp engine"); +#endif + ldpd_process = PROC_LDP_ENGINE; + + LIST_INIT(&global.addr_list); + LIST_INIT(&global.adj_list); + TAILQ_INIT(&global.pending_conns); + if (inet_pton(AF_INET, AllRouters_v4, &global.mcast_addr_v4) != 1) + fatal("inet_pton"); + if (inet_pton(AF_INET6, AllRouters_v6, &global.mcast_addr_v6) != 1) + fatal("inet_pton"); +#ifdef __OpenBSD__ + global.pfkeysock = pfkey_init(); +#endif + + /* drop privileges */ + if (user) + ldpe_privs.user = user; + if (group) + ldpe_privs.group = group; + zprivs_init(&ldpe_privs); + + if (control_init() == -1) + fatalx("control socket setup failed"); + +#ifdef HAVE_PLEDGE + if (pledge("stdio cpath inet mcast recvfd", NULL) == -1) + fatal("pledge"); +#endif + + master = thread_master_create(); + accept_init(); + + /* setup signal handler */ + signal_init(master, array_size(ldpe_signals), ldpe_signals); + + /* setup pipe and event handler to the parent process */ + if ((iev_main = malloc(sizeof(struct imsgev))) == NULL) + fatal(NULL); + imsg_init(&iev_main->ibuf, 3); + iev_main->handler_read = ldpe_dispatch_main; + iev_main->ev_read = thread_add_read(master, iev_main->handler_read, + iev_main, iev_main->ibuf.fd); + iev_main->handler_write = ldp_write_handler; + iev_main->ev_write = NULL; + +#ifdef __OpenBSD__ + if (sysdep.no_pfkey == 0) + pfkey_ev = thread_add_read(master, ldpe_dispatch_pfkey, + NULL, global.pfkeysock); +#endif + + /* mark sockets as closed */ + global.ipv4.ldp_disc_socket = -1; + global.ipv4.ldp_edisc_socket = -1; + global.ipv4.ldp_session_socket = -1; + global.ipv6.ldp_disc_socket = -1; + global.ipv6.ldp_edisc_socket = -1; + global.ipv6.ldp_session_socket = -1; + + /* listen on ldpd control socket */ + TAILQ_INIT(&ctl_conns); + control_listen(); + + if ((pkt_ptr = calloc(1, IBUF_READ_SIZE)) == NULL) + fatal(__func__); + + /* Fetch next active thread. 
*/ + while (thread_fetch(master, &thread)) + thread_call(&thread); +} + +static void +ldpe_shutdown(void) +{ + struct if_addr *if_addr; + struct adj *adj; + + /* close pipes */ + msgbuf_write(&iev_lde->ibuf.w); + msgbuf_clear(&iev_lde->ibuf.w); + close(iev_lde->ibuf.fd); + msgbuf_write(&iev_main->ibuf.w); + msgbuf_clear(&iev_main->ibuf.w); + close(iev_main->ibuf.fd); + + control_cleanup(); + config_clear(leconf); + +#ifdef __OpenBSD__ + if (sysdep.no_pfkey == 0) { + THREAD_READ_OFF(pfkey_ev); + close(global.pfkeysock); + } +#endif + ldpe_close_sockets(AF_INET); + ldpe_close_sockets(AF_INET6); + + /* remove addresses from global list */ + while ((if_addr = LIST_FIRST(&global.addr_list)) != NULL) { + LIST_REMOVE(if_addr, entry); + free(if_addr); + } + while ((adj = LIST_FIRST(&global.adj_list)) != NULL) + adj_del(adj, S_SHUTDOWN); + + /* clean up */ + free(iev_lde); + free(iev_main); + free(pkt_ptr); + + log_info("ldp engine exiting"); + exit(0); +} + +/* imesg */ +int +ldpe_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen) +{ + return (imsg_compose_event(iev_main, type, 0, pid, -1, data, datalen)); +} + +int +ldpe_imsg_compose_lde(int type, uint32_t peerid, pid_t pid, void *data, + uint16_t datalen) +{ + return (imsg_compose_event(iev_lde, type, peerid, pid, -1, + data, datalen)); +} + +/* ARGSUSED */ +static int +ldpe_dispatch_main(struct thread *thread) +{ + static struct ldpd_conf *nconf; + struct iface *niface; + struct tnbr *ntnbr; + struct nbr_params *nnbrp; + static struct l2vpn *nl2vpn; + struct l2vpn_if *nlif; + struct l2vpn_pw *npw; + struct imsg imsg; + int fd = THREAD_FD(thread); + struct imsgev *iev = THREAD_ARG(thread); + struct imsgbuf *ibuf = &iev->ibuf; + struct iface *iface = NULL; + struct kif *kif; + int af; + enum socket_type *socket_type; + static int disc_socket = -1; + static int edisc_socket = -1; + static int session_socket = -1; + struct nbr *nbr; +#ifdef __OpenBSD__ + struct nbr_params *nbrp; +#endif + int n, shut = 0; + + iev->ev_read = NULL; + + if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) + fatal("imsg_read error"); + if (n == 0) /* connection closed */ + shut = 1; + + for (;;) { + if ((n = imsg_get(ibuf, &imsg)) == -1) + fatal("ldpe_dispatch_main: imsg_get error"); + if (n == 0) + break; + + switch (imsg.hdr.type) { + case IMSG_IFSTATUS: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct kif)) + fatalx("IFSTATUS imsg with wrong len"); + kif = imsg.data; + + iface = if_lookup_name(leconf, kif->ifname); + if (!iface) + break; + + if_update_info(iface, kif); + if_update(iface, AF_UNSPEC); + break; + case IMSG_NEWADDR: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct kaddr)) + fatalx("NEWADDR imsg with wrong len"); + + if_addr_add(imsg.data); + break; + case IMSG_DELADDR: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct kaddr)) + fatalx("DELADDR imsg with wrong len"); + + if_addr_del(imsg.data); + break; + case IMSG_SOCKET_IPC: + if (iev_lde) { + log_warnx("%s: received unexpected imsg fd " + "to lde", __func__); + break; + } + if ((fd = imsg.fd) == -1) { + log_warnx("%s: expected to receive imsg fd to " + "lde but didn't receive any", __func__); + break; + } + + if ((iev_lde = malloc(sizeof(struct imsgev))) == NULL) + fatal(NULL); + imsg_init(&iev_lde->ibuf, fd); + iev_lde->handler_read = ldpe_dispatch_lde; + iev_lde->ev_read = thread_add_read(master, + iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd); + iev_lde->handler_write = ldp_write_handler; + iev_lde->ev_write = NULL; + break; + case IMSG_CLOSE_SOCKETS: + 
af = imsg.hdr.peerid; + + RB_FOREACH(nbr, nbr_id_head, &nbrs_by_id) { + if (nbr->af != af) + continue; + session_shutdown(nbr, S_SHUTDOWN, 0, 0); +#ifdef __OpenBSD__ + pfkey_remove(nbr); +#endif + } + ldpe_close_sockets(af); + if_update_all(af); + tnbr_update_all(af); + + disc_socket = -1; + edisc_socket = -1; + session_socket = -1; + if ((ldp_af_conf_get(leconf, af))->flags & + F_LDPD_AF_ENABLED) + ldpe_imsg_compose_parent(IMSG_REQUEST_SOCKETS, + af, NULL, 0); + break; + case IMSG_SOCKET_NET: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(enum socket_type)) + fatalx("SOCKET_NET imsg with wrong len"); + socket_type = imsg.data; + + switch (*socket_type) { + case LDP_SOCKET_DISC: + disc_socket = imsg.fd; + break; + case LDP_SOCKET_EDISC: + edisc_socket = imsg.fd; + break; + case LDP_SOCKET_SESSION: + session_socket = imsg.fd; + break; + } + break; + case IMSG_SETUP_SOCKETS: + af = imsg.hdr.peerid; + if (disc_socket == -1 || edisc_socket == -1 || + session_socket == -1) { + if (disc_socket != -1) + close(disc_socket); + if (edisc_socket != -1) + close(edisc_socket); + if (session_socket != -1) + close(session_socket); + break; + } + + ldpe_setup_sockets(af, disc_socket, edisc_socket, + session_socket); + if_update_all(af); + tnbr_update_all(af); + RB_FOREACH(nbr, nbr_id_head, &nbrs_by_id) { + if (nbr->af != af) + continue; + nbr->laddr = (ldp_af_conf_get(leconf, + af))->trans_addr; +#ifdef __OpenBSD__ + nbrp = nbr_params_find(leconf, nbr->id); + if (nbrp && pfkey_establish(nbr, nbrp) == -1) + fatalx("pfkey setup failed"); +#endif + if (nbr_session_active_role(nbr)) + nbr_establish_connection(nbr); + } + break; + case IMSG_RTRID_UPDATE: + memcpy(&global.rtr_id, imsg.data, + sizeof(global.rtr_id)); + if (leconf->rtr_id.s_addr == INADDR_ANY) { + ldpe_reset_nbrs(AF_INET); + ldpe_reset_nbrs(AF_INET6); + } + if_update_all(AF_UNSPEC); + tnbr_update_all(AF_UNSPEC); + break; + case IMSG_RECONF_CONF: + if ((nconf = malloc(sizeof(struct ldpd_conf))) == + NULL) + fatal(NULL); + memcpy(nconf, imsg.data, sizeof(struct ldpd_conf)); + + LIST_INIT(&nconf->iface_list); + LIST_INIT(&nconf->tnbr_list); + LIST_INIT(&nconf->nbrp_list); + LIST_INIT(&nconf->l2vpn_list); + break; + case IMSG_RECONF_IFACE: + if ((niface = malloc(sizeof(struct iface))) == NULL) + fatal(NULL); + memcpy(niface, imsg.data, sizeof(struct iface)); + + LIST_INIT(&niface->addr_list); + LIST_INIT(&niface->ipv4.adj_list); + LIST_INIT(&niface->ipv6.adj_list); + niface->ipv4.iface = niface; + niface->ipv6.iface = niface; + + LIST_INSERT_HEAD(&nconf->iface_list, niface, entry); + break; + case IMSG_RECONF_TNBR: + if ((ntnbr = malloc(sizeof(struct tnbr))) == NULL) + fatal(NULL); + memcpy(ntnbr, imsg.data, sizeof(struct tnbr)); + + LIST_INSERT_HEAD(&nconf->tnbr_list, ntnbr, entry); + break; + case IMSG_RECONF_NBRP: + if ((nnbrp = malloc(sizeof(struct nbr_params))) == NULL) + fatal(NULL); + memcpy(nnbrp, imsg.data, sizeof(struct nbr_params)); + + LIST_INSERT_HEAD(&nconf->nbrp_list, nnbrp, entry); + break; + case IMSG_RECONF_L2VPN: + if ((nl2vpn = malloc(sizeof(struct l2vpn))) == NULL) + fatal(NULL); + memcpy(nl2vpn, imsg.data, sizeof(struct l2vpn)); + + LIST_INIT(&nl2vpn->if_list); + LIST_INIT(&nl2vpn->pw_list); + LIST_INIT(&nl2vpn->pw_inactive_list); + + LIST_INSERT_HEAD(&nconf->l2vpn_list, nl2vpn, entry); + break; + case IMSG_RECONF_L2VPN_IF: + if ((nlif = malloc(sizeof(struct l2vpn_if))) == NULL) + fatal(NULL); + memcpy(nlif, imsg.data, sizeof(struct l2vpn_if)); + + nlif->l2vpn = nl2vpn; + LIST_INSERT_HEAD(&nl2vpn->if_list, nlif, entry); + 
break; + case IMSG_RECONF_L2VPN_PW: + if ((npw = malloc(sizeof(struct l2vpn_pw))) == NULL) + fatal(NULL); + memcpy(npw, imsg.data, sizeof(struct l2vpn_pw)); + + npw->l2vpn = nl2vpn; + LIST_INSERT_HEAD(&nl2vpn->pw_list, npw, entry); + break; + case IMSG_RECONF_L2VPN_IPW: + if ((npw = malloc(sizeof(struct l2vpn_pw))) == NULL) + fatal(NULL); + memcpy(npw, imsg.data, sizeof(struct l2vpn_pw)); + + npw->l2vpn = nl2vpn; + LIST_INSERT_HEAD(&nl2vpn->pw_inactive_list, npw, entry); + break; + case IMSG_RECONF_END: + merge_config(leconf, nconf); + nconf = NULL; + global.conf_seqnum++; + break; + case IMSG_CTL_END: + control_imsg_relay(&imsg); + break; + case IMSG_DEBUG_UPDATE: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(ldp_debug)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + memcpy(&ldp_debug, imsg.data, sizeof(ldp_debug)); + break; + default: + log_debug("ldpe_dispatch_main: error handling imsg %d", + imsg.hdr.type); + break; + } + imsg_free(&imsg); + } + if (!shut) + imsg_event_add(iev); + else { + /* this pipe is dead, so remove the event handlers and exit */ + THREAD_READ_OFF(iev->ev_read); + THREAD_WRITE_OFF(iev->ev_write); + ldpe_shutdown(); + } + + return (0); +} + +/* ARGSUSED */ +static int +ldpe_dispatch_lde(struct thread *thread) +{ + struct imsgev *iev = THREAD_ARG(thread); + struct imsgbuf *ibuf = &iev->ibuf; + struct imsg imsg; + struct map map; + struct notify_msg nm; + int n, shut = 0; + struct nbr *nbr = NULL; + + iev->ev_read = NULL; + + if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) + fatal("imsg_read error"); + if (n == 0) /* connection closed */ + shut = 1; + + for (;;) { + if ((n = imsg_get(ibuf, &imsg)) == -1) + fatal("ldpe_dispatch_lde: imsg_get error"); + if (n == 0) + break; + + switch (imsg.hdr.type) { + case IMSG_MAPPING_ADD: + case IMSG_RELEASE_ADD: + case IMSG_REQUEST_ADD: + case IMSG_WITHDRAW_ADD: + if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(map)) + fatalx("invalid size of map request"); + memcpy(&map, imsg.data, sizeof(map)); + + nbr = nbr_find_peerid(imsg.hdr.peerid); + if (nbr == NULL) { + log_debug("ldpe_dispatch_lde: cannot find " + "neighbor"); + break; + } + if (nbr->state != NBR_STA_OPER) + break; + + switch (imsg.hdr.type) { + case IMSG_MAPPING_ADD: + mapping_list_add(&nbr->mapping_list, &map); + break; + case IMSG_RELEASE_ADD: + mapping_list_add(&nbr->release_list, &map); + break; + case IMSG_REQUEST_ADD: + mapping_list_add(&nbr->request_list, &map); + break; + case IMSG_WITHDRAW_ADD: + mapping_list_add(&nbr->withdraw_list, &map); + break; + } + break; + case IMSG_MAPPING_ADD_END: + case IMSG_RELEASE_ADD_END: + case IMSG_REQUEST_ADD_END: + case IMSG_WITHDRAW_ADD_END: + nbr = nbr_find_peerid(imsg.hdr.peerid); + if (nbr == NULL) { + log_debug("ldpe_dispatch_lde: cannot find " + "neighbor"); + break; + } + if (nbr->state != NBR_STA_OPER) + break; + + switch (imsg.hdr.type) { + case IMSG_MAPPING_ADD_END: + send_labelmessage(nbr, MSG_TYPE_LABELMAPPING, + &nbr->mapping_list); + break; + case IMSG_RELEASE_ADD_END: + send_labelmessage(nbr, MSG_TYPE_LABELRELEASE, + &nbr->release_list); + break; + case IMSG_REQUEST_ADD_END: + send_labelmessage(nbr, MSG_TYPE_LABELREQUEST, + &nbr->request_list); + break; + case IMSG_WITHDRAW_ADD_END: + send_labelmessage(nbr, MSG_TYPE_LABELWITHDRAW, + &nbr->withdraw_list); + break; + } + break; + case IMSG_NOTIFICATION_SEND: + if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(nm)) + fatalx("invalid size of OE request"); + memcpy(&nm, imsg.data, sizeof(nm)); + + nbr = nbr_find_peerid(imsg.hdr.peerid); + if (nbr == 
NULL) { + log_debug("ldpe_dispatch_lde: cannot find " + "neighbor"); + break; + } + if (nbr->state != NBR_STA_OPER) + break; + + send_notification_full(nbr->tcp, &nm); + break; + case IMSG_CTL_END: + case IMSG_CTL_SHOW_LIB: + case IMSG_CTL_SHOW_L2VPN_PW: + case IMSG_CTL_SHOW_L2VPN_BINDING: + control_imsg_relay(&imsg); + break; + default: + log_debug("ldpe_dispatch_lde: error handling imsg %d", + imsg.hdr.type); + break; + } + imsg_free(&imsg); + } + if (!shut) + imsg_event_add(iev); + else { + /* this pipe is dead, so remove the event handlers and exit */ + THREAD_READ_OFF(iev->ev_read); + THREAD_WRITE_OFF(iev->ev_write); + ldpe_shutdown(); + } + + return (0); +} + +#ifdef __OpenBSD__ +/* ARGSUSED */ +static int +ldpe_dispatch_pfkey(struct thread *thread) +{ + int fd = THREAD_FD(thread); + + pfkey_ev = thread_add_read(master, ldpe_dispatch_pfkey, + NULL, global.pfkeysock); + + if (pfkey_read(fd, NULL) == -1) + fatal("pfkey_read failed, exiting..."); + + return (0); +} +#endif /* __OpenBSD__ */ + +static void +ldpe_setup_sockets(int af, int disc_socket, int edisc_socket, + int session_socket) +{ + struct ldpd_af_global *af_global; + + af_global = ldp_af_global_get(&global, af); + + /* discovery socket */ + af_global->ldp_disc_socket = disc_socket; + af_global->disc_ev = thread_add_read(master, disc_recv_packet, + &af_global->disc_ev, af_global->ldp_disc_socket); + + /* extended discovery socket */ + af_global->ldp_edisc_socket = edisc_socket; + af_global->edisc_ev = thread_add_read(master, disc_recv_packet, + &af_global->edisc_ev, af_global->ldp_edisc_socket); + + /* session socket */ + af_global->ldp_session_socket = session_socket; + accept_add(af_global->ldp_session_socket, session_accept, NULL); +} + +static void +ldpe_close_sockets(int af) +{ + struct ldpd_af_global *af_global; + + af_global = ldp_af_global_get(&global, af); + + /* discovery socket */ + THREAD_READ_OFF(af_global->disc_ev); + if (af_global->ldp_disc_socket != -1) { + close(af_global->ldp_disc_socket); + af_global->ldp_disc_socket = -1; + } + + /* extended discovery socket */ + THREAD_READ_OFF(af_global->edisc_ev); + if (af_global->ldp_edisc_socket != -1) { + close(af_global->ldp_edisc_socket); + af_global->ldp_edisc_socket = -1; + } + + /* session socket */ + if (af_global->ldp_session_socket != -1) { + accept_del(af_global->ldp_session_socket); + close(af_global->ldp_session_socket); + af_global->ldp_session_socket = -1; + } +} + +void +ldpe_reset_nbrs(int af) +{ + struct nbr *nbr; + + RB_FOREACH(nbr, nbr_id_head, &nbrs_by_id) { + if (nbr->af == af) + session_shutdown(nbr, S_SHUTDOWN, 0, 0); + } +} + +void +ldpe_reset_ds_nbrs(void) +{ + struct nbr *nbr; + + RB_FOREACH(nbr, nbr_id_head, &nbrs_by_id) { + if (nbr->ds_tlv) + session_shutdown(nbr, S_SHUTDOWN, 0, 0); + } +} + +void +ldpe_remove_dynamic_tnbrs(int af) +{ + struct tnbr *tnbr, *safe; + + LIST_FOREACH_SAFE(tnbr, &leconf->tnbr_list, entry, safe) { + if (tnbr->af != af) + continue; + + tnbr->flags &= ~F_TNBR_DYNAMIC; + tnbr_check(tnbr); + } +} + +void +ldpe_stop_init_backoff(int af) +{ + struct nbr *nbr; + + RB_FOREACH(nbr, nbr_id_head, &nbrs_by_id) { + if (nbr->af == af && nbr_pending_idtimer(nbr)) { + nbr_stop_idtimer(nbr); + nbr_establish_connection(nbr); + } + } +} + +static void +ldpe_iface_af_ctl(struct ctl_conn *c, int af, unsigned int idx) +{ + struct iface *iface; + struct iface_af *ia; + struct ctl_iface *ictl; + + LIST_FOREACH(iface, &leconf->iface_list, entry) { + if (idx == 0 || idx == iface->ifindex) { + ia = iface_af_get(iface, af); + if 
(!ia->enabled) + continue; + + ictl = if_to_ctl(ia); + imsg_compose_event(&c->iev, + IMSG_CTL_SHOW_INTERFACE, + 0, 0, -1, ictl, sizeof(struct ctl_iface)); + } + } +} + +void +ldpe_iface_ctl(struct ctl_conn *c, unsigned int idx) +{ + ldpe_iface_af_ctl(c, AF_INET, idx); + ldpe_iface_af_ctl(c, AF_INET6, idx); +} + +void +ldpe_adj_ctl(struct ctl_conn *c) +{ + struct iface *iface; + struct tnbr *tnbr; + struct adj *adj; + struct ctl_adj *actl; + struct ctl_disc_if ictl; + struct ctl_disc_tnbr tctl; + + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_DISCOVERY, 0, 0, -1, NULL, 0); + + LIST_FOREACH(iface, &leconf->iface_list, entry) { + memset(&ictl, 0, sizeof(ictl)); + ictl.active_v4 = (iface->ipv4.state == IF_STA_ACTIVE); + ictl.active_v6 = (iface->ipv6.state == IF_STA_ACTIVE); + + if (!ictl.active_v4 && !ictl.active_v6) + continue; + + strlcpy(ictl.name, iface->name, sizeof(ictl.name)); + if (LIST_EMPTY(&iface->ipv4.adj_list) && + LIST_EMPTY(&iface->ipv6.adj_list)) + ictl.no_adj = 1; + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_DISC_IFACE, 0, 0, + -1, &ictl, sizeof(ictl)); + + LIST_FOREACH(adj, &iface->ipv4.adj_list, ia_entry) { + actl = adj_to_ctl(adj); + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_DISC_ADJ, + 0, 0, -1, actl, sizeof(struct ctl_adj)); + } + LIST_FOREACH(adj, &iface->ipv6.adj_list, ia_entry) { + actl = adj_to_ctl(adj); + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_DISC_ADJ, + 0, 0, -1, actl, sizeof(struct ctl_adj)); + } + } + + LIST_FOREACH(tnbr, &leconf->tnbr_list, entry) { + memset(&tctl, 0, sizeof(tctl)); + tctl.af = tnbr->af; + tctl.addr = tnbr->addr; + if (tnbr->adj == NULL) + tctl.no_adj = 1; + + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_DISC_TNBR, 0, 0, + -1, &tctl, sizeof(tctl)); + + if (tnbr->adj == NULL) + continue; + + actl = adj_to_ctl(tnbr->adj); + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_DISC_ADJ, 0, 0, + -1, actl, sizeof(struct ctl_adj)); + } + + imsg_compose_event(&c->iev, IMSG_CTL_END, 0, 0, -1, NULL, 0); +} + +void +ldpe_nbr_ctl(struct ctl_conn *c) +{ + struct adj *adj; + struct ctl_adj *actl; + struct nbr *nbr; + struct ctl_nbr *nctl; + + RB_FOREACH(nbr, nbr_addr_head, &nbrs_by_addr) { + if (nbr->state == NBR_STA_PRESENT) + continue; + + nctl = nbr_to_ctl(nbr); + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_NBR, 0, 0, -1, nctl, + sizeof(struct ctl_nbr)); + + LIST_FOREACH(adj, &nbr->adj_list, nbr_entry) { + actl = adj_to_ctl(adj); + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_NBR_DISC, + 0, 0, -1, actl, sizeof(struct ctl_adj)); + } + + imsg_compose_event(&c->iev, IMSG_CTL_SHOW_NBR_END, 0, 0, -1, + NULL, 0); + } + imsg_compose_event(&c->iev, IMSG_CTL_END, 0, 0, -1, NULL, 0); +} + +void +mapping_list_add(struct mapping_head *mh, struct map *map) +{ + struct mapping_entry *me; + + me = calloc(1, sizeof(*me)); + if (me == NULL) + fatal(__func__); + me->map = *map; + + TAILQ_INSERT_TAIL(mh, me, entry); +} + +void +mapping_list_clr(struct mapping_head *mh) +{ + struct mapping_entry *me; + + while ((me = TAILQ_FIRST(mh)) != NULL) { + TAILQ_REMOVE(mh, me, entry); + free(me); + } +} diff --git a/ldpd/ldpe.h b/ldpd/ldpe.h new file mode 100644 index 0000000000..aab1a7fd9b --- /dev/null +++ b/ldpd/ldpe.h @@ -0,0 +1,293 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2004, 2005, 2008 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all 
copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LDPE_H_ +#define _LDPE_H_ + +#include "openbsd-queue.h" +#include "openbsd-tree.h" +#ifdef __OpenBSD__ +#include +#endif + +#include "ldpd.h" + +#define min(x,y) ((x) <= (y) ? (x) : (y)) +#define max(x,y) ((x) > (y) ? (x) : (y)) + +struct hello_source { + enum hello_type type; + struct { + struct iface_af *ia; + union ldpd_addr src_addr; + } link; + struct tnbr *target; +}; + +struct adj { + LIST_ENTRY(adj) global_entry; + LIST_ENTRY(adj) nbr_entry; + LIST_ENTRY(adj) ia_entry; + struct in_addr lsr_id; + struct nbr *nbr; + int ds_tlv; + struct hello_source source; + struct thread *inactivity_timer; + uint16_t holdtime; + union ldpd_addr trans_addr; +}; + +struct tcp_conn { + struct nbr *nbr; + int fd; + struct ibuf_read *rbuf; + struct evbuf wbuf; + struct thread *rev; + in_port_t lport; + in_port_t rport; +}; + +struct nbr { + RB_ENTRY(nbr) id_tree, addr_tree, pid_tree; + struct tcp_conn *tcp; + LIST_HEAD(, adj) adj_list; /* adjacencies */ + struct thread *ev_connect; + struct thread *keepalive_timer; + struct thread *keepalive_timeout; + struct thread *init_timeout; + struct thread *initdelay_timer; + + struct mapping_head mapping_list; + struct mapping_head withdraw_list; + struct mapping_head request_list; + struct mapping_head release_list; + struct mapping_head abortreq_list; + + uint32_t peerid; /* unique ID in DB */ + int af; + int ds_tlv; + int v4_enabled; /* announce/process v4 msgs */ + int v6_enabled; /* announce/process v6 msgs */ + struct in_addr id; /* lsr id */ + union ldpd_addr laddr; /* local address */ + union ldpd_addr raddr; /* remote address */ + uint32_t raddr_scope; /* remote address scope (v6) */ + time_t uptime; + int fd; + int state; + uint32_t conf_seqnum; + int idtimer_cnt; + uint16_t keepalive; + uint16_t max_pdu_len; + + struct { + uint8_t established; + uint32_t spi_in; + uint32_t spi_out; + enum auth_method method; + char md5key[TCP_MD5_KEY_LEN]; + } auth; + int flags; +}; +#define F_NBR_GTSM_NEGOTIATED 0x01 + +RB_HEAD(nbr_id_head, nbr); +RB_PROTOTYPE(nbr_id_head, nbr, id_tree, nbr_id_compare) +RB_HEAD(nbr_addr_head, nbr); +RB_PROTOTYPE(nbr_addr_head, nbr, addr_tree, nbr_addr_compare) +RB_HEAD(nbr_pid_head, nbr); +RB_PROTOTYPE(nbr_pid_head, nbr, pid_tree, nbr_pid_compare) + +struct pending_conn { + TAILQ_ENTRY(pending_conn) entry; + int fd; + int af; + union ldpd_addr addr; + struct thread *ev_timeout; +}; +#define PENDING_CONN_TIMEOUT 5 + +struct mapping_entry { + TAILQ_ENTRY(mapping_entry) entry; + struct map map; +}; + +struct ldpd_sysdep { + uint8_t no_pfkey; + uint8_t no_md5sig; +}; + +extern struct ldpd_conf *leconf; +extern struct ldpd_sysdep sysdep; +extern struct nbr_id_head nbrs_by_id; +extern struct nbr_addr_head nbrs_by_addr; +extern struct nbr_pid_head nbrs_by_pid; + +/* accept.c */ +void accept_init(void); +int accept_add(int, int (*)(struct thread *), void *); +void accept_del(int); +void accept_pause(void); +void accept_unpause(void); + +/* hello.c */ +int send_hello(enum hello_type, struct iface_af *, struct 
tnbr *); +void recv_hello(struct in_addr, struct ldp_msg *, int, union ldpd_addr *, + struct iface *, int, char *, uint16_t); + +/* init.c */ +void send_init(struct nbr *); +int recv_init(struct nbr *, char *, uint16_t); + +/* keepalive.c */ +void send_keepalive(struct nbr *); +int recv_keepalive(struct nbr *, char *, uint16_t); + +/* notification.c */ +void send_notification_full(struct tcp_conn *, struct notify_msg *); +void send_notification(uint32_t, struct tcp_conn *, uint32_t, + uint16_t); +void send_notification_nbr(struct nbr *, uint32_t, uint32_t, uint16_t); +int recv_notification(struct nbr *, char *, uint16_t); +int gen_status_tlv(struct ibuf *, uint32_t, uint32_t, uint16_t); + +/* address.c */ +void send_address_single(struct nbr *, struct if_addr *, int); +void send_address_all(struct nbr *, int); +int recv_address(struct nbr *, char *, uint16_t); + +/* labelmapping.c */ +#define PREFIX_SIZE(x) (((x) + 7) / 8) +void send_labelmessage(struct nbr *, uint16_t, struct mapping_head *); +int recv_labelmessage(struct nbr *, char *, uint16_t, uint16_t); +int gen_pw_status_tlv(struct ibuf *, uint32_t); +int gen_fec_tlv(struct ibuf *, struct map *); +int tlv_decode_fec_elm(struct nbr *, struct ldp_msg *, char *, + uint16_t, struct map *); + +/* ldpe.c */ +void ldpe(const char *, const char *); +int ldpe_imsg_compose_parent(int, pid_t, void *, + uint16_t); +int ldpe_imsg_compose_lde(int, uint32_t, pid_t, void *, + uint16_t); +void ldpe_reset_nbrs(int); +void ldpe_reset_ds_nbrs(void); +void ldpe_remove_dynamic_tnbrs(int); +void ldpe_stop_init_backoff(int); +struct ctl_conn; +void ldpe_iface_ctl(struct ctl_conn *, unsigned int); +void ldpe_adj_ctl(struct ctl_conn *); +void ldpe_nbr_ctl(struct ctl_conn *); +void mapping_list_add(struct mapping_head *, struct map *); +void mapping_list_clr(struct mapping_head *); + +/* interface.c */ +struct iface *if_new(struct kif *); +void if_exit(struct iface *); +struct iface *if_lookup(struct ldpd_conf *, unsigned short); +struct iface *if_lookup_name(struct ldpd_conf *, const char *); +void if_update_info(struct iface *, struct kif *); +struct iface_af *iface_af_get(struct iface *, int); +void if_addr_add(struct kaddr *); +void if_addr_del(struct kaddr *); +void if_update(struct iface *, int); +void if_update_all(int); +uint16_t if_get_hello_holdtime(struct iface_af *); +uint16_t if_get_hello_interval(struct iface_af *); +struct ctl_iface *if_to_ctl(struct iface_af *); +in_addr_t if_get_ipv4_addr(struct iface *); + +/* adjacency.c */ +struct adj *adj_new(struct in_addr, struct hello_source *, + union ldpd_addr *); +void adj_del(struct adj *, uint32_t); +struct adj *adj_find(struct hello_source *); +int adj_get_af(struct adj *adj); +void adj_start_itimer(struct adj *); +void adj_stop_itimer(struct adj *); +struct tnbr *tnbr_new(int, union ldpd_addr *); +struct tnbr *tnbr_find(struct ldpd_conf *, int, union ldpd_addr *); +struct tnbr *tnbr_check(struct tnbr *); +void tnbr_update(struct tnbr *); +void tnbr_update_all(int); +uint16_t tnbr_get_hello_holdtime(struct tnbr *); +uint16_t tnbr_get_hello_interval(struct tnbr *); +struct ctl_adj *adj_to_ctl(struct adj *); + +/* neighbor.c */ +int nbr_fsm(struct nbr *, enum nbr_event); +struct nbr *nbr_new(struct in_addr, int, int, union ldpd_addr *, + uint32_t); +void nbr_del(struct nbr *); +struct nbr *nbr_find_ldpid(uint32_t); +struct nbr *nbr_find_addr(int, union ldpd_addr *); +struct nbr *nbr_find_peerid(uint32_t); +int nbr_adj_count(struct nbr *, int); +int nbr_session_active_role(struct nbr *); +void 
nbr_stop_ktimer(struct nbr *); +void nbr_stop_ktimeout(struct nbr *); +void nbr_stop_itimeout(struct nbr *); +void nbr_start_idtimer(struct nbr *); +void nbr_stop_idtimer(struct nbr *); +int nbr_pending_idtimer(struct nbr *); +int nbr_pending_connect(struct nbr *); +int nbr_establish_connection(struct nbr *); +int nbr_gtsm_enabled(struct nbr *, struct nbr_params *); +int nbr_gtsm_setup(int, int, struct nbr_params *); +int nbr_gtsm_check(int, struct nbr *, struct nbr_params *); +struct nbr_params *nbr_params_new(struct in_addr); +struct nbr_params *nbr_params_find(struct ldpd_conf *, struct in_addr); +uint16_t nbr_get_keepalive(int, struct in_addr); +struct ctl_nbr *nbr_to_ctl(struct nbr *); +void nbr_clear_ctl(struct ctl_nbr *); + +/* packet.c */ +int gen_ldp_hdr(struct ibuf *, uint16_t); +int gen_msg_hdr(struct ibuf *, uint16_t, uint16_t); +int send_packet(int, int, union ldpd_addr *, + struct iface_af *, void *, size_t); +int disc_recv_packet(struct thread *); +int session_accept(struct thread *); +void session_accept_nbr(struct nbr *, int); +void session_shutdown(struct nbr *, uint32_t, uint32_t, + uint32_t); +void session_close(struct nbr *); +struct tcp_conn *tcp_new(int, struct nbr *); +void pending_conn_del(struct pending_conn *); +struct pending_conn *pending_conn_find(int, union ldpd_addr *); + +char *pkt_ptr; /* packet buffer */ + +/* pfkey.c */ +#ifdef __OpenBSD__ +int pfkey_read(int, struct sadb_msg *); +int pfkey_establish(struct nbr *, struct nbr_params *); +int pfkey_remove(struct nbr *); +int pfkey_init(void); +#endif + +/* l2vpn.c */ +void ldpe_l2vpn_init(struct l2vpn *); +void ldpe_l2vpn_exit(struct l2vpn *); +void ldpe_l2vpn_pw_init(struct l2vpn_pw *); +void ldpe_l2vpn_pw_exit(struct l2vpn_pw *); + +#endif /* _LDPE_H_ */ diff --git a/ldpd/log.c b/ldpd/log.c new file mode 100644 index 0000000000..77efdb4714 --- /dev/null +++ b/ldpd/log.c @@ -0,0 +1,584 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" + +#include +#include "mpls.h" + +static const char * const procnames[] = { + "parent", + "ldpe", + "lde" +}; + +void vlog(int, const char *, va_list); + +void +logit(int pri, const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vlog(pri, fmt, ap); + va_end(ap); +} + +void +vlog(int pri, const char *fmt, va_list ap) +{ + char buf[1024]; + + switch (ldpd_process) { + case PROC_LDE_ENGINE: + vsnprintf(buf, sizeof(buf), fmt, ap); + lde_imsg_compose_parent(IMSG_LOG, pri, buf, strlen(buf) + 1); + break; + case PROC_LDP_ENGINE: + vsnprintf(buf, sizeof(buf), fmt, ap); + ldpe_imsg_compose_parent(IMSG_LOG, pri, buf, strlen(buf) + 1); + break; + case PROC_MAIN: + vzlog(NULL, pri, fmt, ap); + break; + } +} + +void +log_warn(const char *emsg, ...) +{ + char *nfmt; + va_list ap; + + /* best effort to even work in out of memory situations */ + if (emsg == NULL) + logit(LOG_CRIT, "%s", strerror(errno)); + else { + va_start(ap, emsg); + + if (asprintf(&nfmt, "%s: %s", emsg, strerror(errno)) == -1) { + /* we tried it... */ + vlog(LOG_CRIT, emsg, ap); + logit(LOG_CRIT, "%s", strerror(errno)); + } else { + vlog(LOG_CRIT, nfmt, ap); + free(nfmt); + } + va_end(ap); + } +} + +void +log_warnx(const char *emsg, ...) +{ + va_list ap; + + va_start(ap, emsg); + vlog(LOG_CRIT, emsg, ap); + va_end(ap); +} + +void +log_info(const char *emsg, ...) +{ + va_list ap; + + va_start(ap, emsg); + vlog(LOG_INFO, emsg, ap); + va_end(ap); +} + +void +log_notice(const char *emsg, ...) +{ + va_list ap; + + va_start(ap, emsg); + vlog(LOG_NOTICE, emsg, ap); + va_end(ap); +} + +void +log_debug(const char *emsg, ...) +{ + va_list ap; + + va_start(ap, emsg); + vlog(LOG_DEBUG, emsg, ap); + va_end(ap); +} + +void +fatal(const char *emsg) +{ + if (emsg == NULL) + logit(LOG_CRIT, "fatal in %s: %s", procnames[ldpd_process], + strerror(errno)); + else + if (errno) + logit(LOG_CRIT, "fatal in %s: %s: %s", + procnames[ldpd_process], emsg, strerror(errno)); + else + logit(LOG_CRIT, "fatal in %s: %s", + procnames[ldpd_process], emsg); + + exit(1); +} + +void +fatalx(const char *emsg) +{ + errno = 0; + fatal(emsg); +} + +#define NUM_LOGS 4 +const char * +log_sockaddr(void *vp) +{ + static char buf[NUM_LOGS][NI_MAXHOST]; + static int round = 0; + struct sockaddr *sa = vp; + + round = (round + 1) % NUM_LOGS; + + if (getnameinfo(sa, sockaddr_len(sa), buf[round], NI_MAXHOST, NULL, 0, + NI_NUMERICHOST)) + return ("(unknown)"); + else + return (buf[round]); +} + +const char * +log_in6addr(const struct in6_addr *addr) +{ + struct sockaddr_in6 sa_in6; + + memset(&sa_in6, 0, sizeof(sa_in6)); +#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN + sa_in6.sin6_len = sizeof(sa_in6); +#endif + sa_in6.sin6_family = AF_INET6; + sa_in6.sin6_addr = *addr; + + recoverscope(&sa_in6); + + return (log_sockaddr(&sa_in6)); +} + +const char * +log_in6addr_scope(const struct in6_addr *addr, unsigned int ifindex) +{ + struct sockaddr_in6 sa_in6; + + memset(&sa_in6, 0, sizeof(sa_in6)); +#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN + sa_in6.sin6_len = sizeof(sa_in6); +#endif + sa_in6.sin6_family = AF_INET6; + sa_in6.sin6_addr = *addr; + + addscope(&sa_in6, ifindex); + + return (log_sockaddr(&sa_in6)); +} + +const char * +log_addr(int af, const union ldpd_addr *addr) +{ + static char buf[NUM_LOGS][INET6_ADDRSTRLEN]; + static int round = 0; + + switch (af) { + case AF_INET: + round = (round + 1) % NUM_LOGS; + if (inet_ntop(AF_INET, &addr->v4, buf[round], + sizeof(buf[round])) == NULL) + return ("???"); + return (buf[round]); + case AF_INET6: + return (log_in6addr(&addr->v6)); + default: + break; + } + + return ("???"); +} + +#define TF_BUFS 4 +#define TF_LEN 32 + +char * +log_label(uint32_t label) +{ + char *buf; + static char tfbuf[TF_BUFS][TF_LEN]; /* ring buffer */ + static 
int idx = 0; + + buf = tfbuf[idx++]; + if (idx == TF_BUFS) + idx = 0; + + switch (label) { + case NO_LABEL: + snprintf(buf, TF_LEN, "-"); + break; + case MPLS_LABEL_IMPLNULL: + snprintf(buf, TF_LEN, "imp-null"); + break; + case MPLS_LABEL_IPV4NULL: + case MPLS_LABEL_IPV6NULL: + snprintf(buf, TF_LEN, "exp-null"); + break; + default: + snprintf(buf, TF_LEN, "%u", label); + break; + } + + return (buf); +} + +const char * +log_time(time_t t) +{ + char *buf; + static char tfbuf[TF_BUFS][TF_LEN]; /* ring buffer */ + static int idx = 0; + unsigned int sec, min, hrs, day, week; + + buf = tfbuf[idx++]; + if (idx == TF_BUFS) + idx = 0; + + week = t; + + sec = week % 60; + week /= 60; + min = week % 60; + week /= 60; + hrs = week % 24; + week /= 24; + day = week % 7; + week /= 7; + + if (week > 0) + snprintf(buf, TF_LEN, "%02uw%01ud%02uh", week, day, hrs); + else if (day > 0) + snprintf(buf, TF_LEN, "%01ud%02uh%02um", day, hrs, min); + else + snprintf(buf, TF_LEN, "%02u:%02u:%02u", hrs, min, sec); + + return (buf); +} + +char * +log_hello_src(const struct hello_source *src) +{ + static char buf[64]; + + switch (src->type) { + case HELLO_LINK: + snprintf(buf, sizeof(buf), "iface %s", + src->link.ia->iface->name); + break; + case HELLO_TARGETED: + snprintf(buf, sizeof(buf), "source %s", + log_addr(src->target->af, &src->target->addr)); + break; + } + + return (buf); +} + +const char * +log_map(const struct map *map) +{ + static char buf[64]; + + switch (map->type) { + case MAP_TYPE_WILDCARD: + if (snprintf(buf, sizeof(buf), "wildcard") < 0) + return ("???"); + break; + case MAP_TYPE_PREFIX: + if (snprintf(buf, sizeof(buf), "%s/%u", + log_addr(map->fec.prefix.af, &map->fec.prefix.prefix), + map->fec.prefix.prefixlen) == -1) + return ("???"); + break; + case MAP_TYPE_PWID: + if (snprintf(buf, sizeof(buf), "pwid %u (%s)", + map->fec.pwid.pwid, + pw_type_name(map->fec.pwid.type)) == -1) + return ("???"); + break; + default: + return ("???"); + } + + return (buf); +} + +const char * +log_fec(const struct fec *fec) +{ + static char buf[64]; + union ldpd_addr addr; + + switch (fec->type) { + case FEC_TYPE_IPV4: + addr.v4 = fec->u.ipv4.prefix; + if (snprintf(buf, sizeof(buf), "ipv4 %s/%u", + log_addr(AF_INET, &addr), fec->u.ipv4.prefixlen) == -1) + return ("???"); + break; + case FEC_TYPE_IPV6: + addr.v6 = fec->u.ipv6.prefix; + if (snprintf(buf, sizeof(buf), "ipv6 %s/%u", + log_addr(AF_INET6, &addr), fec->u.ipv6.prefixlen) == -1) + return ("???"); + break; + case FEC_TYPE_PWID: + if (snprintf(buf, sizeof(buf), + "pwid %u (%s) - %s", + fec->u.pwid.pwid, pw_type_name(fec->u.pwid.type), + inet_ntoa(fec->u.pwid.lsr_id)) == -1) + return ("???"); + break; + default: + return ("???"); + } + + return (buf); +} + +/* names */ +const char * +af_name(int af) +{ + switch (af) { + case AF_INET: + return ("ipv4"); + case AF_INET6: + return ("ipv6"); +#ifdef AF_MPLS + case AF_MPLS: + return ("mpls"); +#endif + default: + return ("UNKNOWN"); + } +} + +const char * +socket_name(int type) +{ + switch (type) { + case LDP_SOCKET_DISC: + return ("discovery"); + case LDP_SOCKET_EDISC: + return ("extended discovery"); + case LDP_SOCKET_SESSION: + return ("session"); + default: + return ("UNKNOWN"); + } +} + +const char * +nbr_state_name(int state) +{ + switch (state) { + case NBR_STA_PRESENT: + return ("PRESENT"); + case NBR_STA_INITIAL: + return ("INITIALIZED"); + case NBR_STA_OPENREC: + return ("OPENREC"); + case NBR_STA_OPENSENT: + return ("OPENSENT"); + case NBR_STA_OPER: + return ("OPERATIONAL"); + default: + return 
("UNKNOWN"); + } +} + +const char * +if_state_name(int state) +{ + switch (state) { + case IF_STA_DOWN: + return ("DOWN"); + case IF_STA_ACTIVE: + return ("ACTIVE"); + default: + return ("UNKNOWN"); + } +} + +const char * +if_type_name(enum iface_type type) +{ + switch (type) { + case IF_TYPE_POINTOPOINT: + return ("POINTOPOINT"); + case IF_TYPE_BROADCAST: + return ("BROADCAST"); + } + /* NOTREACHED */ + return ("UNKNOWN"); +} + +const char * +msg_name(uint16_t msg) +{ + static char buf[16]; + + switch (msg) { + case MSG_TYPE_NOTIFICATION: + return ("notification"); + case MSG_TYPE_HELLO: + return ("hello"); + case MSG_TYPE_INIT: + return ("initialization"); + case MSG_TYPE_KEEPALIVE: + return ("keepalive"); + case MSG_TYPE_ADDR: + return ("address"); + case MSG_TYPE_ADDRWITHDRAW: + return ("address withdraw"); + case MSG_TYPE_LABELMAPPING: + return ("label mapping"); + case MSG_TYPE_LABELREQUEST: + return ("label request"); + case MSG_TYPE_LABELWITHDRAW: + return ("label withdraw"); + case MSG_TYPE_LABELRELEASE: + return ("label release"); + case MSG_TYPE_LABELABORTREQ: + default: + snprintf(buf, sizeof(buf), "[%08x]", msg); + return (buf); + } +} + +const char * +status_code_name(uint32_t status) +{ + static char buf[16]; + + switch (status) { + case S_SUCCESS: + return ("Success"); + case S_BAD_LDP_ID: + return ("Bad LDP Identifier"); + case S_BAD_PROTO_VER: + return ("Bad Protocol Version"); + case S_BAD_PDU_LEN: + return ("Bad PDU Length"); + case S_UNKNOWN_MSG: + return ("Unknown Message Type"); + case S_BAD_MSG_LEN: + return ("Bad Message Length"); + case S_UNKNOWN_TLV: + return ("Unknown TLV"); + case S_BAD_TLV_LEN: + return ("Bad TLV Length"); + case S_BAD_TLV_VAL: + return ("Malformed TLV Value"); + case S_HOLDTIME_EXP: + return ("Hold Timer Expired"); + case S_SHUTDOWN: + return ("Shutdown"); + case S_LOOP_DETECTED: + return ("Loop Detected"); + case S_UNKNOWN_FEC: + return ("Unknown FEC"); + case S_NO_ROUTE: + return ("No Route"); + case S_NO_LABEL_RES: + return ("No Label Resources"); + case S_AVAILABLE: + return ("Label Resources Available"); + case S_NO_HELLO: + return ("Session Rejected, No Hello"); + case S_PARM_ADV_MODE: + return ("Rejected Advertisement Mode Parameter"); + case S_MAX_PDU_LEN: + return ("Rejected Max PDU Length Parameter"); + case S_PARM_L_RANGE: + return ("Rejected Label Range Parameter"); + case S_KEEPALIVE_TMR: + return ("KeepAlive Timer Expired"); + case S_LAB_REQ_ABRT: + return ("Label Request Aborted"); + case S_MISS_MSG: + return ("Missing Message Parameters"); + case S_UNSUP_ADDR: + return ("Unsupported Address Family"); + case S_KEEPALIVE_BAD: + return ("Bad KeepAlive Time"); + case S_INTERN_ERR: + return ("Internal Error"); + case S_ILLEGAL_CBIT: + return ("Illegal C-Bit"); + case S_WRONG_CBIT: + return ("Wrong C-Bit"); + case S_INCPT_BITRATE: + return ("Incompatible bit-rate"); + case S_CEP_MISCONF: + return ("CEP-TDM mis-configuration"); + case S_PW_STATUS: + return ("PW Status"); + case S_UNASSIGN_TAI: + return ("Unassigned/Unrecognized TAI"); + case S_MISCONF_ERR: + return ("Generic Misconfiguration Error"); + case S_WITHDRAW_MTHD: + return ("Label Withdraw PW Status Method"); + case S_TRANS_MISMTCH: + return ("Transport Connection Mismatch"); + case S_DS_NONCMPLNCE: + return ("Dual-Stack Noncompliance"); + default: + snprintf(buf, sizeof(buf), "[%08x]", status); + return (buf); + } +} + +const char * +pw_type_name(uint16_t pw_type) +{ + static char buf[64]; + + switch (pw_type) { + case PW_TYPE_ETHERNET_TAGGED: + return ("Eth Tagged"); + 
case PW_TYPE_ETHERNET: + return ("Ethernet"); + default: + snprintf(buf, sizeof(buf), "[%0x]", pw_type); + return (buf); + } +} diff --git a/ldpd/log.h b/ldpd/log.h new file mode 100644 index 0000000000..4d6da43cac --- /dev/null +++ b/ldpd/log.h @@ -0,0 +1,65 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LOG_H_ +#define _LOG_H_ + +#include + +struct in6_addr; +union ldpd_addr; +struct hello_source; +struct fec; + +void logit(int, const char *, ...) + __attribute__((__format__ (printf, 2, 3))); +void log_warn(const char *, ...) + __attribute__((__format__ (printf, 1, 2))); +void log_warnx(const char *, ...) + __attribute__((__format__ (printf, 1, 2))); +void log_info(const char *, ...) + __attribute__((__format__ (printf, 1, 2))); +void log_notice(const char *, ...) + __attribute__((__format__ (printf, 1, 2))); +void log_debug(const char *, ...) + __attribute__((__format__ (printf, 1, 2))); +void fatal(const char *) + __attribute__ ((noreturn)) + __attribute__((__format__ (printf, 1, 0))); +void fatalx(const char *) + __attribute__ ((noreturn)) + __attribute__((__format__ (printf, 1, 0))); +const char *log_sockaddr(void *); +const char *log_in6addr(const struct in6_addr *); +const char *log_in6addr_scope(const struct in6_addr *, unsigned int); +const char *log_addr(int, const union ldpd_addr *); +char *log_label(uint32_t); +const char *log_time(time_t); +char *log_hello_src(const struct hello_source *); +const char *log_map(const struct map *); +const char *log_fec(const struct fec *); +const char *af_name(int); +const char *socket_name(int); +const char *nbr_state_name(int); +const char *if_state_name(int); +const char *if_type_name(enum iface_type); +const char *msg_name(uint16_t); +const char *status_code_name(uint32_t); +const char *pw_type_name(uint16_t); + +#endif /* _LOG_H_ */ diff --git a/ldpd/neighbor.c b/ldpd/neighbor.c new file mode 100644 index 0000000000..8376a01549 --- /dev/null +++ b/ldpd/neighbor.c @@ -0,0 +1,831 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2005 Claudio Jeker + * Copyright (c) 2004, 2005, 2008 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "lde.h" +#include "log.h" + +static __inline int nbr_id_compare(struct nbr *, struct nbr *); +static __inline int nbr_addr_compare(struct nbr *, struct nbr *); +static __inline int nbr_pid_compare(struct nbr *, struct nbr *); +static void nbr_update_peerid(struct nbr *); +static int nbr_ktimer(struct thread *); +static void nbr_start_ktimer(struct nbr *); +static int nbr_ktimeout(struct thread *); +static void nbr_start_ktimeout(struct nbr *); +static int nbr_itimeout(struct thread *); +static void nbr_start_itimeout(struct nbr *); +static int nbr_idtimer(struct thread *); +static int nbr_act_session_operational(struct nbr *); +static void nbr_send_labelmappings(struct nbr *); + +RB_GENERATE(nbr_id_head, nbr, id_tree, nbr_id_compare) +RB_GENERATE(nbr_addr_head, nbr, addr_tree, nbr_addr_compare) +RB_GENERATE(nbr_pid_head, nbr, pid_tree, nbr_pid_compare) + +struct { + int state; + enum nbr_event event; + enum nbr_action action; + int new_state; +} nbr_fsm_tbl[] = { + /* current state event that happened action to take resulting state */ +/* Passive Role */ + {NBR_STA_PRESENT, NBR_EVT_MATCH_ADJ, NBR_ACT_NOTHING, NBR_STA_INITIAL}, + {NBR_STA_INITIAL, NBR_EVT_INIT_RCVD, NBR_ACT_PASSIVE_INIT, NBR_STA_OPENREC}, + {NBR_STA_OPENREC, NBR_EVT_KEEPALIVE_RCVD, NBR_ACT_SESSION_EST, NBR_STA_OPER}, +/* Active Role */ + {NBR_STA_PRESENT, NBR_EVT_CONNECT_UP, NBR_ACT_CONNECT_SETUP, NBR_STA_INITIAL}, + {NBR_STA_INITIAL, NBR_EVT_INIT_SENT, NBR_ACT_NOTHING, NBR_STA_OPENSENT}, + {NBR_STA_OPENSENT, NBR_EVT_INIT_RCVD, NBR_ACT_KEEPALIVE_SEND, NBR_STA_OPENREC}, +/* Session Maintenance */ + {NBR_STA_OPER, NBR_EVT_PDU_RCVD, NBR_ACT_RST_KTIMEOUT, 0}, + {NBR_STA_SESSION, NBR_EVT_PDU_RCVD, NBR_ACT_NOTHING, 0}, + {NBR_STA_OPER, NBR_EVT_PDU_SENT, NBR_ACT_RST_KTIMER, 0}, + {NBR_STA_SESSION, NBR_EVT_PDU_SENT, NBR_ACT_NOTHING, 0}, +/* Session Close */ + {NBR_STA_PRESENT, NBR_EVT_CLOSE_SESSION, NBR_ACT_NOTHING, 0}, + {NBR_STA_SESSION, NBR_EVT_CLOSE_SESSION, NBR_ACT_CLOSE_SESSION, NBR_STA_PRESENT}, + {-1, NBR_EVT_NOTHING, NBR_ACT_NOTHING, 0}, +}; + +const char * const nbr_event_names[] = { + "NOTHING", + "ADJACENCY MATCHED", + "CONNECTION UP", + "SESSION CLOSE", + "INIT RECEIVED", + "KEEPALIVE RECEIVED", + "PDU RECEIVED", + "PDU SENT", + "INIT SENT" +}; + +const char * const nbr_action_names[] = { + "NOTHING", + "RESET KEEPALIVE TIMEOUT", + "START NEIGHBOR SESSION", + "RESET KEEPALIVE TIMER", + "SETUP NEIGHBOR CONNECTION", + "SEND INIT AND KEEPALIVE", + "SEND KEEPALIVE", + "CLOSE SESSION" +}; + +struct nbr_id_head nbrs_by_id = RB_INITIALIZER(&nbrs_by_id); +struct nbr_addr_head nbrs_by_addr = RB_INITIALIZER(&nbrs_by_addr); +struct nbr_pid_head nbrs_by_pid = RB_INITIALIZER(&nbrs_by_pid); + +static __inline int +nbr_id_compare(struct nbr *a, struct nbr *b) +{ + return (ntohl(a->id.s_addr) - ntohl(b->id.s_addr)); +} + +static __inline int +nbr_addr_compare(struct nbr *a, struct nbr *b) +{ + if (a->af < b->af) + return (-1); + if (a->af > b->af) + return (1); + + return (ldp_addrcmp(a->af, &a->raddr, &b->raddr)); +} + +static __inline int +nbr_pid_compare(struct nbr *a, struct nbr *b) +{ + return (a->peerid - b->peerid); +} + +int 
+nbr_fsm(struct nbr *nbr, enum nbr_event event) +{ + struct timeval now; + int old_state; + int new_state = 0; + int i; + + old_state = nbr->state; + for (i = 0; nbr_fsm_tbl[i].state != -1; i++) + if ((nbr_fsm_tbl[i].state & old_state) && + (nbr_fsm_tbl[i].event == event)) { + new_state = nbr_fsm_tbl[i].new_state; + break; + } + + if (nbr_fsm_tbl[i].state == -1) { + /* event outside of the defined fsm, ignore it. */ + log_warnx("%s: lsr-id %s, event %s not expected in " + "state %s", __func__, inet_ntoa(nbr->id), + nbr_event_names[event], nbr_state_name(old_state)); + return (0); + } + + if (new_state != 0) + nbr->state = new_state; + + if (old_state != nbr->state) { + log_debug("%s: event %s resulted in action %s and " + "changing state for lsr-id %s from %s to %s", + __func__, nbr_event_names[event], + nbr_action_names[nbr_fsm_tbl[i].action], + inet_ntoa(nbr->id), nbr_state_name(old_state), + nbr_state_name(nbr->state)); + + if (nbr->state == NBR_STA_OPER) { + gettimeofday(&now, NULL); + nbr->uptime = now.tv_sec; + } + } + + if (nbr->state == NBR_STA_OPER || nbr->state == NBR_STA_PRESENT) + nbr_stop_itimeout(nbr); + else + nbr_start_itimeout(nbr); + + switch (nbr_fsm_tbl[i].action) { + case NBR_ACT_RST_KTIMEOUT: + nbr_start_ktimeout(nbr); + break; + case NBR_ACT_RST_KTIMER: + nbr_start_ktimer(nbr); + break; + case NBR_ACT_SESSION_EST: + nbr_act_session_operational(nbr); + nbr_start_ktimer(nbr); + nbr_start_ktimeout(nbr); + if (nbr->v4_enabled) + send_address_all(nbr, AF_INET); + if (nbr->v6_enabled) + send_address_all(nbr, AF_INET6); + nbr_send_labelmappings(nbr); + break; + case NBR_ACT_CONNECT_SETUP: + nbr->tcp = tcp_new(nbr->fd, nbr); + + /* trigger next state */ + send_init(nbr); + nbr_fsm(nbr, NBR_EVT_INIT_SENT); + break; + case NBR_ACT_PASSIVE_INIT: + send_init(nbr); + send_keepalive(nbr); + break; + case NBR_ACT_KEEPALIVE_SEND: + nbr_start_ktimeout(nbr); + send_keepalive(nbr); + break; + case NBR_ACT_CLOSE_SESSION: + ldpe_imsg_compose_lde(IMSG_NEIGHBOR_DOWN, nbr->peerid, 0, + NULL, 0); + session_close(nbr); + break; + case NBR_ACT_NOTHING: + /* do nothing */ + break; + } + + return (0); +} + +struct nbr * +nbr_new(struct in_addr id, int af, int ds_tlv, union ldpd_addr *addr, + uint32_t scope_id) +{ + struct nbr *nbr; + struct nbr_params *nbrp; + struct adj *adj; + struct pending_conn *pconn; + + log_debug("%s: lsr-id %s transport-address %s", __func__, + inet_ntoa(id), log_addr(af, addr)); + + if ((nbr = calloc(1, sizeof(*nbr))) == NULL) + fatal(__func__); + + LIST_INIT(&nbr->adj_list); + nbr->state = NBR_STA_PRESENT; + nbr->peerid = 0; + nbr->af = af; + nbr->ds_tlv = ds_tlv; + if (af == AF_INET || ds_tlv) + nbr->v4_enabled = 1; + if (af == AF_INET6 || ds_tlv) + nbr->v6_enabled = 1; + nbr->id = id; + nbr->laddr = (ldp_af_conf_get(leconf, af))->trans_addr; + nbr->raddr = *addr; + nbr->raddr_scope = scope_id; + nbr->conf_seqnum = 0; + + LIST_FOREACH(adj, &global.adj_list, global_entry) { + if (adj->lsr_id.s_addr == nbr->id.s_addr) { + adj->nbr = nbr; + LIST_INSERT_HEAD(&nbr->adj_list, adj, nbr_entry); + } + } + + if (RB_INSERT(nbr_id_head, &nbrs_by_id, nbr) != NULL) + fatalx("nbr_new: RB_INSERT(nbrs_by_id) failed"); + if (RB_INSERT(nbr_addr_head, &nbrs_by_addr, nbr) != NULL) + fatalx("nbr_new: RB_INSERT(nbrs_by_addr) failed"); + + TAILQ_INIT(&nbr->mapping_list); + TAILQ_INIT(&nbr->withdraw_list); + TAILQ_INIT(&nbr->request_list); + TAILQ_INIT(&nbr->release_list); + TAILQ_INIT(&nbr->abortreq_list); + + nbrp = nbr_params_find(leconf, nbr->id); + if (nbrp) { +#ifdef __OpenBSD__ + if 
(pfkey_establish(nbr, nbrp) == -1) + fatalx("pfkey setup failed"); +#else + sock_set_md5sig( + (ldp_af_global_get(&global, nbr->af))->ldp_session_socket, + nbr->af, &nbr->raddr, nbrp->auth.md5key); +#endif + } + + pconn = pending_conn_find(nbr->af, &nbr->raddr); + if (pconn) { + session_accept_nbr(nbr, pconn->fd); + pending_conn_del(pconn); + } + + return (nbr); +} + +void +nbr_del(struct nbr *nbr) +{ + log_debug("%s: lsr-id %s", __func__, inet_ntoa(nbr->id)); + + nbr_fsm(nbr, NBR_EVT_CLOSE_SESSION); +#ifdef __OpenBSD__ + pfkey_remove(nbr); +#else + sock_set_md5sig( + (ldp_af_global_get(&global, nbr->af))->ldp_session_socket, + nbr->af, &nbr->raddr, NULL); +#endif + + if (nbr_pending_connect(nbr)) + THREAD_WRITE_OFF(nbr->ev_connect); + nbr_stop_ktimer(nbr); + nbr_stop_ktimeout(nbr); + nbr_stop_itimeout(nbr); + nbr_stop_idtimer(nbr); + + mapping_list_clr(&nbr->mapping_list); + mapping_list_clr(&nbr->withdraw_list); + mapping_list_clr(&nbr->request_list); + mapping_list_clr(&nbr->release_list); + mapping_list_clr(&nbr->abortreq_list); + + if (nbr->peerid) + RB_REMOVE(nbr_pid_head, &nbrs_by_pid, nbr); + RB_REMOVE(nbr_id_head, &nbrs_by_id, nbr); + RB_REMOVE(nbr_addr_head, &nbrs_by_addr, nbr); + + free(nbr); +} + +static void +nbr_update_peerid(struct nbr *nbr) +{ + static uint32_t peercnt = 1; + + if (nbr->peerid) + RB_REMOVE(nbr_pid_head, &nbrs_by_pid, nbr); + + /* get next unused peerid */ + while (nbr_find_peerid(++peercnt)) + ; + nbr->peerid = peercnt; + + if (RB_INSERT(nbr_pid_head, &nbrs_by_pid, nbr) != NULL) + fatalx("nbr_update_peerid: RB_INSERT(nbrs_by_pid) failed"); +} + +struct nbr * +nbr_find_ldpid(uint32_t lsr_id) +{ + struct nbr n; + n.id.s_addr = lsr_id; + return (RB_FIND(nbr_id_head, &nbrs_by_id, &n)); +} + +struct nbr * +nbr_find_addr(int af, union ldpd_addr *addr) +{ + struct nbr n; + n.af = af; + n.raddr = *addr; + return (RB_FIND(nbr_addr_head, &nbrs_by_addr, &n)); +} + +struct nbr * +nbr_find_peerid(uint32_t peerid) +{ + struct nbr n; + n.peerid = peerid; + return (RB_FIND(nbr_pid_head, &nbrs_by_pid, &n)); +} + +int +nbr_adj_count(struct nbr *nbr, int af) +{ + struct adj *adj; + int total = 0; + + LIST_FOREACH(adj, &nbr->adj_list, nbr_entry) + if (adj_get_af(adj) == af) + total++; + + return (total); +} + +int +nbr_session_active_role(struct nbr *nbr) +{ + if (ldp_addrcmp(nbr->af, &nbr->laddr, &nbr->raddr) > 0) + return (1); + + return (0); +} + +/* timers */ + +/* Keepalive timer: timer to send keepalive message to neighbors */ + +static int +nbr_ktimer(struct thread *thread) +{ + struct nbr *nbr = THREAD_ARG(thread); + + nbr->keepalive_timer = NULL; + send_keepalive(nbr); + nbr_start_ktimer(nbr); + + return (0); +} + +static void +nbr_start_ktimer(struct nbr *nbr) +{ + int secs; + + /* send three keepalives per period */ + secs = nbr->keepalive / KEEPALIVE_PER_PERIOD; + THREAD_TIMER_OFF(nbr->keepalive_timer); + nbr->keepalive_timer = thread_add_timer(master, nbr_ktimer, nbr, secs); +} + +void +nbr_stop_ktimer(struct nbr *nbr) +{ + THREAD_TIMER_OFF(nbr->keepalive_timer); +} + +/* Keepalive timeout: if the nbr hasn't sent keepalive */ + +static int +nbr_ktimeout(struct thread *thread) +{ + struct nbr *nbr = THREAD_ARG(thread); + + nbr->keepalive_timeout = NULL; + + log_debug("%s: lsr-id %s", __func__, inet_ntoa(nbr->id)); + + session_shutdown(nbr, S_KEEPALIVE_TMR, 0, 0); + + return (0); +} + +static void +nbr_start_ktimeout(struct nbr *nbr) +{ + THREAD_TIMER_OFF(nbr->keepalive_timeout); + nbr->keepalive_timeout = thread_add_timer(master, nbr_ktimeout, nbr, + 
 nbr->keepalive); +} + +void +nbr_stop_ktimeout(struct nbr *nbr) +{ + THREAD_TIMER_OFF(nbr->keepalive_timeout); +} + +/* Session initialization timeout: if nbr got stuck in the initialization FSM */ + +static int +nbr_itimeout(struct thread *thread) +{ + struct nbr *nbr = THREAD_ARG(thread); + + log_debug("%s: lsr-id %s", __func__, inet_ntoa(nbr->id)); + + nbr_fsm(nbr, NBR_EVT_CLOSE_SESSION); + + return (0); +} + +static void +nbr_start_itimeout(struct nbr *nbr) +{ + int secs; + + secs = INIT_FSM_TIMEOUT; + THREAD_TIMER_OFF(nbr->init_timeout); + nbr->init_timeout = thread_add_timer(master, nbr_itimeout, nbr, secs); +} + +void +nbr_stop_itimeout(struct nbr *nbr) +{ + THREAD_TIMER_OFF(nbr->init_timeout); +} + +/* Init delay timer: timer to retry to initialize the session */ + +static int +nbr_idtimer(struct thread *thread) +{ + struct nbr *nbr = THREAD_ARG(thread); + + nbr->initdelay_timer = NULL; + + log_debug("%s: lsr-id %s", __func__, inet_ntoa(nbr->id)); + + nbr_establish_connection(nbr); + + return (0); +} + +void +nbr_start_idtimer(struct nbr *nbr) +{ + int secs; + + secs = INIT_DELAY_TMR; + switch(nbr->idtimer_cnt) { + default: + /* do not further increase the counter */ + secs = MAX_DELAY_TMR; + break; + case 2: + secs *= 2; + /* FALLTHROUGH */ + case 1: + secs *= 2; + /* FALLTHROUGH */ + case 0: + nbr->idtimer_cnt++; + break; + } + + THREAD_TIMER_OFF(nbr->initdelay_timer); + nbr->initdelay_timer = thread_add_timer(master, nbr_idtimer, nbr, secs); +} + +void +nbr_stop_idtimer(struct nbr *nbr) +{ + THREAD_TIMER_OFF(nbr->initdelay_timer); +} + +int +nbr_pending_idtimer(struct nbr *nbr) +{ + return (nbr->initdelay_timer != NULL); +} + +int +nbr_pending_connect(struct nbr *nbr) +{ + return (nbr->ev_connect != NULL); +} + +static int +nbr_connect_cb(struct thread *thread) +{ + struct nbr *nbr = THREAD_ARG(thread); + int error; + socklen_t len; + + nbr->ev_connect = NULL; + + len = sizeof(error); + if (getsockopt(nbr->fd, SOL_SOCKET, SO_ERROR, &error, &len) < 0) { + log_warn("%s: getsockopt SOL_SOCKET SO_ERROR", __func__); + return (0); + } + + if (error) { + close(nbr->fd); + errno = error; + log_debug("%s: error while connecting to %s: %s", __func__, + log_addr(nbr->af, &nbr->raddr), strerror(errno)); + return (0); + } + + nbr_fsm(nbr, NBR_EVT_CONNECT_UP); + + return (0); +} + +int +nbr_establish_connection(struct nbr *nbr) +{ + struct sockaddr_storage local_sa; + struct sockaddr_storage remote_sa; + struct adj *adj; + struct nbr_params *nbrp; +#ifdef __OpenBSD__ + int opt = 1; +#endif + + nbr->fd = socket(nbr->af, SOCK_STREAM, 0); + if (nbr->fd == -1) { + log_warn("%s: error while creating socket", __func__); + return (-1); + } + sock_set_nonblock(nbr->fd); + + nbrp = nbr_params_find(leconf, nbr->id); + if (nbrp && nbrp->auth.method == AUTH_MD5SIG) { +#ifdef __OpenBSD__ + if (sysdep.no_pfkey || sysdep.no_md5sig) { + log_warnx("md5sig configured but not available"); + close(nbr->fd); + return (-1); + } + if (setsockopt(nbr->fd, IPPROTO_TCP, TCP_MD5SIG, + &opt, sizeof(opt)) == -1) { + log_warn("setsockopt md5sig"); + close(nbr->fd); + return (-1); + } +#else + sock_set_md5sig(nbr->fd, nbr->af, &nbr->raddr, + nbrp->auth.md5key); +#endif + } + + memcpy(&local_sa, addr2sa(nbr->af, &nbr->laddr, 0), sizeof(local_sa)); + memcpy(&remote_sa, addr2sa(nbr->af, &nbr->raddr, LDP_PORT), + sizeof(local_sa)); + if (nbr->af == AF_INET6 && nbr->raddr_scope) + addscope((struct sockaddr_in6 *)&remote_sa, nbr->raddr_scope); + + if (bind(nbr->fd, (struct sockaddr *)&local_sa, + sockaddr_len((struct sockaddr 
*)&local_sa)) == -1) { + log_warn("%s: error while binding socket to %s", __func__, + log_sockaddr((struct sockaddr *)&local_sa)); + close(nbr->fd); + return (-1); + } + + if (nbr_gtsm_check(nbr->fd, nbr, nbrp)) { + close(nbr->fd); + return (-1); + } + + /* + * Send an extra hello to guarantee that the remote peer has formed + * an adjacency as well. + */ + LIST_FOREACH(adj, &nbr->adj_list, nbr_entry) + send_hello(adj->source.type, adj->source.link.ia, + adj->source.target); + + if (connect(nbr->fd, (struct sockaddr *)&remote_sa, + sockaddr_len((struct sockaddr *)&remote_sa)) == -1) { + if (errno == EINPROGRESS) { + THREAD_WRITE_ON(master, nbr->ev_connect, nbr_connect_cb, + nbr, nbr->fd); + return (0); + } + log_warn("%s: error while connecting to %s", __func__, + log_sockaddr((struct sockaddr *)&remote_sa)); + close(nbr->fd); + return (-1); + } + + /* connection completed immediately */ + nbr_fsm(nbr, NBR_EVT_CONNECT_UP); + + return (0); +} + +int +nbr_gtsm_enabled(struct nbr *nbr, struct nbr_params *nbrp) +{ + /* + * RFC 6720 - Section 3: + * "This document allows for the implementation to provide an option to + * statically (e.g., via configuration) and/or dynamically override the + * default behavior and enable/disable GTSM on a per-peer basis". + */ + if (nbrp && (nbrp->flags & F_NBRP_GTSM)) + return (nbrp->gtsm_enabled); + + if ((ldp_af_conf_get(leconf, nbr->af))->flags & F_LDPD_AF_NO_GTSM) + return (0); + + /* By default, GTSM support has to be negotiated for LDPv4 */ + if (nbr->af == AF_INET && !(nbr->flags & F_NBR_GTSM_NEGOTIATED)) + return (0); + + return (1); +} + +int +nbr_gtsm_setup(int fd, int af, struct nbr_params *nbrp) +{ + int ttl = 255; + + if (nbrp && (nbrp->flags & F_NBRP_GTSM_HOPS)) + ttl = 256 - nbrp->gtsm_hops; + + switch (af) { + case AF_INET: + if (sock_set_ipv4_minttl(fd, ttl) == -1) + return (-1); + ttl = 255; + if (sock_set_ipv4_ucast_ttl(fd, ttl) == -1) + return (-1); + break; + case AF_INET6: + /* ignore any possible error */ + sock_set_ipv6_minhopcount(fd, ttl); + ttl = 255; + if (sock_set_ipv6_ucast_hops(fd, ttl) == -1) + return (-1); + break; + default: + fatalx("nbr_gtsm_setup: unknown af"); + } + + return (0); +} + +int +nbr_gtsm_check(int fd, struct nbr *nbr, struct nbr_params *nbrp) +{ + if (!nbr_gtsm_enabled(nbr, nbrp)) { + switch (nbr->af) { + case AF_INET: + sock_set_ipv4_ucast_ttl(fd, -1); + break; + case AF_INET6: + /* + * Send packets with a Hop Limit of 255 even when GTSM + * is disabled to guarantee interoperability. 
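+ * A GTSM-enabled peer (see nbr_gtsm_setup() above) drops packets that
+ * arrive with a Hop Limit below 255, so the maximum value is always used.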
+ */ + sock_set_ipv6_ucast_hops(fd, 255); + break; + default: + fatalx("nbr_gtsm_check: unknown af"); + break; + } + return (0); + } + + if (nbr_gtsm_setup(fd, nbr->af, nbrp) == -1) { + log_warnx("%s: error enabling GTSM for lsr-id %s", __func__, + inet_ntoa(nbr->id)); + return (-1); + } + + return (0); +} + +static int +nbr_act_session_operational(struct nbr *nbr) +{ + struct lde_nbr lde_nbr; + + nbr->idtimer_cnt = 0; + + /* this is necessary to avoid ipc synchronization issues */ + nbr_update_peerid(nbr); + + memset(&lde_nbr, 0, sizeof(lde_nbr)); + lde_nbr.id = nbr->id; + lde_nbr.v4_enabled = nbr->v4_enabled; + lde_nbr.v6_enabled = nbr->v6_enabled; + return (ldpe_imsg_compose_lde(IMSG_NEIGHBOR_UP, nbr->peerid, 0, + &lde_nbr, sizeof(lde_nbr))); +} + +static void +nbr_send_labelmappings(struct nbr *nbr) +{ + ldpe_imsg_compose_lde(IMSG_LABEL_MAPPING_FULL, nbr->peerid, 0, + NULL, 0); +} + +struct nbr_params * +nbr_params_new(struct in_addr lsr_id) +{ + struct nbr_params *nbrp; + + if ((nbrp = calloc(1, sizeof(*nbrp))) == NULL) + fatal(__func__); + + nbrp->lsr_id = lsr_id; + nbrp->auth.method = AUTH_NONE; + + return (nbrp); +} + +struct nbr_params * +nbr_params_find(struct ldpd_conf *xconf, struct in_addr lsr_id) +{ + struct nbr_params *nbrp; + + LIST_FOREACH(nbrp, &xconf->nbrp_list, entry) + if (nbrp->lsr_id.s_addr == lsr_id.s_addr) + return (nbrp); + + return (NULL); +} + +uint16_t +nbr_get_keepalive(int af, struct in_addr lsr_id) +{ + struct nbr_params *nbrp; + + nbrp = nbr_params_find(leconf, lsr_id); + if (nbrp && (nbrp->flags & F_NBRP_KEEPALIVE)) + return (nbrp->keepalive); + + return ((ldp_af_conf_get(leconf, af))->keepalive); +} + +struct ctl_nbr * +nbr_to_ctl(struct nbr *nbr) +{ + static struct ctl_nbr nctl; + struct timeval now; + + nctl.af = nbr->af; + nctl.id = nbr->id; + nctl.laddr = nbr->laddr; + nctl.lport = nbr->tcp->lport; + nctl.raddr = nbr->raddr; + nctl.rport = nbr->tcp->rport; + nctl.holdtime = nbr->keepalive; + nctl.nbr_state = nbr->state; + + gettimeofday(&now, NULL); + if (nbr->state == NBR_STA_OPER) { + nctl.uptime = now.tv_sec - nbr->uptime; + } else + nctl.uptime = 0; + + return (&nctl); +} + +void +nbr_clear_ctl(struct ctl_nbr *nctl) +{ + struct nbr *nbr; + + RB_FOREACH(nbr, nbr_addr_head, &nbrs_by_addr) { + if (ldp_addrisset(nctl->af, &nctl->raddr) && + ldp_addrcmp(nctl->af, &nctl->raddr, &nbr->raddr)) + continue; + + log_debug("%s: neighbor %s manually cleared", __func__, + log_addr(nbr->af, &nbr->raddr)); + session_shutdown(nbr, S_SHUTDOWN, 0, 0); + } +} diff --git a/ldpd/notification.c b/ldpd/notification.c new file mode 100644 index 0000000000..d306361d5c --- /dev/null +++ b/ldpd/notification.c @@ -0,0 +1,238 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2009 Michele Marchetto + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldp.h" +#include "log.h" +#include "ldpe.h" +#include "ldp_debug.h" + +void +send_notification_full(struct tcp_conn *tcp, struct notify_msg *nm) +{ + struct ibuf *buf; + uint16_t size; + int err = 0; + + /* calculate size */ + size = LDP_HDR_SIZE + LDP_MSG_SIZE + STATUS_SIZE; + if (nm->flags & F_NOTIF_PW_STATUS) + size += PW_STATUS_TLV_SIZE; + if (nm->flags & F_NOTIF_FEC) { + size += TLV_HDR_SIZE; + switch (nm->fec.type) { + case MAP_TYPE_PWID: + size += FEC_PWID_ELM_MIN_LEN; + if (nm->fec.flags & F_MAP_PW_ID) + size += sizeof(uint32_t); + break; + } + } + + if ((buf = ibuf_open(size)) == NULL) + fatal(__func__); + + err |= gen_ldp_hdr(buf, size); + size -= LDP_HDR_SIZE; + err |= gen_msg_hdr(buf, MSG_TYPE_NOTIFICATION, size); + err |= gen_status_tlv(buf, nm->status_code, nm->msg_id, nm->msg_type); + /* optional tlvs */ + if (nm->flags & F_NOTIF_PW_STATUS) + err |= gen_pw_status_tlv(buf, nm->pw_status); + if (nm->flags & F_NOTIF_FEC) + err |= gen_fec_tlv(buf, &nm->fec); + if (err) { + ibuf_free(buf); + return; + } + + if (tcp->nbr) + debug_msg_send("notification: lsr-id %s status %s%s", + inet_ntoa(tcp->nbr->id), status_code_name(nm->status_code), + (nm->status_code & STATUS_FATAL) ? " (fatal)" : ""); + + evbuf_enqueue(&tcp->wbuf, buf); +} + +/* send a notification without optional tlvs */ +void +send_notification(uint32_t status_code, struct tcp_conn *tcp, uint32_t msg_id, + uint16_t msg_type) +{ + struct notify_msg nm; + + memset(&nm, 0, sizeof(nm)); + nm.status_code = status_code; + nm.msg_id = msg_id; + nm.msg_type = msg_type; + + send_notification_full(tcp, &nm); +} + +void +send_notification_nbr(struct nbr *nbr, uint32_t status_code, uint32_t msg_id, + uint16_t msg_type) +{ + send_notification(status_code, nbr->tcp, msg_id, msg_type); + nbr_fsm(nbr, NBR_EVT_PDU_SENT); +} + +int +recv_notification(struct nbr *nbr, char *buf, uint16_t len) +{ + struct ldp_msg msg; + struct status_tlv st; + struct notify_msg nm; + int tlen; + + memcpy(&msg, buf, sizeof(msg)); + buf += LDP_MSG_SIZE; + len -= LDP_MSG_SIZE; + + if (len < STATUS_SIZE) { + session_shutdown(nbr, S_BAD_MSG_LEN, msg.id, msg.type); + return (-1); + } + memcpy(&st, buf, sizeof(st)); + + if (ntohs(st.length) > STATUS_SIZE - TLV_HDR_SIZE || + ntohs(st.length) > len - TLV_HDR_SIZE) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + buf += STATUS_SIZE; + len -= STATUS_SIZE; + + memset(&nm, 0, sizeof(nm)); + nm.status_code = ntohl(st.status_code); + + /* Optional Parameters */ + while (len > 0) { + struct tlv tlv; + uint16_t tlv_len; + + if (len < sizeof(tlv)) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + + memcpy(&tlv, buf, TLV_HDR_SIZE); + tlv_len = ntohs(tlv.length); + if (tlv_len + TLV_HDR_SIZE > len) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); + return (-1); + } + buf += TLV_HDR_SIZE; + len -= TLV_HDR_SIZE; + + switch (ntohs(tlv.type)) { + case TLV_TYPE_EXTSTATUS: + case TLV_TYPE_RETURNEDPDU: + case TLV_TYPE_RETURNEDMSG: + /* TODO is there any use for this? 
*/ + break; + case TLV_TYPE_PW_STATUS: + if (tlv_len != 4) { + session_shutdown(nbr, S_BAD_TLV_LEN, + msg.id, msg.type); + return (-1); + } + + nm.pw_status = ntohl(*(uint32_t *)buf); + nm.flags |= F_NOTIF_PW_STATUS; + break; + case TLV_TYPE_FEC: + if ((tlen = tlv_decode_fec_elm(nbr, &msg, buf, + tlv_len, &nm.fec)) == -1) + return (-1); + /* allow only one fec element */ + if (tlen != tlv_len) { + session_shutdown(nbr, S_BAD_TLV_VAL, + msg.id, msg.type); + return (-1); + } + nm.flags |= F_NOTIF_FEC; + break; + default: + if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) + send_notification_nbr(nbr, S_UNKNOWN_TLV, + msg.id, msg.type); + /* ignore unknown tlv */ + break; + } + buf += tlv_len; + len -= tlv_len; + } + + if (nm.status_code == S_PW_STATUS) { + if (!(nm.flags & (F_NOTIF_PW_STATUS|F_NOTIF_FEC))) { + send_notification_nbr(nbr, S_MISS_MSG, + msg.id, msg.type); + return (-1); + } + + switch (nm.fec.type) { + case MAP_TYPE_PWID: + break; + default: + send_notification_nbr(nbr, S_BAD_TLV_VAL, + msg.id, msg.type); + return (-1); + } + } + + debug_msg_recv("notification: lsr-id %s: %s%s", inet_ntoa(nbr->id), + status_code_name(ntohl(st.status_code)), + (st.status_code & htonl(STATUS_FATAL)) ? " (fatal)" : ""); + + if (st.status_code & htonl(STATUS_FATAL)) { + if (nbr->state == NBR_STA_OPENSENT) + nbr_start_idtimer(nbr); + + nbr_fsm(nbr, NBR_EVT_CLOSE_SESSION); + return (-1); + } + + if (nm.status_code == S_PW_STATUS) + ldpe_imsg_compose_lde(IMSG_NOTIFICATION, nbr->peerid, 0, + &nm, sizeof(nm)); + + return (0); +} + +int +gen_status_tlv(struct ibuf *buf, uint32_t status_code, uint32_t msg_id, + uint16_t msg_type) +{ + struct status_tlv st; + + memset(&st, 0, sizeof(st)); + st.type = htons(TLV_TYPE_STATUS); + st.length = htons(STATUS_TLV_LEN); + st.status_code = htonl(status_code); + /* + * For convenience, msg_id and msg_type are already in network + * byte order. + */ + st.msg_id = msg_id; + st.msg_type = msg_type; + + return (ibuf_add(buf, &st, STATUS_SIZE)); +} diff --git a/ldpd/packet.c b/ldpd/packet.c new file mode 100644 index 0000000000..9b3151d720 --- /dev/null +++ b/ldpd/packet.c @@ -0,0 +1,822 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2013, 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2004, 2005, 2008 Esben Norby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" + +#include "sockopt.h" + +static struct iface *disc_find_iface(unsigned int, int, + union ldpd_addr *, int); +static int session_read(struct thread *); +static int session_write(struct thread *); +static ssize_t session_get_pdu(struct ibuf_read *, char **); +static void tcp_close(struct tcp_conn *); +static struct pending_conn *pending_conn_new(int, int, union ldpd_addr *); +static int pending_conn_timeout(struct thread *); + +int +gen_ldp_hdr(struct ibuf *buf, uint16_t size) +{ + struct ldp_hdr ldp_hdr; + + memset(&ldp_hdr, 0, sizeof(ldp_hdr)); + ldp_hdr.version = htons(LDP_VERSION); + /* exclude the 'Version' and 'PDU Length' fields from the total */ + ldp_hdr.length = htons(size - LDP_HDR_DEAD_LEN); + ldp_hdr.lsr_id = ldp_rtr_id_get(leconf); + ldp_hdr.lspace_id = 0; + + return (ibuf_add(buf, &ldp_hdr, LDP_HDR_SIZE)); +} + +int +gen_msg_hdr(struct ibuf *buf, uint16_t type, uint16_t size) +{ + static int msgcnt = 0; + struct ldp_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.type = htons(type); + /* exclude the 'Type' and 'Length' fields from the total */ + msg.length = htons(size - LDP_MSG_DEAD_LEN); + msg.id = htonl(++msgcnt); + + return (ibuf_add(buf, &msg, sizeof(msg))); +} + +/* send packets */ +int +send_packet(int fd, int af, union ldpd_addr *dst, struct iface_af *ia, + void *pkt, size_t len) +{ + struct sockaddr *sa; + + switch (af) { + case AF_INET: + if (ia && IN_MULTICAST(ntohl(dst->v4.s_addr))) { + /* set outgoing interface for multicast traffic */ + if (sock_set_ipv4_mcast(ia->iface) == -1) { + log_debug("%s: error setting multicast " + "interface, %s", __func__, ia->iface->name); + return (-1); + } + } + break; + case AF_INET6: + if (ia && IN6_IS_ADDR_MULTICAST(&dst->v6)) { + /* set outgoing interface for multicast traffic */ + if (sock_set_ipv6_mcast(ia->iface) == -1) { + log_debug("%s: error setting multicast " + "interface, %s", __func__, ia->iface->name); + return (-1); + } + } + break; + default: + fatalx("send_packet: unknown af"); + } + + sa = addr2sa(af, dst, LDP_PORT); + if (sendto(fd, pkt, len, 0, sa, sockaddr_len(sa)) == -1) { + log_warn("%s: error sending packet to %s", __func__, + log_sockaddr(sa)); + return (-1); + } + + return (0); +} + +/* Discovery functions */ +int +disc_recv_packet(struct thread *thread) +{ + int fd = THREAD_FD(thread); + struct thread **threadp = THREAD_ARG(thread); + + union { + struct cmsghdr hdr; +#ifdef HAVE_STRUCT_SOCKADDR_DL + char buf[CMSG_SPACE(sizeof(struct sockaddr_dl))]; +#else + char buf[CMSG_SPACE(sizeof(struct in6_pktinfo))]; +#endif + } cmsgbuf; + struct msghdr m; + struct sockaddr_storage from; + struct iovec iov; + char *buf; +#ifndef MSG_MCAST + struct cmsghdr *cmsg; +#endif + ssize_t r; + int multicast; + int af; + union ldpd_addr src; + unsigned int ifindex = 0; + struct iface *iface; + uint16_t len; + struct ldp_hdr ldp_hdr; + uint16_t pdu_len; + struct ldp_msg msg; + uint16_t msg_len; + struct in_addr lsr_id; + + /* reschedule read */ + *threadp = thread_add_read(master, disc_recv_packet, threadp, fd); + + /* setup buffer */ + memset(&m, 0, sizeof(m)); + iov.iov_base = buf = pkt_ptr; + iov.iov_len = IBUF_READ_SIZE; + m.msg_name = &from; + m.msg_namelen = sizeof(from); + m.msg_iov = &iov; + m.msg_iovlen = 1; + m.msg_control = &cmsgbuf.buf; + m.msg_controllen = sizeof(cmsgbuf.buf); + + if ((r = recvmsg(fd, &m, 0)) == -1) { + if (errno != EAGAIN && errno != EINTR) + log_debug("%s: read error: %s", __func__, + strerror(errno)); + return (0); + 
} + + sa2addr((struct sockaddr *)&from, &af, &src, NULL); +#ifdef MSG_MCAST + multicast = (m.msg_flags & MSG_MCAST) ? 1 : 0; +#else + multicast = 0; + for (cmsg = CMSG_FIRSTHDR(&m); cmsg != NULL; + cmsg = CMSG_NXTHDR(&m, cmsg)) { +#if defined(HAVE_IP_PKTINFO) + if (af == AF_INET && cmsg->cmsg_level == IPPROTO_IP && + cmsg->cmsg_type == IP_PKTINFO) { + struct in_pktinfo *pktinfo; + + pktinfo = (struct in_pktinfo *)CMSG_DATA(cmsg); + if (IN_MULTICAST(ntohl(pktinfo->ipi_addr.s_addr))) + multicast = 1; + break; + } +#elif defined(HAVE_IP_RECVDSTADDR) + if (af == AF_INET && cmsg->cmsg_level == IPPROTO_IP && + cmsg->cmsg_type == IP_RECVDSTADDR) { + struct in_addr *addr; + + addr = (struct in_addr *)CMSG_DATA(cmsg); + if (IN_MULTICAST(ntohl(addr->s_addr))) + multicast = 1; + break; + } +#else +#error "Unsupported socket API" +#endif + if (af == AF_INET6 && cmsg->cmsg_level == IPPROTO_IPV6 && + cmsg->cmsg_type == IPV6_PKTINFO) { + struct in6_pktinfo *pktinfo; + + pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg); + if (IN6_IS_ADDR_MULTICAST(&pktinfo->ipi6_addr)) + multicast = 1; + break; + } + } +#endif /* MSG_MCAST */ + if (bad_addr(af, &src)) { + log_debug("%s: invalid source address: %s", __func__, + log_addr(af, &src)); + return (0); + } + ifindex = getsockopt_ifindex(af, &m); + + /* find a matching interface */ + iface = disc_find_iface(ifindex, af, &src, multicast); + if (iface == NULL) + return (0); + + /* check packet size */ + len = (uint16_t)r; + if (len < (LDP_HDR_SIZE + LDP_MSG_SIZE) || len > LDP_MAX_LEN) { + log_debug("%s: bad packet size, source %s", __func__, + log_addr(af, &src)); + return (0); + } + + /* LDP header sanity checks */ + memcpy(&ldp_hdr, buf, sizeof(ldp_hdr)); + if (ntohs(ldp_hdr.version) != LDP_VERSION) { + log_debug("%s: invalid LDP version %d, source %s", __func__, + ntohs(ldp_hdr.version), log_addr(af, &src)); + return (0); + } + if (ntohs(ldp_hdr.lspace_id) != 0) { + log_debug("%s: invalid label space %u, source %s", __func__, + ntohs(ldp_hdr.lspace_id), log_addr(af, &src)); + return (0); + } + /* check "PDU Length" field */ + pdu_len = ntohs(ldp_hdr.length); + if ((pdu_len < (LDP_HDR_PDU_LEN + LDP_MSG_SIZE)) || + (pdu_len > (len - LDP_HDR_DEAD_LEN))) { + log_debug("%s: invalid LDP packet length %u, source %s", + __func__, ntohs(ldp_hdr.length), log_addr(af, &src)); + return (0); + } + buf += LDP_HDR_SIZE; + len -= LDP_HDR_SIZE; + + lsr_id.s_addr = ldp_hdr.lsr_id; + + /* + * For UDP, we process only the first message of each packet. This does + * not impose any restrictions since LDP uses UDP only for sending Hello + * packets. 
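+ * Any additional messages carried in the same datagram are ignored.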
+ */ + memcpy(&msg, buf, sizeof(msg)); + + /* check "Message Length" field */ + msg_len = ntohs(msg.length); + if (msg_len < LDP_MSG_LEN || ((msg_len + LDP_MSG_DEAD_LEN) > pdu_len)) { + log_debug("%s: invalid LDP message length %u, source %s", + __func__, ntohs(msg.length), log_addr(af, &src)); + return (0); + } + buf += LDP_MSG_SIZE; + len -= LDP_MSG_SIZE; + + /* switch LDP packet type */ + switch (ntohs(msg.type)) { + case MSG_TYPE_HELLO: + recv_hello(lsr_id, &msg, af, &src, iface, multicast, buf, len); + break; + default: + log_debug("%s: unknown LDP packet type, source %s", __func__, + log_addr(af, &src)); + } + + return (0); +} + +static struct iface * +disc_find_iface(unsigned int ifindex, int af, union ldpd_addr *src, + int multicast) +{ + struct iface *iface; + struct iface_af *ia; + struct if_addr *if_addr; + in_addr_t mask; + + iface = if_lookup(leconf, ifindex); + if (iface == NULL) + return (NULL); + + /* + * For unicast packets, we just need to make sure that the interface + * is enabled for the given address-family. + */ + if (!multicast) { + ia = iface_af_get(iface, af); + if (ia->enabled) + return (iface); + return (NULL); + } + + switch (af) { + case AF_INET: + LIST_FOREACH(if_addr, &iface->addr_list, entry) { + if (if_addr->af != AF_INET) + continue; + + switch (iface->type) { + case IF_TYPE_POINTOPOINT: + if (if_addr->dstbrd.v4.s_addr == src->v4.s_addr) + return (iface); + break; + default: + mask = prefixlen2mask(if_addr->prefixlen); + if ((if_addr->addr.v4.s_addr & mask) == + (src->v4.s_addr & mask)) + return (iface); + break; + } + } + break; + case AF_INET6: + if (IN6_IS_ADDR_LINKLOCAL(&src->v6)) + return (iface); + break; + default: + fatalx("disc_find_iface: unknown af"); + } + + return (NULL); +} + +int +session_accept(struct thread *thread) +{ + int fd = THREAD_FD(thread); + struct sockaddr_storage src; + socklen_t len = sizeof(src); + int newfd; + int af; + union ldpd_addr addr; + struct nbr *nbr; + struct pending_conn *pconn; + + newfd = accept(fd, (struct sockaddr *)&src, &len); + if (newfd == -1) { + /* + * Pause accept if we are out of file descriptors, or + * libevent will haunt us here too. + */ + if (errno == ENFILE || errno == EMFILE) { + accept_pause(); + } else if (errno != EWOULDBLOCK && errno != EINTR && + errno != ECONNABORTED) + log_debug("%s: accept error: %s", __func__, + strerror(errno)); + return (0); + } + sock_set_nonblock(newfd); + + sa2addr((struct sockaddr *)&src, &af, &addr, NULL); + + /* + * Since we don't support label spaces, we can identify this neighbor + * just by its source address. This way we don't need to wait for its + * Initialization message to know who we are talking to. + */ + nbr = nbr_find_addr(af, &addr); + if (nbr == NULL) { + /* + * According to RFC 5036, we would need to send a No Hello + * Error Notification message and close this TCP connection + * right now. But doing so would trigger the backoff exponential + * timer in the remote peer, which would considerably slow down + * the session establishment process. The trick here is to wait + * five seconds before sending the Notification Message. There's + * a good chance that the remote peer will send us a Hello + * message within this interval, so it's worth waiting before + * taking a more drastic measure. 
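+ * The delayed notification, if still necessary, is sent from
+ * pending_conn_timeout() below.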
+ */ + pconn = pending_conn_find(af, &addr); + if (pconn) + close(newfd); + else + pending_conn_new(newfd, af, &addr); + return (0); + } + /* protection against buggy implementations */ + if (nbr_session_active_role(nbr)) { + close(newfd); + return (0); + } + if (nbr->state != NBR_STA_PRESENT) { + log_debug("%s: lsr-id %s: rejecting additional transport " + "connection", __func__, inet_ntoa(nbr->id)); + close(newfd); + return (0); + } + + session_accept_nbr(nbr, newfd); + + return (0); +} + +void +session_accept_nbr(struct nbr *nbr, int fd) +{ +#ifdef __OpenBSD__ + struct nbr_params *nbrp; + int opt; + socklen_t len; + + nbrp = nbr_params_find(leconf, nbr->id); + if (nbr_gtsm_check(fd, nbr, nbrp)) { + close(fd); + return; + } + + if (nbrp && nbrp->auth.method == AUTH_MD5SIG) { + if (sysdep.no_pfkey || sysdep.no_md5sig) { + log_warnx("md5sig configured but not available"); + close(fd); + return; + } + + len = sizeof(opt); + if (getsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt, &len) == -1) + fatal("getsockopt TCP_MD5SIG"); + if (!opt) { /* non-md5'd connection! */ + log_warnx("connection attempt without md5 signature"); + close(fd); + return; + } + } +#endif + + nbr->tcp = tcp_new(fd, nbr); + nbr_fsm(nbr, NBR_EVT_MATCH_ADJ); +} + +static int +session_read(struct thread *thread) +{ + int fd = THREAD_FD(thread); + struct nbr *nbr = THREAD_ARG(thread); + struct tcp_conn *tcp = nbr->tcp; + struct ldp_hdr *ldp_hdr; + struct ldp_msg *msg; + char *buf = NULL, *pdu; + ssize_t n, len; + uint16_t pdu_len, msg_len, msg_size, max_pdu_len; + int ret; + + tcp->rev = thread_add_read(master, session_read, nbr, fd); + + if ((n = read(fd, tcp->rbuf->buf + tcp->rbuf->wpos, + sizeof(tcp->rbuf->buf) - tcp->rbuf->wpos)) == -1) { + if (errno != EINTR && errno != EAGAIN) { + log_warn("%s: read error", __func__); + nbr_fsm(nbr, NBR_EVT_CLOSE_SESSION); + return (0); + } + /* retry read */ + return (0); + } + if (n == 0) { + /* connection closed */ + log_debug("%s: connection closed by remote end", __func__); + nbr_fsm(nbr, NBR_EVT_CLOSE_SESSION); + return (0); + } + tcp->rbuf->wpos += n; + + while ((len = session_get_pdu(tcp->rbuf, &buf)) > 0) { + pdu = buf; + ldp_hdr = (struct ldp_hdr *)pdu; + if (ntohs(ldp_hdr->version) != LDP_VERSION) { + session_shutdown(nbr, S_BAD_PROTO_VER, 0, 0); + free(buf); + return (0); + } + + pdu_len = ntohs(ldp_hdr->length); + /* + * RFC 5036 - Section 3.5.3: + * "Prior to completion of the negotiation, the maximum + * allowable length is 4096 bytes". 
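+ * Once the session is operational, the Max PDU Length negotiated in the
+ * Initialization messages (nbr->max_pdu_len) applies instead.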
+ */ + if (nbr->state == NBR_STA_OPER) + max_pdu_len = nbr->max_pdu_len; + else + max_pdu_len = LDP_MAX_LEN; + if (pdu_len < (LDP_HDR_PDU_LEN + LDP_MSG_SIZE) || + pdu_len > max_pdu_len) { + session_shutdown(nbr, S_BAD_PDU_LEN, 0, 0); + free(buf); + return (0); + } + pdu_len -= LDP_HDR_PDU_LEN; + if (ldp_hdr->lsr_id != nbr->id.s_addr || + ldp_hdr->lspace_id != 0) { + session_shutdown(nbr, S_BAD_LDP_ID, 0, 0); + free(buf); + return (0); + } + pdu += LDP_HDR_SIZE; + len -= LDP_HDR_SIZE; + + nbr_fsm(nbr, NBR_EVT_PDU_RCVD); + + while (len >= LDP_MSG_SIZE) { + uint16_t type; + + msg = (struct ldp_msg *)pdu; + type = ntohs(msg->type); + msg_len = ntohs(msg->length); + if (msg_len < LDP_MSG_LEN || + (msg_len + LDP_MSG_DEAD_LEN) > pdu_len) { + session_shutdown(nbr, S_BAD_TLV_LEN, msg->id, + msg->type); + free(buf); + return (0); + } + msg_size = msg_len + LDP_MSG_DEAD_LEN; + pdu_len -= msg_size; + + /* check for error conditions earlier */ + switch (type) { + case MSG_TYPE_INIT: + if ((nbr->state != NBR_STA_INITIAL) && + (nbr->state != NBR_STA_OPENSENT)) { + session_shutdown(nbr, S_SHUTDOWN, + msg->id, msg->type); + free(buf); + return (0); + } + break; + case MSG_TYPE_KEEPALIVE: + if ((nbr->state == NBR_STA_INITIAL) || + (nbr->state == NBR_STA_OPENSENT)) { + session_shutdown(nbr, S_SHUTDOWN, + msg->id, msg->type); + free(buf); + return (0); + } + break; + case MSG_TYPE_ADDR: + case MSG_TYPE_ADDRWITHDRAW: + case MSG_TYPE_LABELMAPPING: + case MSG_TYPE_LABELREQUEST: + case MSG_TYPE_LABELWITHDRAW: + case MSG_TYPE_LABELRELEASE: + case MSG_TYPE_LABELABORTREQ: + if (nbr->state != NBR_STA_OPER) { + session_shutdown(nbr, S_SHUTDOWN, + msg->id, msg->type); + free(buf); + return (0); + } + break; + default: + break; + } + + /* switch LDP packet type */ + switch (type) { + case MSG_TYPE_NOTIFICATION: + ret = recv_notification(nbr, pdu, msg_size); + break; + case MSG_TYPE_INIT: + ret = recv_init(nbr, pdu, msg_size); + break; + case MSG_TYPE_KEEPALIVE: + ret = recv_keepalive(nbr, pdu, msg_size); + break; + case MSG_TYPE_ADDR: + case MSG_TYPE_ADDRWITHDRAW: + ret = recv_address(nbr, pdu, msg_size); + break; + case MSG_TYPE_LABELMAPPING: + case MSG_TYPE_LABELREQUEST: + case MSG_TYPE_LABELWITHDRAW: + case MSG_TYPE_LABELRELEASE: + case MSG_TYPE_LABELABORTREQ: + ret = recv_labelmessage(nbr, pdu, msg_size, + type); + break; + default: + log_debug("%s: unknown LDP message from nbr %s", + __func__, inet_ntoa(nbr->id)); + if (!(ntohs(msg->type) & UNKNOWN_FLAG)) + send_notification_nbr(nbr, + S_UNKNOWN_MSG, msg->id, msg->type); + /* ignore the message */ + ret = 0; + break; + } + + if (ret == -1) { + /* parser failed, giving up */ + free(buf); + return (0); + } + + /* Analyse the next message */ + pdu += msg_size; + len -= msg_size; + } + free(buf); + if (len != 0) { + session_shutdown(nbr, S_BAD_PDU_LEN, 0, 0); + return (0); + } + } + + return (0); +} + +static int +session_write(struct thread *thread) +{ + struct tcp_conn *tcp = THREAD_ARG(thread); + struct nbr *nbr = tcp->nbr; + + tcp->wbuf.ev = NULL; + + if (msgbuf_write(&tcp->wbuf.wbuf) <= 0) + if (errno != EAGAIN && nbr) + nbr_fsm(nbr, NBR_EVT_CLOSE_SESSION); + + if (nbr == NULL && !tcp->wbuf.wbuf.queued) { + /* + * We are done sending the notification message, now we can + * close the socket. 
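+ * Such a detached tcp_conn is created by pending_conn_timeout() to carry
+ * a single "No Hello" notification.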
+ */ + tcp_close(tcp); + return (0); + } + + evbuf_event_add(&tcp->wbuf); + + return (0); +} + +void +session_shutdown(struct nbr *nbr, uint32_t status, uint32_t msg_id, + uint32_t msg_type) +{ + switch (nbr->state) { + case NBR_STA_PRESENT: + if (nbr_pending_connect(nbr)) + THREAD_WRITE_OFF(nbr->ev_connect); + break; + case NBR_STA_INITIAL: + case NBR_STA_OPENREC: + case NBR_STA_OPENSENT: + case NBR_STA_OPER: + log_debug("%s: lsr-id %s", __func__, inet_ntoa(nbr->id)); + + send_notification_nbr(nbr, status, msg_id, msg_type); + + nbr_fsm(nbr, NBR_EVT_CLOSE_SESSION); + break; + default: + fatalx("session_shutdown: unknown neighbor state"); + } +} + +void +session_close(struct nbr *nbr) +{ + log_debug("%s: closing session with lsr-id %s", __func__, + inet_ntoa(nbr->id)); + + tcp_close(nbr->tcp); + nbr_stop_ktimer(nbr); + nbr_stop_ktimeout(nbr); + nbr_stop_itimeout(nbr); +} + +static ssize_t +session_get_pdu(struct ibuf_read *r, char **b) +{ + struct ldp_hdr l; + size_t av, dlen, left; + + av = r->wpos; + if (av < sizeof(l)) + return (0); + + memcpy(&l, r->buf, sizeof(l)); + dlen = ntohs(l.length) + LDP_HDR_DEAD_LEN; + if (dlen > av) + return (0); + + if ((*b = malloc(dlen)) == NULL) + return (-1); + + memcpy(*b, r->buf, dlen); + if (dlen < av) { + left = av - dlen; + memmove(r->buf, r->buf + dlen, left); + r->wpos = left; + } else + r->wpos = 0; + + return (dlen); +} + +struct tcp_conn * +tcp_new(int fd, struct nbr *nbr) +{ + struct tcp_conn *tcp; + struct sockaddr_storage src; + socklen_t len = sizeof(src); + + if ((tcp = calloc(1, sizeof(*tcp))) == NULL) + fatal(__func__); + + tcp->fd = fd; + evbuf_init(&tcp->wbuf, tcp->fd, session_write, tcp); + + if (nbr) { + if ((tcp->rbuf = calloc(1, sizeof(struct ibuf_read))) == NULL) + fatal(__func__); + + tcp->rev = thread_add_read(master, session_read, nbr, tcp->fd); + tcp->nbr = nbr; + } + + getsockname(fd, (struct sockaddr *)&src, &len); + sa2addr((struct sockaddr *)&src, NULL, NULL, &tcp->lport); + getpeername(fd, (struct sockaddr *)&src, &len); + sa2addr((struct sockaddr *)&src, NULL, NULL, &tcp->rport); + + return (tcp); +} + +static void +tcp_close(struct tcp_conn *tcp) +{ + /* try to flush write buffer */ + msgbuf_write(&tcp->wbuf.wbuf); + evbuf_clear(&tcp->wbuf); + + if (tcp->nbr) { + THREAD_READ_OFF(tcp->rev); + free(tcp->rbuf); + tcp->nbr->tcp = NULL; + } + + close(tcp->fd); + accept_unpause(); + free(tcp); +} + +static struct pending_conn * +pending_conn_new(int fd, int af, union ldpd_addr *addr) +{ + struct pending_conn *pconn; + + if ((pconn = calloc(1, sizeof(*pconn))) == NULL) + fatal(__func__); + + pconn->fd = fd; + pconn->af = af; + pconn->addr = *addr; + TAILQ_INSERT_TAIL(&global.pending_conns, pconn, entry); + pconn->ev_timeout = thread_add_timer(master, pending_conn_timeout, + pconn, PENDING_CONN_TIMEOUT); + + return (pconn); +} + +void +pending_conn_del(struct pending_conn *pconn) +{ + THREAD_TIMER_OFF(pconn->ev_timeout); + TAILQ_REMOVE(&global.pending_conns, pconn, entry); + free(pconn); +} + +struct pending_conn * +pending_conn_find(int af, union ldpd_addr *addr) +{ + struct pending_conn *pconn; + + TAILQ_FOREACH(pconn, &global.pending_conns, entry) + if (af == pconn->af && + ldp_addrcmp(af, addr, &pconn->addr) == 0) + return (pconn); + + return (NULL); +} + +static int +pending_conn_timeout(struct thread *thread) +{ + struct pending_conn *pconn = THREAD_ARG(thread); + struct tcp_conn *tcp; + + pconn->ev_timeout = NULL; + + log_debug("%s: no adjacency with remote end: %s", __func__, + log_addr(pconn->af, &pconn->addr)); + + 
/* + * Create a write buffer detached from any neighbor to send a + * notification message reliably. + */ + tcp = tcp_new(pconn->fd, NULL); + send_notification(S_NO_HELLO, tcp, 0, 0); + msgbuf_write(&tcp->wbuf.wbuf); + + pending_conn_del(pconn); + + return (0); +} diff --git a/ldpd/pfkey.c b/ldpd/pfkey.c new file mode 100644 index 0000000000..29f763e6a6 --- /dev/null +++ b/ldpd/pfkey.c @@ -0,0 +1,469 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2003, 2004 Henning Brauer + * Copyright (c) 2003, 2004 Markus Friedl + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifdef __OpenBSD__ +#include +#include +#include +#include +#include +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" + +static int pfkey_send(int, uint8_t, uint8_t, uint8_t, + int, union ldpd_addr *, union ldpd_addr *, + uint32_t, uint8_t, int, char *, uint8_t, int, char *, + uint16_t, uint16_t); +static int pfkey_reply(int, uint32_t *); +static int pfkey_sa_add(int, union ldpd_addr *, union ldpd_addr *, + uint8_t, char *, uint32_t *); +static int pfkey_sa_remove(int, union ldpd_addr *, union ldpd_addr *, + uint32_t *); +static int pfkey_md5sig_establish(struct nbr *, struct nbr_params *nbrp); +static int pfkey_md5sig_remove(struct nbr *); + +#define PFKEY2_CHUNK sizeof(uint64_t) +#define ROUNDUP(x) (((x) + (PFKEY2_CHUNK - 1)) & ~(PFKEY2_CHUNK - 1)) +#define IOV_CNT 20 + +static uint32_t sadb_msg_seq; +static uint32_t pid; /* should pid_t but pfkey needs uint32_t */ +static int fd; + +static int +pfkey_send(int sd, uint8_t satype, uint8_t mtype, uint8_t dir, + int af, union ldpd_addr *src, union ldpd_addr *dst, uint32_t spi, + uint8_t aalg, int alen, char *akey, uint8_t ealg, int elen, char *ekey, + uint16_t sport, uint16_t dport) +{ + struct sadb_msg smsg; + struct sadb_sa sa; + struct sadb_address sa_src, sa_dst; + struct sadb_key sa_akey, sa_ekey; + struct sadb_spirange sa_spirange; + struct iovec iov[IOV_CNT]; + ssize_t n; + int len = 0; + int iov_cnt; + struct sockaddr_storage ssrc, sdst, smask, dmask; + struct sockaddr *saptr; + + if (!pid) + pid = getpid(); + + /* we need clean sockaddr... 
no ports set */ + memset(&ssrc, 0, sizeof(ssrc)); + memset(&smask, 0, sizeof(smask)); + if ((saptr = addr2sa(af, src, 0))) + memcpy(&ssrc, saptr, sizeof(ssrc)); + switch (af) { + case AF_INET: + memset(&((struct sockaddr_in *)&smask)->sin_addr, 0xff, 32/8); + break; + case AF_INET6: + memset(&((struct sockaddr_in6 *)&smask)->sin6_addr, 0xff, + 128/8); + break; + default: + return (-1); + } + smask.ss_family = ssrc.ss_family; + smask.ss_len = ssrc.ss_len; + + memset(&sdst, 0, sizeof(sdst)); + memset(&dmask, 0, sizeof(dmask)); + if ((saptr = addr2sa(af, dst, 0))) + memcpy(&sdst, saptr, sizeof(sdst)); + switch (af) { + case AF_INET: + memset(&((struct sockaddr_in *)&dmask)->sin_addr, 0xff, 32/8); + break; + case AF_INET6: + memset(&((struct sockaddr_in6 *)&dmask)->sin6_addr, 0xff, + 128/8); + break; + default: + return (-1); + } + dmask.ss_family = sdst.ss_family; + dmask.ss_len = sdst.ss_len; + + memset(&smsg, 0, sizeof(smsg)); + smsg.sadb_msg_version = PF_KEY_V2; + smsg.sadb_msg_seq = ++sadb_msg_seq; + smsg.sadb_msg_pid = pid; + smsg.sadb_msg_len = sizeof(smsg) / 8; + smsg.sadb_msg_type = mtype; + smsg.sadb_msg_satype = satype; + + switch (mtype) { + case SADB_GETSPI: + memset(&sa_spirange, 0, sizeof(sa_spirange)); + sa_spirange.sadb_spirange_exttype = SADB_EXT_SPIRANGE; + sa_spirange.sadb_spirange_len = sizeof(sa_spirange) / 8; + sa_spirange.sadb_spirange_min = 0x100; + sa_spirange.sadb_spirange_max = 0xffffffff; + sa_spirange.sadb_spirange_reserved = 0; + break; + case SADB_ADD: + case SADB_UPDATE: + case SADB_DELETE: + memset(&sa, 0, sizeof(sa)); + sa.sadb_sa_exttype = SADB_EXT_SA; + sa.sadb_sa_len = sizeof(sa) / 8; + sa.sadb_sa_replay = 0; + sa.sadb_sa_spi = spi; + sa.sadb_sa_state = SADB_SASTATE_MATURE; + break; + } + + memset(&sa_src, 0, sizeof(sa_src)); + sa_src.sadb_address_exttype = SADB_EXT_ADDRESS_SRC; + sa_src.sadb_address_len = (sizeof(sa_src) + ROUNDUP(ssrc.ss_len)) / 8; + + memset(&sa_dst, 0, sizeof(sa_dst)); + sa_dst.sadb_address_exttype = SADB_EXT_ADDRESS_DST; + sa_dst.sadb_address_len = (sizeof(sa_dst) + ROUNDUP(sdst.ss_len)) / 8; + + sa.sadb_sa_auth = aalg; + sa.sadb_sa_encrypt = SADB_X_EALG_AES; /* XXX */ + + switch (mtype) { + case SADB_ADD: + case SADB_UPDATE: + memset(&sa_akey, 0, sizeof(sa_akey)); + sa_akey.sadb_key_exttype = SADB_EXT_KEY_AUTH; + sa_akey.sadb_key_len = (sizeof(sa_akey) + + ((alen + 7) / 8) * 8) / 8; + sa_akey.sadb_key_bits = 8 * alen; + + memset(&sa_ekey, 0, sizeof(sa_ekey)); + sa_ekey.sadb_key_exttype = SADB_EXT_KEY_ENCRYPT; + sa_ekey.sadb_key_len = (sizeof(sa_ekey) + + ((elen + 7) / 8) * 8) / 8; + sa_ekey.sadb_key_bits = 8 * elen; + + break; + } + + iov_cnt = 0; + + /* msghdr */ + iov[iov_cnt].iov_base = &smsg; + iov[iov_cnt].iov_len = sizeof(smsg); + iov_cnt++; + + switch (mtype) { + case SADB_ADD: + case SADB_UPDATE: + case SADB_DELETE: + /* SA hdr */ + iov[iov_cnt].iov_base = &sa; + iov[iov_cnt].iov_len = sizeof(sa); + smsg.sadb_msg_len += sa.sadb_sa_len; + iov_cnt++; + break; + case SADB_GETSPI: + /* SPI range */ + iov[iov_cnt].iov_base = &sa_spirange; + iov[iov_cnt].iov_len = sizeof(sa_spirange); + smsg.sadb_msg_len += sa_spirange.sadb_spirange_len; + iov_cnt++; + break; + } + + /* dest addr */ + iov[iov_cnt].iov_base = &sa_dst; + iov[iov_cnt].iov_len = sizeof(sa_dst); + iov_cnt++; + iov[iov_cnt].iov_base = &sdst; + iov[iov_cnt].iov_len = ROUNDUP(sdst.ss_len); + smsg.sadb_msg_len += sa_dst.sadb_address_len; + iov_cnt++; + + /* src addr */ + iov[iov_cnt].iov_base = &sa_src; + iov[iov_cnt].iov_len = sizeof(sa_src); + iov_cnt++; + 
iov[iov_cnt].iov_base = &ssrc; + iov[iov_cnt].iov_len = ROUNDUP(ssrc.ss_len); + smsg.sadb_msg_len += sa_src.sadb_address_len; + iov_cnt++; + + switch (mtype) { + case SADB_ADD: + case SADB_UPDATE: + if (alen) { + /* auth key */ + iov[iov_cnt].iov_base = &sa_akey; + iov[iov_cnt].iov_len = sizeof(sa_akey); + iov_cnt++; + iov[iov_cnt].iov_base = akey; + iov[iov_cnt].iov_len = ((alen + 7) / 8) * 8; + smsg.sadb_msg_len += sa_akey.sadb_key_len; + iov_cnt++; + } + if (elen) { + /* encryption key */ + iov[iov_cnt].iov_base = &sa_ekey; + iov[iov_cnt].iov_len = sizeof(sa_ekey); + iov_cnt++; + iov[iov_cnt].iov_base = ekey; + iov[iov_cnt].iov_len = ((elen + 7) / 8) * 8; + smsg.sadb_msg_len += sa_ekey.sadb_key_len; + iov_cnt++; + } + break; + } + + len = smsg.sadb_msg_len * 8; + do { + n = writev(sd, iov, iov_cnt); + } while (n == -1 && (errno == EAGAIN || errno == EINTR)); + + if (n == -1) { + log_warn("writev (%d/%d)", iov_cnt, len); + return (-1); + } + + return (0); +} + +int +pfkey_read(int sd, struct sadb_msg *h) +{ + struct sadb_msg hdr; + + if (recv(sd, &hdr, sizeof(hdr), MSG_PEEK) != sizeof(hdr)) { + if (errno == EAGAIN || errno == EINTR) + return (0); + log_warn("pfkey peek"); + return (-1); + } + + /* XXX: Only one message can be outstanding. */ + if (hdr.sadb_msg_seq == sadb_msg_seq && + hdr.sadb_msg_pid == pid) { + if (h) + *h = hdr; + return (0); + } + + /* not ours, discard */ + if (read(sd, &hdr, sizeof(hdr)) == -1) { + if (errno == EAGAIN || errno == EINTR) + return (0); + log_warn("pfkey read"); + return (-1); + } + + return (1); +} + +static int +pfkey_reply(int sd, uint32_t *spip) +{ + struct sadb_msg hdr, *msg; + struct sadb_ext *ext; + struct sadb_sa *sa; + uint8_t *data; + ssize_t len; + int rv; + + do { + rv = pfkey_read(sd, &hdr); + if (rv == -1) + return (-1); + } while (rv); + + if (hdr.sadb_msg_errno != 0) { + errno = hdr.sadb_msg_errno; + if (errno == ESRCH) + return (0); + else { + log_warn("pfkey"); + return (-1); + } + } + if ((data = reallocarray(NULL, hdr.sadb_msg_len, PFKEY2_CHUNK)) == NULL) { + log_warn("pfkey malloc"); + return (-1); + } + len = hdr.sadb_msg_len * PFKEY2_CHUNK; + if (read(sd, data, len) != len) { + log_warn("pfkey read"); + explicit_bzero(data, len); + free(data); + return (-1); + } + + if (hdr.sadb_msg_type == SADB_GETSPI) { + if (spip == NULL) { + explicit_bzero(data, len); + free(data); + return (0); + } + + msg = (struct sadb_msg *)data; + for (ext = (struct sadb_ext *)(msg + 1); + (size_t)((uint8_t *)ext - (uint8_t *)msg) < + msg->sadb_msg_len * PFKEY2_CHUNK; + ext = (struct sadb_ext *)((uint8_t *)ext + + ext->sadb_ext_len * PFKEY2_CHUNK)) { + if (ext->sadb_ext_type == SADB_EXT_SA) { + sa = (struct sadb_sa *) ext; + *spip = sa->sadb_sa_spi; + break; + } + } + } + explicit_bzero(data, len); + free(data); + return (0); +} + +static int +pfkey_sa_add(int af, union ldpd_addr *src, union ldpd_addr *dst, uint8_t keylen, + char *key, uint32_t *spi) +{ + if (pfkey_send(fd, SADB_X_SATYPE_TCPSIGNATURE, SADB_GETSPI, 0, + af, src, dst, 0, 0, 0, NULL, 0, 0, NULL, 0, 0) < 0) + return (-1); + if (pfkey_reply(fd, spi) < 0) + return (-1); + if (pfkey_send(fd, SADB_X_SATYPE_TCPSIGNATURE, SADB_UPDATE, 0, + af, src, dst, *spi, 0, keylen, key, 0, 0, NULL, 0, 0) < 0) + return (-1); + if (pfkey_reply(fd, NULL) < 0) + return (-1); + return (0); +} + +static int +pfkey_sa_remove(int af, union ldpd_addr *src, union ldpd_addr *dst, + uint32_t *spi) +{ + if (pfkey_send(fd, SADB_X_SATYPE_TCPSIGNATURE, SADB_DELETE, 0, + af, src, dst, *spi, 0, 0, NULL, 0, 0, NULL, 0, 0) < 0) 
+ return (-1); + if (pfkey_reply(fd, NULL) < 0) + return (-1); + *spi = 0; + return (0); +} + +static int +pfkey_md5sig_establish(struct nbr *nbr, struct nbr_params *nbrp) +{ + sleep(1); + + if (!nbr->auth.spi_out) + if (pfkey_sa_add(nbr->af, &nbr->laddr, &nbr->raddr, + nbrp->auth.md5key_len, nbrp->auth.md5key, + &nbr->auth.spi_out) == -1) + return (-1); + if (!nbr->auth.spi_in) + if (pfkey_sa_add(nbr->af, &nbr->raddr, &nbr->laddr, + nbrp->auth.md5key_len, nbrp->auth.md5key, + &nbr->auth.spi_in) == -1) + return (-1); + + nbr->auth.established = 1; + return (0); +} + +static int +pfkey_md5sig_remove(struct nbr *nbr) +{ + if (nbr->auth.spi_out) + if (pfkey_sa_remove(nbr->af, &nbr->laddr, &nbr->raddr, + &nbr->auth.spi_out) == -1) + return (-1); + if (nbr->auth.spi_in) + if (pfkey_sa_remove(nbr->af, &nbr->raddr, &nbr->laddr, + &nbr->auth.spi_in) == -1) + return (-1); + + nbr->auth.established = 0; + nbr->auth.spi_in = 0; + nbr->auth.spi_out = 0; + nbr->auth.method = AUTH_NONE; + memset(nbr->auth.md5key, 0, sizeof(nbr->auth.md5key)); + + return (0); +} + +int +pfkey_establish(struct nbr *nbr, struct nbr_params *nbrp) +{ + if (nbrp->auth.method == AUTH_NONE) + return (0); + + /* + * make sure we keep copies of everything we need to + * remove SAs and flows later again. + */ + nbr->auth.method = nbrp->auth.method; + + switch (nbr->auth.method) { + case AUTH_MD5SIG: + strlcpy(nbr->auth.md5key, nbrp->auth.md5key, + sizeof(nbr->auth.md5key)); + return (pfkey_md5sig_establish(nbr, nbrp)); + default: + break; + } + + return (0); +} + +int +pfkey_remove(struct nbr *nbr) +{ + if (nbr->auth.method == AUTH_NONE || !nbr->auth.established) + return (0); + + switch (nbr->auth.method) { + case AUTH_MD5SIG: + return (pfkey_md5sig_remove(nbr)); + default: + break; + } + + return (0); +} + +int +pfkey_init(void) +{ + if ((fd = socket(PF_KEY, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, + PF_KEY_V2)) == -1) { + if (errno == EPROTONOSUPPORT) { + log_warnx("PF_KEY not available"); + sysdep.no_pfkey = 1; + return (-1); + } else + fatal("pfkey setup failed"); + } + return (fd); +} +#endif /* __OpenBSD__ */ diff --git a/ldpd/socket.c b/ldpd/socket.c new file mode 100644 index 0000000000..cf352d7204 --- /dev/null +++ b/ldpd/socket.c @@ -0,0 +1,514 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2016 Renato Westphal + * Copyright (c) 2009 Michele Marchetto + * Copyright (c) 2005 Claudio Jeker + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "ldpd.h" +#include "ldpe.h" +#include "log.h" + +#include "lib/log.h" +#include "privs.h" +#include "sockopt.h" + +extern struct zebra_privs_t ldpd_privs; +extern struct zebra_privs_t ldpe_privs; + +int +ldp_create_socket(int af, enum socket_type type) +{ + int fd, domain, proto; + union ldpd_addr addr; + struct sockaddr_storage local_sa; +#ifdef __OpenBSD__ + int opt; +#endif + int save_errno; + + /* create socket */ + switch (type) { + case LDP_SOCKET_DISC: + case LDP_SOCKET_EDISC: + domain = SOCK_DGRAM; + proto = IPPROTO_UDP; + break; + case LDP_SOCKET_SESSION: + domain = SOCK_STREAM; + proto = IPPROTO_TCP; + break; + default: + fatalx("ldp_create_socket: unknown socket type"); + } + fd = socket(af, domain, proto); + if (fd == -1) { + log_warn("%s: error creating socket", __func__); + return (-1); + } + sock_set_nonblock(fd); + sockopt_v6only(af, fd); + + /* bind to a local address/port */ + switch (type) { + case LDP_SOCKET_DISC: + /* listen on all addresses */ + memset(&addr, 0, sizeof(addr)); + memcpy(&local_sa, addr2sa(af, &addr, LDP_PORT), + sizeof(local_sa)); + break; + case LDP_SOCKET_EDISC: + case LDP_SOCKET_SESSION: + addr = (ldp_af_conf_get(ldpd_conf, af))->trans_addr; + memcpy(&local_sa, addr2sa(af, &addr, LDP_PORT), + sizeof(local_sa)); + /* ignore any possible error */ + sock_set_bindany(fd, 1); + break; + } + if (ldpd_privs.change(ZPRIVS_RAISE)) + log_warn("%s: could not raise privs", __func__); + if (sock_set_reuse(fd, 1) == -1) { + close(fd); + return (-1); + } + if (bind(fd, (struct sockaddr *)&local_sa, + sockaddr_len((struct sockaddr *)&local_sa)) == -1) { + save_errno = errno; + if (ldpd_privs.change(ZPRIVS_LOWER)) + log_warn("%s: could not lower privs", __func__); + log_warnx("%s: error binding socket: %s", __func__, + safe_strerror(save_errno)); + close(fd); + return (-1); + } + if (ldpd_privs.change(ZPRIVS_LOWER)) + log_warn("%s: could not lower privs", __func__); + + /* set options */ + switch (af) { + case AF_INET: + if (sock_set_ipv4_tos(fd, IPTOS_PREC_INTERNETCONTROL) == -1) { + close(fd); + return (-1); + } + if (type == LDP_SOCKET_DISC) { + if (sock_set_ipv4_mcast_ttl(fd, + IP_DEFAULT_MULTICAST_TTL) == -1) { + close(fd); + return (-1); + } + if (sock_set_ipv4_mcast_loop(fd) == -1) { + close(fd); + return (-1); + } + } + if (type == LDP_SOCKET_DISC || type == LDP_SOCKET_EDISC) { + if (sock_set_ipv4_recvif(fd, 1) == -1) { + close(fd); + return (-1); + } +#ifndef MSG_MCAST +#if defined(HAVE_IP_PKTINFO) + if (sock_set_ipv4_pktinfo(fd, 1) == -1) { + close(fd); + return (-1); + } +#elif defined(HAVE_IP_RECVDSTADDR) + if (sock_set_ipv4_recvdstaddr(fd, 1) == -1) { + close(fd); + return (-1); + } +#else +#error "Unsupported socket API" +#endif +#endif /* MSG_MCAST */ + } + if (type == LDP_SOCKET_SESSION) { + if (sock_set_ipv4_ucast_ttl(fd, 255) == -1) { + close(fd); + return (-1); + } + } + break; + case AF_INET6: + if (sock_set_ipv6_dscp(fd, IPTOS_PREC_INTERNETCONTROL) == -1) { + close(fd); + return (-1); + } + if (type == LDP_SOCKET_DISC) { + if (sock_set_ipv6_mcast_loop(fd) == -1) { + close(fd); + return (-1); + } + if (sock_set_ipv6_mcast_hops(fd, 255) == -1) { + close(fd); + return (-1); + } + if (!(ldpd_conf->ipv6.flags & F_LDPD_AF_NO_GTSM)) { + /* ignore any possible error */ + sock_set_ipv6_minhopcount(fd, 255); + } + } + if (type == LDP_SOCKET_DISC || type == LDP_SOCKET_EDISC) { + if (sock_set_ipv6_pktinfo(fd, 1) == -1) { + close(fd); + return (-1); + } + } + if (type == LDP_SOCKET_SESSION) { + if (sock_set_ipv6_ucast_hops(fd, 
255) == -1) { + close(fd); + return (-1); + } + } + break; + } + switch (type) { + case LDP_SOCKET_DISC: + case LDP_SOCKET_EDISC: + sock_set_recvbuf(fd); + break; + case LDP_SOCKET_SESSION: + if (listen(fd, LDP_BACKLOG) == -1) + log_warn("%s: error listening on socket", __func__); + +#ifdef __OpenBSD__ + opt = 1; + if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt, + sizeof(opt)) == -1) { + if (errno == ENOPROTOOPT) { /* system w/o md5sig */ + log_warnx("md5sig not available, disabling"); + sysdep.no_md5sig = 1; + } else { + close(fd); + return (-1); + } + } +#endif + break; + } + + return (fd); +} + +void +sock_set_nonblock(int fd) +{ + int flags; + + if ((flags = fcntl(fd, F_GETFL, 0)) == -1) + fatal("fcntl F_GETFL"); + + flags |= O_NONBLOCK; + + if ((flags = fcntl(fd, F_SETFL, flags)) == -1) + fatal("fcntl F_SETFL"); +} + +void +sock_set_cloexec(int fd) +{ + int flags; + + if ((flags = fcntl(fd, F_GETFD, 0)) == -1) + fatal("fcntl F_GETFD"); + + flags |= FD_CLOEXEC; + + if ((flags = fcntl(fd, F_SETFD, flags)) == -1) + fatal("fcntl F_SETFD"); +} + +void +sock_set_recvbuf(int fd) +{ + int bsize; + + bsize = 65535; + while (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bsize, + sizeof(bsize)) == -1) + bsize /= 2; +} + +int +sock_set_reuse(int fd, int enable) +{ + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &enable, + sizeof(int)) < 0) { + log_warn("%s: error setting SO_REUSEADDR", __func__); + return (-1); + } + + return (0); +} + +int +sock_set_bindany(int fd, int enable) +{ +#ifdef HAVE_SO_BINDANY + if (ldpd_privs.change(ZPRIVS_RAISE)) + log_warn("%s: could not raise privs", __func__); + if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable, + sizeof(int)) < 0) { + if (ldpd_privs.change(ZPRIVS_LOWER)) + log_warn("%s: could not lower privs", __func__); + log_warn("%s: error setting SO_BINDANY", __func__); + return (-1); + } + if (ldpd_privs.change(ZPRIVS_LOWER)) + log_warn("%s: could not lower privs", __func__); + return (0); +#elif defined(HAVE_IP_FREEBIND) + if (setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &enable, sizeof(int)) < 0) { + log_warn("%s: error setting IP_FREEBIND", __func__); + return (-1); + } + return (0); +#else + log_warnx("%s: missing SO_BINDANY and IP_FREEBIND, unable to bind " + "to a nonlocal IP address", __func__); + return (-1); +#endif /* HAVE_SO_BINDANY */ +} + +#ifndef __OpenBSD__ +/* + * Set MD5 key for the socket, for the given peer address. If the password + * is NULL or zero-length, the option will be disabled. 
+ */ +int +sock_set_md5sig(int fd, int af, union ldpd_addr *addr, const char *password) +{ + int ret = -1; + int save_errno = ENOSYS; +#if HAVE_DECL_TCP_MD5SIG + union sockunion su; +#endif + + if (fd == -1) + return (0); +#if HAVE_DECL_TCP_MD5SIG + memcpy(&su, addr2sa(af, addr, 0), sizeof(su)); + + if (ldpe_privs.change(ZPRIVS_RAISE)) { + log_warn("%s: could not raise privs", __func__); + return (-1); + } + ret = sockopt_tcp_signature(fd, &su, password); + save_errno = errno; + if (ldpe_privs.change(ZPRIVS_LOWER)) + log_warn("%s: could not lower privs", __func__); +#endif /* HAVE_TCP_MD5SIG */ + if (ret < 0) + log_warnx("%s: can't set TCP_MD5SIG option on fd %d: %s", + __func__, fd, safe_strerror(save_errno)); + + return (ret); +} +#endif + +int +sock_set_ipv4_tos(int fd, int tos) +{ + if (setsockopt(fd, IPPROTO_IP, IP_TOS, (int *)&tos, sizeof(tos)) < 0) { + log_warn("%s: error setting IP_TOS to 0x%x", __func__, tos); + return (-1); + } + + return (0); +} + +int +sock_set_ipv4_recvif(int fd, int enable) +{ + return (setsockopt_ifindex(AF_INET, fd, enable)); +} + +int +sock_set_ipv4_minttl(int fd, int ttl) +{ + return (sockopt_minttl(AF_INET, fd, ttl)); +} + +int +sock_set_ipv4_ucast_ttl(int fd, int ttl) +{ + if (setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) < 0) { + log_warn("%s: error setting IP_TTL", __func__); + return (-1); + } + + return (0); +} + +int +sock_set_ipv4_mcast_ttl(int fd, uint8_t ttl) +{ + if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, + (char *)&ttl, sizeof(ttl)) < 0) { + log_warn("%s: error setting IP_MULTICAST_TTL to %d", + __func__, ttl); + return (-1); + } + + return (0); +} + +#ifndef MSG_MCAST +#if defined(HAVE_IP_PKTINFO) +int +sock_set_ipv4_pktinfo(int fd, int enable) +{ + if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &enable, + sizeof(enable)) < 0) { + log_warn("%s: error setting IP_PKTINFO", __func__); + return (-1); + } + + return (0); +} +#elif defined(HAVE_IP_RECVDSTADDR) +int +sock_set_ipv4_recvdstaddr(int fd, int enable) +{ + if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &enable, + sizeof(enable)) < 0) { + log_warn("%s: error setting IP_RECVDSTADDR", __func__); + return (-1); + } + + return (0); +} +#else +#error "Unsupported socket API" +#endif +#endif /* MSG_MCAST */ + +int +sock_set_ipv4_mcast(struct iface *iface) +{ + struct in_addr if_addr; + + if_addr.s_addr = if_get_ipv4_addr(iface); + + if (setsockopt_ipv4_multicast_if(global.ipv4.ldp_disc_socket, + if_addr, iface->ifindex) < 0) { + log_warn("%s: error setting IP_MULTICAST_IF, interface %s", + __func__, iface->name); + return (-1); + } + + return (0); +} + +int +sock_set_ipv4_mcast_loop(int fd) +{ + uint8_t loop = 0; + + if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, + (char *)&loop, sizeof(loop)) < 0) { + log_warn("%s: error setting IP_MULTICAST_LOOP", __func__); + return (-1); + } + + return (0); +} + +int +sock_set_ipv6_dscp(int fd, int dscp) +{ + if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &dscp, + sizeof(dscp)) < 0) { + log_warn("%s: error setting IPV6_TCLASS", __func__); + return (-1); + } + + return (0); +} + +int +sock_set_ipv6_pktinfo(int fd, int enable) +{ + if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &enable, + sizeof(enable)) < 0) { + log_warn("%s: error setting IPV6_RECVPKTINFO", __func__); + return (-1); + } + + return (0); +} + +int +sock_set_ipv6_minhopcount(int fd, int hoplimit) +{ + return (sockopt_minttl(AF_INET6, fd, hoplimit)); +} + +int +sock_set_ipv6_ucast_hops(int fd, int hoplimit) +{ + if (setsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, + &hoplimit, 
sizeof(hoplimit)) < 0) { + log_warn("%s: error setting IPV6_UNICAST_HOPS", __func__); + return (-1); + } + + return (0); +} + +int +sock_set_ipv6_mcast_hops(int fd, int hoplimit) +{ + if (setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, + &hoplimit, sizeof(hoplimit)) < 0) { + log_warn("%s: error setting IPV6_MULTICAST_HOPS", __func__); + return (-1); + } + + return (0); +} + +int +sock_set_ipv6_mcast(struct iface *iface) +{ + if (setsockopt(global.ipv6.ldp_disc_socket, IPPROTO_IPV6, + IPV6_MULTICAST_IF, &iface->ifindex, sizeof(iface->ifindex)) < 0) { + log_warn("%s: error setting IPV6_MULTICAST_IF, interface %s", + __func__, iface->name); + return (-1); + } + + return (0); +} + +int +sock_set_ipv6_mcast_loop(int fd) +{ + unsigned int loop = 0; + + if (setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_LOOP, + &loop, sizeof(loop)) < 0) { + log_warn("%s: error setting IPV6_MULTICAST_LOOP", __func__); + return (-1); + } + + return (0); +} diff --git a/ldpd/util.c b/ldpd/util.c new file mode 100644 index 0000000000..e735263f5f --- /dev/null +++ b/ldpd/util.c @@ -0,0 +1,385 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2015 Renato Westphal + * Copyright (c) 2012 Alexander Bluhm + * Copyright (c) 2004 Esben Norby + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ldpd.h" +#include "log.h" + +uint8_t +mask2prefixlen(in_addr_t ina) +{ + if (ina == 0) + return (0); + else + return (33 - ffs(ntohl(ina))); +} + +uint8_t +mask2prefixlen6(struct sockaddr_in6 *sa_in6) +{ + uint8_t l = 0, *ap, *ep; + + /* + * sin6_len is the size of the sockaddr so substract the offset of + * the possibly truncated sin6_addr struct. + */ + ap = (uint8_t *)&sa_in6->sin6_addr; + ep = (uint8_t *)sa_in6 + sockaddr_len((struct sockaddr *)sa_in6); + for (; ap < ep; ap++) { + /* this "beauty" is adopted from sbin/route/show.c ... 
*/ + switch (*ap) { + case 0xff: + l += 8; + break; + case 0xfe: + l += 7; + return (l); + case 0xfc: + l += 6; + return (l); + case 0xf8: + l += 5; + return (l); + case 0xf0: + l += 4; + return (l); + case 0xe0: + l += 3; + return (l); + case 0xc0: + l += 2; + return (l); + case 0x80: + l += 1; + return (l); + case 0x00: + return (l); + default: + fatalx("non contiguous inet6 netmask"); + } + } + + return (l); +} + +in_addr_t +prefixlen2mask(uint8_t prefixlen) +{ + if (prefixlen == 0) + return (0); + + return (htonl(0xffffffff << (32 - prefixlen))); +} + +struct in6_addr * +prefixlen2mask6(uint8_t prefixlen) +{ + static struct in6_addr mask; + int i; + + memset(&mask, 0, sizeof(mask)); + for (i = 0; i < prefixlen / 8; i++) + mask.s6_addr[i] = 0xff; + i = prefixlen % 8; + if (i) + mask.s6_addr[prefixlen / 8] = 0xff00 >> i; + + return (&mask); +} + +void +ldp_applymask(int af, union ldpd_addr *dest, const union ldpd_addr *src, + int prefixlen) +{ + struct in6_addr mask; + int i; + + switch (af) { + case AF_INET: + dest->v4.s_addr = src->v4.s_addr & prefixlen2mask(prefixlen); + break; + case AF_INET6: + memset(&mask, 0, sizeof(mask)); + for (i = 0; i < prefixlen / 8; i++) + mask.s6_addr[i] = 0xff; + i = prefixlen % 8; + if (i) + mask.s6_addr[prefixlen / 8] = 0xff00 >> i; + + for (i = 0; i < 16; i++) + dest->v6.s6_addr[i] = src->v6.s6_addr[i] & + mask.s6_addr[i]; + break; + default: + fatalx("ldp_applymask: unknown af"); + } +} + +int +ldp_addrcmp(int af, const union ldpd_addr *a, const union ldpd_addr *b) +{ + switch (af) { + case AF_INET: + if (a->v4.s_addr == b->v4.s_addr) + return (0); + return ((ntohl(a->v4.s_addr) > ntohl(b->v4.s_addr)) ? 1 : -1); + case AF_INET6: + return (memcmp(&a->v6, &b->v6, sizeof(struct in6_addr))); + default: + fatalx("ldp_addrcmp: unknown af"); + } +} + +int +ldp_addrisset(int af, const union ldpd_addr *addr) +{ + switch (af) { + case AF_UNSPEC: + return (0); + case AF_INET: + if (addr->v4.s_addr != INADDR_ANY) + return (1); + break; + case AF_INET6: + if (!IN6_IS_ADDR_UNSPECIFIED(&addr->v6)) + return (1); + break; + default: + fatalx("ldp_addrisset: unknown af"); + } + + return (0); +} + +int +ldp_prefixcmp(int af, const union ldpd_addr *a, const union ldpd_addr *b, + uint8_t prefixlen) +{ + in_addr_t mask, aa, ba; + int i; + uint8_t m; + + switch (af) { + case AF_INET: + if (prefixlen == 0) + return (0); + if (prefixlen > 32) + fatalx("ldp_prefixcmp: bad IPv4 prefixlen"); + mask = htonl(prefixlen2mask(prefixlen)); + aa = htonl(a->v4.s_addr) & mask; + ba = htonl(b->v4.s_addr) & mask; + return (aa - ba); + case AF_INET6: + if (prefixlen == 0) + return (0); + if (prefixlen > 128) + fatalx("ldp_prefixcmp: bad IPv6 prefixlen"); + for (i = 0; i < prefixlen / 8; i++) + if (a->v6.s6_addr[i] != b->v6.s6_addr[i]) + return (a->v6.s6_addr[i] - b->v6.s6_addr[i]); + i = prefixlen % 8; + if (i) { + m = 0xff00 >> i; + if ((a->v6.s6_addr[prefixlen / 8] & m) != + (b->v6.s6_addr[prefixlen / 8] & m)) + return ((a->v6.s6_addr[prefixlen / 8] & m) - + (b->v6.s6_addr[prefixlen / 8] & m)); + } + return (0); + default: + fatalx("ldp_prefixcmp: unknown af"); + } + return (-1); +} + +int +bad_addr_v4(struct in_addr addr) +{ + uint32_t a = ntohl(addr.s_addr); + + if (((a >> IN_CLASSA_NSHIFT) == 0) || + ((a >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) || + IN_MULTICAST(a) || IN_BADCLASS(a)) + return (1); + + return (0); +} + +int +bad_addr_v6(struct in6_addr *addr) +{ + if (IN6_IS_ADDR_UNSPECIFIED(addr) || + IN6_IS_ADDR_LOOPBACK(addr) || + IN6_IS_ADDR_MULTICAST(addr) || + 
IN6_IS_ADDR_SITELOCAL(addr) || + IN6_IS_ADDR_V4MAPPED(addr) || + IN6_IS_ADDR_V4COMPAT(addr)) + return (1); + + return (0); +} + +int +bad_addr(int af, union ldpd_addr *addr) +{ + switch (af) { + case AF_INET: + return (bad_addr_v4(addr->v4)); + case AF_INET6: + return (bad_addr_v6(&addr->v6)); + default: + fatalx("bad_addr: unknown af"); + } +} + +void +embedscope(struct sockaddr_in6 *sin6) +{ + uint16_t tmp16; + + if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) { + memcpy(&tmp16, &sin6->sin6_addr.s6_addr[2], sizeof(tmp16)); + if (tmp16 != 0) { + log_warnx("%s: address %s already has embeded scope %u", + __func__, log_sockaddr(sin6), ntohs(tmp16)); + } + tmp16 = htons(sin6->sin6_scope_id); + memcpy(&sin6->sin6_addr.s6_addr[2], &tmp16, sizeof(tmp16)); + sin6->sin6_scope_id = 0; + } +} + +void +recoverscope(struct sockaddr_in6 *sin6) +{ + uint16_t tmp16; + + if (sin6->sin6_scope_id != 0) + log_warnx("%s: address %s already has scope id %u", + __func__, log_sockaddr(sin6), sin6->sin6_scope_id); + + if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) { + memcpy(&tmp16, &sin6->sin6_addr.s6_addr[2], sizeof(tmp16)); + sin6->sin6_scope_id = ntohs(tmp16); + sin6->sin6_addr.s6_addr[2] = 0; + sin6->sin6_addr.s6_addr[3] = 0; + } +} + +void +addscope(struct sockaddr_in6 *sin6, uint32_t id) +{ + if (sin6->sin6_scope_id != 0) + log_warnx("%s: address %s already has scope id %u", __func__, + log_sockaddr(sin6), sin6->sin6_scope_id); + + if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) + sin6->sin6_scope_id = id; +} + +void +clearscope(struct in6_addr *in6) +{ + if (IN6_IS_SCOPE_EMBED(in6)) { + in6->s6_addr[2] = 0; + in6->s6_addr[3] = 0; + } +} + +struct sockaddr * +addr2sa(int af, union ldpd_addr *addr, uint16_t port) +{ + static struct sockaddr_storage ss; + struct sockaddr_in *sa_in = (struct sockaddr_in *)&ss; + struct sockaddr_in6 *sa_in6 = (struct sockaddr_in6 *)&ss; + + memset(&ss, 0, sizeof(ss)); + switch (af) { + case AF_INET: + sa_in->sin_family = AF_INET; +#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN + sa_in->sin_len = sizeof(struct sockaddr_in); +#endif + sa_in->sin_addr = addr->v4; + sa_in->sin_port = htons(port); + break; + case AF_INET6: + sa_in6->sin6_family = AF_INET6; +#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN + sa_in6->sin6_len = sizeof(struct sockaddr_in6); +#endif + sa_in6->sin6_addr = addr->v6; + sa_in6->sin6_port = htons(port); + break; + default: + fatalx("addr2sa: unknown af"); + } + + return ((struct sockaddr *)&ss); +} + +void +sa2addr(struct sockaddr *sa, int *af, union ldpd_addr *addr, in_port_t *port) +{ + struct sockaddr_in *sa_in = (struct sockaddr_in *)sa; + struct sockaddr_in6 *sa_in6 = (struct sockaddr_in6 *)sa; + + if (addr) + memset(addr, 0, sizeof(*addr)); + switch (sa->sa_family) { + case AF_INET: + if (af) + *af = AF_INET; + if (addr) + addr->v4 = sa_in->sin_addr; + if (port) + *port = sa_in->sin_port; + break; + case AF_INET6: + if (af) + *af = AF_INET6; + if (addr) + addr->v6 = sa_in6->sin6_addr; + if (port) + *port = sa_in6->sin6_port; + break; + default: + fatalx("sa2addr: unknown af"); + } +} + +socklen_t +sockaddr_len(struct sockaddr *sa) +{ +#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN + return (sa->sa_len); +#else + switch (sa->sa_family) { + case AF_INET: + return (sizeof(struct sockaddr_in)); + case AF_INET6: + return (sizeof(struct sockaddr_in6)); + default: + fatalx("sockaddr_len: unknown af"); + } +#endif +} diff --git a/lib/Makefile.am b/lib/Makefile.am index acaf7e6744..dbf1a82be2 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -1,6 +1,7 @@ ## Process this file with automake to 
produce Makefile.in. -AM_CPPFLAGS = -I.. -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib +AM_CPPFLAGS = -I.. -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib \ + -DVTY_DEPRECATE_INDEX AM_CFLAGS = $(WERROR) DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" AM_YFLAGS = -d @@ -17,7 +18,9 @@ libzebra_la_SOURCES = \ filter.c routemap.c distribute.c stream.c str.c log.c plist.c \ zclient.c sockopt.c smux.c agentx.c snmp.c md5.c if_rmap.c keychain.c privs.c \ sigevent.c pqueue.c jhash.c workqueue.c nexthop.c json.c \ - ptm_lib.c csv.c bfd.c vrf.c systemd.c ns.c memory.c memory_vty.c + ptm_lib.c csv.c bfd.c vrf.c systemd.c ns.c memory.c memory_vty.c \ + imsg-buffer.c imsg.c skiplist.c \ + qobj.c BUILT_SOURCES = route_types.h gitversion.h command_parse.h @@ -36,7 +39,8 @@ pkginclude_HEADERS = \ privs.h sigevent.h pqueue.h jhash.h zassert.h \ workqueue.h route_types.h libospf.h nexthop.h json.h \ ptm_lib.h csv.h bfd.h vrf.h ns.h systemd.h bitfield.h \ - fifo.h memory_vty.h + fifo.h memory_vty.h mpls.h imsg.h openbsd-queue.h openbsd-tree.h \ + skiplist.h qobj.h noinst_HEADERS = \ plist_int.h diff --git a/lib/checksum.c b/lib/checksum.c index 116aaafc97..3d6cd45791 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -47,7 +47,7 @@ in_cksum(void *parg, int nbytes) } /* Fletcher Checksum -- Refer to RFC1008. */ -#define MODX 4102 /* 5802 should be fine */ +#define MODX 4102U /* 5802 should be fine */ /* To be consistent, offset is 0-based index, rather than the 1-based index required in the specification ISO 8473, Annex C.1 */ diff --git a/lib/command.c b/lib/command.c index 2f83a7fa5e..63cfc695dc 100644 --- a/lib/command.c +++ b/lib/command.c @@ -40,6 +40,7 @@ #include "vrf.h" #include "command_match.h" #include "command_parse.h" +#include "qobj.h" DEFINE_MTYPE( LIB, HOST, "Host config") DEFINE_MTYPE( LIB, STRVEC, "String vector") @@ -65,12 +66,6 @@ static struct cmd_node view_node = "%s> ", }; -static struct cmd_node restricted_node = -{ - RESTRICTED_NODE, - "%s$ ", -}; - static struct cmd_node auth_enable_node = { AUTH_ENABLE_NODE, @@ -342,6 +337,9 @@ install_element (enum node_type ntype, struct cmd_element *cmd) command_parse_format (cnode->cmdgraph, cmd); vector_set (cnode->cmd_vector, cmd); + + if (ntype == VIEW_NODE) + install_element (ENABLE_NODE, cmd); } static const unsigned char itoa64[] = @@ -480,7 +478,6 @@ cmd_try_do_shortcut (enum node_type node, char* first_word) { node != VIEW_NODE && node != AUTH_ENABLE_NODE && node != ENABLE_NODE && - node != RESTRICTED_NODE && 0 == strcmp( "do", first_word ) ) return 1; return 0; @@ -695,6 +692,9 @@ node_parent ( enum node_type node ) case BGP_VPNV6_NODE: case BGP_ENCAP_NODE: case BGP_ENCAPV6_NODE: + case BGP_VNC_DEFAULTS_NODE: + case BGP_VNC_NVE_GROUP_NODE: + case BGP_VNC_L2_GROUP_NODE: case BGP_IPV4_NODE: case BGP_IPV4M_NODE: case BGP_IPV6_NODE: @@ -707,6 +707,19 @@ node_parent ( enum node_type node ) case LINK_PARAMS_NODE: ret = INTERFACE_NODE; break; + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + ret = LDP_NODE; + break; + case LDP_IPV4_IFACE_NODE: + ret = LDP_IPV4_NODE; + break; + case LDP_IPV6_IFACE_NODE: + ret = LDP_IPV6_NODE; + break; + case LDP_PSEUDOWIRE_NODE: + ret = LDP_L2VPN_NODE; + break; default: ret = CONFIG_NODE; break; @@ -999,7 +1012,6 @@ DEFUN (config_exit, { case VIEW_NODE: case ENABLE_NODE: - case RESTRICTED_NODE: if (vty_shell (vty)) exit (0); else @@ -1018,6 +1030,8 @@ DEFUN (config_exit, case RIPNG_NODE: case OSPF_NODE: case OSPF6_NODE: + case LDP_NODE: + case LDP_L2VPN_NODE: case ISIS_NODE: case KEYCHAIN_NODE: 
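    /*
     * LDP_NODE and LDP_L2VPN_NODE are grouped here with the other
     * top-level protocol nodes, so "exit" from them behaves like the
     * rest of this group (returning to the global configuration node);
     * the LDP address-family, interface and pseudowire sub-nodes are
     * handled separately below and pop back to their parent LDP nodes.
     */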
case MASC_NODE: @@ -1032,10 +1046,26 @@ DEFUN (config_exit, case BGP_VPNV6_NODE: case BGP_ENCAP_NODE: case BGP_ENCAPV6_NODE: + case BGP_VNC_DEFAULTS_NODE: + case BGP_VNC_NVE_GROUP_NODE: + case BGP_VNC_L2_GROUP_NODE: case BGP_IPV6_NODE: case BGP_IPV6M_NODE: vty->node = BGP_NODE; break; + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + vty->node = LDP_NODE; + break; + case LDP_IPV4_IFACE_NODE: + vty->node = LDP_IPV4_NODE; + break; + case LDP_IPV6_IFACE_NODE: + vty->node = LDP_IPV6_NODE; + break; + case LDP_PSEUDOWIRE_NODE: + vty->node = LDP_L2VPN_NODE; + break; case KEYCHAIN_KEY_NODE: vty->node = KEYCHAIN_NODE; break; @@ -1068,7 +1098,6 @@ DEFUN (config_end, { case VIEW_NODE: case ENABLE_NODE: - case RESTRICTED_NODE: /* Nothing to do. */ break; case CONFIG_NODE: @@ -1081,6 +1110,9 @@ DEFUN (config_end, case BGP_NODE: case BGP_ENCAP_NODE: case BGP_ENCAPV6_NODE: + case BGP_VNC_DEFAULTS_NODE: + case BGP_VNC_NVE_GROUP_NODE: + case BGP_VNC_L2_GROUP_NODE: case BGP_VPNV4_NODE: case BGP_VPNV6_NODE: case BGP_IPV4_NODE: @@ -1090,6 +1122,13 @@ DEFUN (config_end, case RMAP_NODE: case OSPF_NODE: case OSPF6_NODE: + case LDP_NODE: + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + case LDP_IPV4_IFACE_NODE: + case LDP_IPV6_IFACE_NODE: + case LDP_L2VPN_NODE: + case LDP_PSEUDOWIRE_NODE: case ISIS_NODE: case KEYCHAIN_NODE: case KEYCHAIN_KEY_NODE: @@ -1725,6 +1764,7 @@ DEFUN (config_logmsg, zlog(NULL, level, "%s", ((message = argv_concat(argv, argc, idx_message)) ? message : "")); if (message) XFREE(MTYPE_TMP, message); + return CMD_SUCCESS; } @@ -2195,6 +2235,8 @@ install_default (enum node_type node) void cmd_init (int terminal) { + qobj_init (); + /* Allocate initial top vector of commands. */ cmdvec = vector_init (VECTOR_MIN_SIZE); @@ -2213,7 +2255,6 @@ cmd_init (int terminal) install_node (&enable_node, NULL); install_node (&auth_node, NULL); install_node (&auth_enable_node, NULL); - install_node (&restricted_node, NULL); install_node (&config_node, config_write_host); /* Each node's basic commands. 
*/ @@ -2230,36 +2271,22 @@ cmd_init (int terminal) install_element (VIEW_NODE, &show_logging_cmd); install_element (VIEW_NODE, &show_commandtree_cmd); install_element (VIEW_NODE, &echo_cmd); - - install_element (RESTRICTED_NODE, &config_list_cmd); - install_element (RESTRICTED_NODE, &config_exit_cmd); - install_element (RESTRICTED_NODE, &config_quit_cmd); - install_element (RESTRICTED_NODE, &config_help_cmd); - install_element (RESTRICTED_NODE, &config_enable_cmd); - install_element (RESTRICTED_NODE, &config_terminal_length_cmd); - install_element (RESTRICTED_NODE, &config_terminal_no_length_cmd); - install_element (RESTRICTED_NODE, &echo_cmd); } if (terminal) { - install_default (ENABLE_NODE); + install_element (ENABLE_NODE, &config_end_cmd); install_element (ENABLE_NODE, &config_disable_cmd); install_element (ENABLE_NODE, &config_terminal_cmd); install_element (ENABLE_NODE, ©_runningconf_startupconf_cmd); + install_element (ENABLE_NODE, &config_write_cmd); + install_element (ENABLE_NODE, &show_running_config_cmd); } install_element (ENABLE_NODE, &show_startup_config_cmd); - install_element (ENABLE_NODE, &show_version_cmd); - install_element (ENABLE_NODE, &show_commandtree_cmd); if (terminal) { - install_element (ENABLE_NODE, &config_terminal_length_cmd); - install_element (ENABLE_NODE, &config_terminal_no_length_cmd); - install_element (ENABLE_NODE, &show_logging_cmd); - install_element (ENABLE_NODE, &echo_cmd); install_element (ENABLE_NODE, &config_logmsg_cmd); - install_default (CONFIG_NODE); } @@ -2297,12 +2324,9 @@ cmd_init (int terminal) install_element (CONFIG_NODE, &no_service_terminal_length_cmd); install_element (VIEW_NODE, &show_thread_cpu_cmd); - install_element (ENABLE_NODE, &show_thread_cpu_cmd); - install_element (RESTRICTED_NODE, &show_thread_cpu_cmd); install_element (ENABLE_NODE, &clear_thread_cpu_cmd); install_element (VIEW_NODE, &show_work_queues_cmd); - install_element (ENABLE_NODE, &show_work_queues_cmd); vrf_install_commands (); } diff --git a/lib/command.h b/lib/command.h index e411e9c18c..3c1008d4eb 100644 --- a/lib/command.h +++ b/lib/command.h @@ -70,7 +70,6 @@ struct host enum node_type { AUTH_NODE, /* Authentication mode of vty interface. */ - RESTRICTED_NODE, /* Restricted view mode */ VIEW_NODE, /* View node. Default mode of vty interface. */ AUTH_ENABLE_NODE, /* Authentication mode for change enable. */ ENABLE_NODE, /* Enable node. */ @@ -78,6 +77,7 @@ enum node_type SERVICE_NODE, /* Service node. */ DEBUG_NODE, /* Debug node. */ VRF_DEBUG_NODE, /* Vrf Debug node. */ + DEBUG_VNC_NODE, /* Debug VNC node. */ AAA_NODE, /* AAA node. */ KEYCHAIN_NODE, /* Key-chain node. */ KEYCHAIN_KEY_NODE, /* Key-chain key node. */ @@ -97,8 +97,19 @@ enum node_type BGP_IPV6M_NODE, /* BGP IPv6 multicast address family. */ BGP_ENCAP_NODE, /* BGP ENCAP SAFI */ BGP_ENCAPV6_NODE, /* BGP ENCAP SAFI */ + BGP_VNC_DEFAULTS_NODE, /* BGP VNC nve defaults */ + BGP_VNC_NVE_GROUP_NODE, /* BGP VNC nve group */ + BGP_VNC_L2_GROUP_NODE, /* BGP VNC L2 group */ + RFP_DEFAULTS_NODE, /* RFP defaults node */ OSPF_NODE, /* OSPF protocol mode */ OSPF6_NODE, /* OSPF protocol for IPv6 mode */ + LDP_NODE, /* LDP protocol mode */ + LDP_IPV4_NODE, /* LDP IPv4 address family */ + LDP_IPV6_NODE, /* LDP IPv6 address family */ + LDP_IPV4_IFACE_NODE, /* LDP IPv4 Interface */ + LDP_IPV6_IFACE_NODE, /* LDP IPv6 Interface */ + LDP_L2VPN_NODE, /* LDP L2VPN node */ + LDP_PSEUDOWIRE_NODE, /* LDP Pseudowire node */ ISIS_NODE, /* ISIS protocol mode */ PIM_NODE, /* PIM protocol mode */ MASC_NODE, /* MASC for multicast. 
*/ @@ -115,6 +126,7 @@ enum node_type DUMP_NODE, /* Packet dump node. */ FORWARDING_NODE, /* IP forwarding node. */ PROTOCOL_NODE, /* protocol filtering node */ + MPLS_NODE, /* MPLS config node */ VTY_NODE, /* Vty node. */ LINK_PARAMS_NODE, /* Link-parameters node */ }; @@ -365,6 +377,7 @@ struct cmd_element #define LINK_PARAMS_STR "Configure interface link parameters\n" #define OSPF_RI_STR "OSPF Router Information specific commands\n" #define PCE_STR "PCE Router Information specific commands\n" +#define MPLS_STR "MPLS information\n" #define CONF_BACKUP_EXT ".sav" diff --git a/lib/distribute.c b/lib/distribute.c index 8a00833915..8726e993c5 100644 --- a/lib/distribute.c +++ b/lib/distribute.c @@ -49,22 +49,35 @@ distribute_new (void) static void distribute_free (struct distribute *dist) { + int i = 0; + if (dist->ifname) XFREE (MTYPE_DISTRIBUTE_IFNAME, dist->ifname); - if (dist->list[DISTRIBUTE_IN]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[DISTRIBUTE_IN]); - if (dist->list[DISTRIBUTE_OUT]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[DISTRIBUTE_OUT]); + for (i = 0; i < DISTRIBUTE_MAX; i++) + if (dist->list[i]) + XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[i]); - if (dist->prefix[DISTRIBUTE_IN]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[DISTRIBUTE_IN]); - if (dist->prefix[DISTRIBUTE_OUT]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[DISTRIBUTE_OUT]); + for (i = 0; i < DISTRIBUTE_MAX; i++) + if (dist->prefix[i]) + XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[i]); XFREE (MTYPE_DISTRIBUTE, dist); } +static void +distribute_free_if_empty(struct distribute *dist) +{ + int i; + + for (i = 0; i < DISTRIBUTE_MAX; i++) + if (dist->list[i] != NULL || dist->prefix[i] != NULL) + return; + + hash_release (disthash, dist); + distribute_free (dist); +} + /* Lookup interface's distribute list. */ struct distribute * distribute_lookup (const char *ifname) @@ -156,18 +169,9 @@ distribute_list_set (const char *ifname, enum distribute_type type, dist = distribute_get (ifname); - if (type == DISTRIBUTE_IN) - { - if (dist->list[DISTRIBUTE_IN]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[DISTRIBUTE_IN]); - dist->list[DISTRIBUTE_IN] = XSTRDUP(MTYPE_DISTRIBUTE_NAME, alist_name); - } - if (type == DISTRIBUTE_OUT) - { - if (dist->list[DISTRIBUTE_OUT]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[DISTRIBUTE_OUT]); - dist->list[DISTRIBUTE_OUT] = XSTRDUP(MTYPE_DISTRIBUTE_NAME, alist_name); - } + if (dist->list[type]) + XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[type]); + dist->list[type] = XSTRDUP(MTYPE_DISTRIBUTE_NAME, alist_name); /* Apply this distribute-list to the interface. */ (*distribute_add_hook) (dist); @@ -185,41 +189,19 @@ distribute_list_unset (const char *ifname, enum distribute_type type, if (!dist) return 0; - if (type == DISTRIBUTE_IN) - { - if (!dist->list[DISTRIBUTE_IN]) + if (!dist->list[type]) return 0; - if (strcmp (dist->list[DISTRIBUTE_IN], alist_name) != 0) + if (strcmp (dist->list[type], alist_name) != 0) return 0; - XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[DISTRIBUTE_IN]); - dist->list[DISTRIBUTE_IN] = NULL; - } - - if (type == DISTRIBUTE_OUT) - { - if (!dist->list[DISTRIBUTE_OUT]) - return 0; - if (strcmp (dist->list[DISTRIBUTE_OUT], alist_name) != 0) - return 0; - - XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[DISTRIBUTE_OUT]); - dist->list[DISTRIBUTE_OUT] = NULL; - } + XFREE(MTYPE_DISTRIBUTE_NAME, dist->list[type]); + dist->list[type] = NULL; /* Apply this distribute-list to the interface. */ (*distribute_delete_hook) (dist); - /* If both out and in is NULL then free distribute list. 
*/ - if (dist->list[DISTRIBUTE_IN] == NULL && - dist->list[DISTRIBUTE_OUT] == NULL && - dist->prefix[DISTRIBUTE_IN] == NULL && - dist->prefix[DISTRIBUTE_OUT] == NULL) - { - hash_release (disthash, dist); - distribute_free (dist); - } - + /* If all dist are NULL, then free distribute list. */ + distribute_free_if_empty(dist); return 1; } @@ -232,18 +214,9 @@ distribute_list_prefix_set (const char *ifname, enum distribute_type type, dist = distribute_get (ifname); - if (type == DISTRIBUTE_IN) - { - if (dist->prefix[DISTRIBUTE_IN]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[DISTRIBUTE_IN]); - dist->prefix[DISTRIBUTE_IN] = XSTRDUP(MTYPE_DISTRIBUTE_NAME, plist_name); - } - if (type == DISTRIBUTE_OUT) - { - if (dist->prefix[DISTRIBUTE_OUT]) - XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[DISTRIBUTE_OUT]); - dist->prefix[DISTRIBUTE_OUT] = XSTRDUP(MTYPE_DISTRIBUTE_NAME, plist_name); - } + if (dist->prefix[type]) + XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[type]); + dist->prefix[type] = XSTRDUP(MTYPE_DISTRIBUTE_NAME, plist_name); /* Apply this distribute-list to the interface. */ (*distribute_add_hook) (dist); @@ -261,41 +234,19 @@ distribute_list_prefix_unset (const char *ifname, enum distribute_type type, if (!dist) return 0; - if (type == DISTRIBUTE_IN) - { - if (!dist->prefix[DISTRIBUTE_IN]) + if (!dist->prefix[type]) return 0; - if (strcmp (dist->prefix[DISTRIBUTE_IN], plist_name) != 0) + if (strcmp (dist->prefix[type], plist_name) != 0) return 0; - XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[DISTRIBUTE_IN]); - dist->prefix[DISTRIBUTE_IN] = NULL; - } - - if (type == DISTRIBUTE_OUT) - { - if (!dist->prefix[DISTRIBUTE_OUT]) - return 0; - if (strcmp (dist->prefix[DISTRIBUTE_OUT], plist_name) != 0) - return 0; - - XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[DISTRIBUTE_OUT]); - dist->prefix[DISTRIBUTE_OUT] = NULL; - } + XFREE(MTYPE_DISTRIBUTE_NAME, dist->prefix[type]); + dist->prefix[type] = NULL; /* Apply this distribute-list to the interface. */ (*distribute_delete_hook) (dist); - /* If both out and in is NULL then free distribute list. */ - if (dist->list[DISTRIBUTE_IN] == NULL && - dist->list[DISTRIBUTE_OUT] == NULL && - dist->prefix[DISTRIBUTE_IN] == NULL && - dist->prefix[DISTRIBUTE_OUT] == NULL) - { - hash_release (disthash, dist); - distribute_free (dist); - } - + /* If all dist are NULL, then free distribute list. */ + distribute_free_if_empty(dist); return 1; } @@ -312,7 +263,7 @@ DEFUN (distribute_list, /* Check of distribute list type. */ enum distribute_type type = argv[2 + prefix]->arg[0] == 'i' ? - DISTRIBUTE_IN : DISTRIBUTE_OUT; + DISTRIBUTE_V4_IN : DISTRIBUTE_V4_OUT; /* Set appropriate function call */ void (*distfn)(const char *, enum distribute_type, const char *) = prefix ? @@ -329,10 +280,10 @@ DEFUN (distribute_list, return CMD_SUCCESS; } -DEFUN (no_distribute_list, - no_distribute_list_cmd, - "no distribute-list [prefix] WORD [WORD]", - NO_STR +DEFUN (ipv6_distribute_list, + ipv6_distribute_list_cmd, + "ipv6 distribute-list [prefix] WORD [WORD]", + "IPv6\n" "Filter networks in routing updates\n" "Access-list name\n" "Filter incoming routing updates\n" @@ -343,7 +294,44 @@ DEFUN (no_distribute_list, /* Check of distribute list type. */ enum distribute_type type = argv[3 + prefix]->arg[0] == 'i' ? - DISTRIBUTE_IN : DISTRIBUTE_OUT; + DISTRIBUTE_V6_IN : DISTRIBUTE_V6_OUT; + + /* Set appropriate function call */ + void (*distfn)(const char *, enum distribute_type, const char *) = prefix ? 
+ &distribute_list_prefix_set : &distribute_list_set; + + /* if interface is present, get name */ + const char *ifname = NULL; + if (argv[argc - 1]->type == VARIABLE_TKN) + ifname = argv[argc - 1]->arg; + + /* Get interface name corresponding distribute list. */ + distfn (ifname, type, argv[1 + prefix]->arg); + + return CMD_SUCCESS; +} + +DEFUN (no_distribute_list, + no_distribute_list_cmd, + "no [ipv6] distribute-list [prefix] WORD [WORD]", + NO_STR + "Filter networks in routing updates\n" + "Access-list name\n" + "Filter incoming routing updates\n" + "Filter outgoing routing updates\n" + "Interface name\n") +{ + int ipv6 = strmatch(argv[1]->text, "ipv6"); + int prefix = (argv[2 + ipv6]->type == WORD_TKN) ? 1 : 0; + + int idx_alname = 2 + ipv6 + prefix; + int idx_disttype = idx_alname + 1; + + /* Check of distribute list type. */ + enum distribute_type distin = (ipv6) ? DISTRIBUTE_V6_IN : DISTRIBUTE_V4_IN; + enum distribute_type distout = (ipv6) ? DISTRIBUTE_V6_OUT : DISTRIBUTE_V4_OUT; + + enum distribute_type type = argv[idx_disttype]->arg[0] == 'i' ? distin : distout; /* Set appropriate function call */ int (*distfn)(const char *, enum distribute_type, const char *) = prefix ? @@ -353,7 +341,6 @@ DEFUN (no_distribute_list, const char *ifname = NULL; if (argv[argc - 1]->type == VARIABLE_TKN) ifname = argv[argc - 1]->arg; - /* Get interface name corresponding distribute list. */ int ret = distfn (ifname, type, argv[2 + prefix]->arg); @@ -365,79 +352,112 @@ DEFUN (no_distribute_list, return CMD_SUCCESS; } +static int +distribute_print (struct vty *vty, char *tab[], int is_prefix, + enum distribute_type type, int has_print) +{ + if (tab[type]) { + vty_out (vty, "%s %s%s", + has_print ? "," : "", + is_prefix ? "(prefix-list) " : "", + tab[type]); + return 1; + } + return has_print; +} + int config_show_distribute (struct vty *vty) { unsigned int i; + int has_print = 0; struct hash_backet *mp; struct distribute *dist; /* Output filter configuration. */ dist = distribute_lookup (NULL); - if (dist && (dist->list[DISTRIBUTE_OUT] || dist->prefix[DISTRIBUTE_OUT])) - { vty_out (vty, " Outgoing update filter list for all interface is"); - if (dist->list[DISTRIBUTE_OUT]) - vty_out (vty, " %s", dist->list[DISTRIBUTE_OUT]); - if (dist->prefix[DISTRIBUTE_OUT]) - vty_out (vty, "%s (prefix-list) %s", - dist->list[DISTRIBUTE_OUT] ? "," : "", - dist->prefix[DISTRIBUTE_OUT]); - vty_out (vty, "%s", VTY_NEWLINE); + has_print = 0; + if (dist) + { + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V4_OUT, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V4_OUT, has_print); + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V6_OUT, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V6_OUT, has_print); } + if (has_print) + vty_out (vty, "%s", VTY_NEWLINE); else - vty_out (vty, " Outgoing update filter list for all interface is not set%s", VTY_NEWLINE); + vty_out (vty, " not set%s", VTY_NEWLINE); for (i = 0; i < disthash->size; i++) for (mp = disthash->index[i]; mp; mp = mp->next) { dist = mp->data; if (dist->ifname) - if (dist->list[DISTRIBUTE_OUT] || dist->prefix[DISTRIBUTE_OUT]) { vty_out (vty, " %s filtered by", dist->ifname); - if (dist->list[DISTRIBUTE_OUT]) - vty_out (vty, " %s", dist->list[DISTRIBUTE_OUT]); - if (dist->prefix[DISTRIBUTE_OUT]) - vty_out (vty, "%s (prefix-list) %s", - dist->list[DISTRIBUTE_OUT] ? 
"," : "", - dist->prefix[DISTRIBUTE_OUT]); + has_print = 0; + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V4_OUT, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V4_OUT, has_print); + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V6_OUT, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V6_OUT, has_print); + if (has_print) vty_out (vty, "%s", VTY_NEWLINE); + else + vty_out(vty, " nothing%s", VTY_NEWLINE); } } /* Input filter configuration. */ dist = distribute_lookup (NULL); - if (dist && (dist->list[DISTRIBUTE_IN] || dist->prefix[DISTRIBUTE_IN])) - { vty_out (vty, " Incoming update filter list for all interface is"); - if (dist->list[DISTRIBUTE_IN]) - vty_out (vty, " %s", dist->list[DISTRIBUTE_IN]); - if (dist->prefix[DISTRIBUTE_IN]) - vty_out (vty, "%s (prefix-list) %s", - dist->list[DISTRIBUTE_IN] ? "," : "", - dist->prefix[DISTRIBUTE_IN]); - vty_out (vty, "%s", VTY_NEWLINE); + has_print = 0; + if (dist) + { + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V4_IN, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V4_IN, has_print); + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V6_IN, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V6_IN, has_print); } + if (has_print) + vty_out (vty, "%s", VTY_NEWLINE); else - vty_out (vty, " Incoming update filter list for all interface is not set%s", VTY_NEWLINE); + vty_out (vty, " not set%s", VTY_NEWLINE); for (i = 0; i < disthash->size; i++) for (mp = disthash->index[i]; mp; mp = mp->next) { dist = mp->data; if (dist->ifname) - if (dist->list[DISTRIBUTE_IN] || dist->prefix[DISTRIBUTE_IN]) { vty_out (vty, " %s filtered by", dist->ifname); - if (dist->list[DISTRIBUTE_IN]) - vty_out (vty, " %s", dist->list[DISTRIBUTE_IN]); - if (dist->prefix[DISTRIBUTE_IN]) - vty_out (vty, "%s (prefix-list) %s", - dist->list[DISTRIBUTE_IN] ? "," : "", - dist->prefix[DISTRIBUTE_IN]); + has_print = 0; + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V4_IN, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V4_IN, has_print); + has_print = distribute_print(vty, dist->list, 0, + DISTRIBUTE_V6_IN, has_print); + has_print = distribute_print(vty, dist->prefix, 1, + DISTRIBUTE_V6_IN, has_print); + if (has_print) vty_out (vty, "%s", VTY_NEWLINE); + else + vty_out(vty, " nothing%s", VTY_NEWLINE); } } return 0; @@ -448,6 +468,8 @@ int config_write_distribute (struct vty *vty) { unsigned int i; + int j; + int output, v6; struct hash_backet *mp; int write = 0; @@ -458,38 +480,27 @@ config_write_distribute (struct vty *vty) dist = mp->data; - if (dist->list[DISTRIBUTE_IN]) - { - vty_out (vty, " distribute-list %s in %s%s", - dist->list[DISTRIBUTE_IN], + for (j = 0; j < DISTRIBUTE_MAX; j++) + if (dist->list[j]) { + output = j == DISTRIBUTE_V4_OUT || j == DISTRIBUTE_V6_OUT; + v6 = j == DISTRIBUTE_V6_IN || j == DISTRIBUTE_V6_OUT; + vty_out (vty, " %sdistribute-list %s %s %s%s", + v6 ? "ipv6 " : "", + dist->list[j], + output ? "out" : "in", dist->ifname ? dist->ifname : "", VTY_NEWLINE); write++; } - if (dist->list[DISTRIBUTE_OUT]) - { - vty_out (vty, " distribute-list %s out %s%s", - - dist->list[DISTRIBUTE_OUT], - dist->ifname ? dist->ifname : "", - VTY_NEWLINE); - write++; - } - - if (dist->prefix[DISTRIBUTE_IN]) - { - vty_out (vty, " distribute-list prefix %s in %s%s", - dist->prefix[DISTRIBUTE_IN], - dist->ifname ? 
dist->ifname : "", - VTY_NEWLINE); - write++; - } - - if (dist->prefix[DISTRIBUTE_OUT]) - { - vty_out (vty, " distribute-list prefix %s out %s%s", - dist->prefix[DISTRIBUTE_OUT], + for (j = 0; j < DISTRIBUTE_MAX; j++) + if (dist->prefix[j]) { + output = j == DISTRIBUTE_V4_OUT || j == DISTRIBUTE_V6_OUT; + v6 = j == DISTRIBUTE_V6_IN || j == DISTRIBUTE_V6_OUT; + vty_out (vty, " %sdistribute-list prefix %s %s %s%s", + v6 ? "ipv6 " : "", + dist->prefix[j], + output ? "out" : "in", dist->ifname ? dist->ifname : "", VTY_NEWLINE); write++; @@ -514,4 +525,21 @@ distribute_list_init (int node) install_element (node, &distribute_list_cmd); install_element (node, &no_distribute_list_cmd); + + /* install v6 */ + if (node == RIPNG_NODE) { + install_element (node, &ipv6_distribute_list_cmd); + } + + /* TODO: install v4 syntax command for v6 only protocols. */ + /* if (node == RIPNG_NODE) { + * install_element (node, &ipv6_as_v4_distribute_list_all_cmd); + * install_element (node, &no_ipv6_as_v4_distribute_list_all_cmd); + * install_element (node, &ipv6_as_v4_distribute_list_cmd); + * install_element (node, &no_ipv6_as_v4_distribute_list_cmd); + * install_element (node, &ipv6_as_v4_distribute_list_prefix_all_cmd); + * install_element (node, &no_ipv6_as_v4_distribute_list_prefix_all_cmd); + * install_element (node, &ipv6_as_v4_distribute_list_prefix_cmd); + * install_element (node, &no_ipv6_as_v4_distribute_list_prefix_cmd); + }*/ } diff --git a/lib/distribute.h b/lib/distribute.h index a2ffffd5fc..e9625a3548 100644 --- a/lib/distribute.h +++ b/lib/distribute.h @@ -29,8 +29,10 @@ /* Disctirubte list types. */ enum distribute_type { - DISTRIBUTE_IN, - DISTRIBUTE_OUT, + DISTRIBUTE_V4_IN, + DISTRIBUTE_V6_IN, + DISTRIBUTE_V4_OUT, + DISTRIBUTE_V6_OUT, DISTRIBUTE_MAX }; diff --git a/lib/if.c b/lib/if.c index dd8922ee91..6235884e5d 100644 --- a/lib/if.c +++ b/lib/if.c @@ -43,6 +43,8 @@ DEFINE_MTYPE_STATIC(LIB, NBR_CONNECTED, "Neighbor Connected") DEFINE_MTYPE( LIB, CONNECTED_LABEL, "Connected interface label") DEFINE_MTYPE_STATIC(LIB, IF_LINK_PARAMS, "Informational Link Parameters") +DEFINE_QOBJ_TYPE(interface) + /* List of interfaces in only the default VRF */ int ptm_enable = 0; @@ -149,6 +151,8 @@ if_create_vrf (const char *name, int namelen, vrf_id_t vrf_id) /* Enable Link-detection by default */ SET_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION); + QOBJ_REG (ifp, interface); + if (if_master.if_new_hook) (*if_master.if_new_hook) (ifp); @@ -193,6 +197,8 @@ if_delete_retain (struct interface *ifp) if (if_master.if_delete_hook) (*if_master.if_delete_hook) (ifp); + QOBJ_UNREG (ifp); + /* Free connected address list */ list_delete_all_node (ifp->connected); @@ -678,9 +684,8 @@ DEFUN (interface_desc, "Characters describing this interface\n") { int idx_line = 1; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); - ifp = vty->index; if (ifp->desc) XFREE (MTYPE_TMP, ifp->desc); ifp->desc = argv_concat(argv, argc, idx_line); @@ -694,9 +699,8 @@ DEFUN (no_interface_desc, NO_STR "Interface specific description\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); - ifp = vty->index; if (ifp->desc) XFREE (MTYPE_TMP, ifp->desc); ifp->desc = NULL; @@ -786,8 +790,7 @@ DEFUN (interface, vty_out (vty, "%% interface %s not in %s%s", ifname, vrfname, VTY_NEWLINE); return CMD_WARNING; } - vty->index = ifp; - vty->node = INTERFACE_NODE; + VTY_PUSH_CONTEXT_COMPAT (INTERFACE_NODE, ifp); return CMD_SUCCESS; } @@ -852,8 +855,7 @@ DEFUN (vrf, vrfp = vrf_get (VRF_UNKNOWN, vrfname); - vty->index = vrfp; - 
vty->node = VRF_NODE; + VTY_PUSH_CONTEXT_COMPAT (VRF_NODE, vrfp); return CMD_SUCCESS; } diff --git a/lib/if.h b/lib/if.h index d1875e695a..57062cd3fc 100644 --- a/lib/if.h +++ b/lib/if.h @@ -24,6 +24,7 @@ Boston, MA 02111-1307, USA. */ #include "zebra.h" #include "linklist.h" #include "memory.h" +#include "qobj.h" DECLARE_MTYPE(IF) DECLARE_MTYPE(CONNECTED_LABEL) @@ -267,7 +268,10 @@ struct interface struct route_node *node; vrf_id_t vrf_id; + + QOBJ_FIELDS }; +DECLARE_QOBJ_TYPE(interface) /* Connected address structure. */ struct connected diff --git a/lib/imsg-buffer.c b/lib/imsg-buffer.c new file mode 100644 index 0000000000..a486fc17c1 --- /dev/null +++ b/lib/imsg-buffer.c @@ -0,0 +1,301 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "openbsd-queue.h" +#include "imsg.h" + +int ibuf_realloc(struct ibuf *, size_t); +void ibuf_enqueue(struct msgbuf *, struct ibuf *); +void ibuf_dequeue(struct msgbuf *, struct ibuf *); + +struct ibuf * +ibuf_open(size_t len) +{ + struct ibuf *buf; + + if ((buf = calloc(1, sizeof(struct ibuf))) == NULL) + return (NULL); + if ((buf->buf = malloc(len)) == NULL) { + free(buf); + return (NULL); + } + buf->size = buf->max = len; + buf->fd = -1; + + return (buf); +} + +struct ibuf * +ibuf_dynamic(size_t len, size_t max) +{ + struct ibuf *buf; + + if (max < len) + return (NULL); + + if ((buf = ibuf_open(len)) == NULL) + return (NULL); + + if (max > 0) + buf->max = max; + + return (buf); +} + +int +ibuf_realloc(struct ibuf *buf, size_t len) +{ + u_char *b; + + /* on static buffers max is eq size and so the following fails */ + if (buf->wpos + len > buf->max) { + errno = ERANGE; + return (-1); + } + + b = realloc(buf->buf, buf->wpos + len); + if (b == NULL) + return (-1); + buf->buf = b; + buf->size = buf->wpos + len; + + return (0); +} + +int +ibuf_add(struct ibuf *buf, const void *data, size_t len) +{ + if (buf->wpos + len > buf->size) + if (ibuf_realloc(buf, len) == -1) + return (-1); + + memcpy(buf->buf + buf->wpos, data, len); + buf->wpos += len; + return (0); +} + +void * +ibuf_reserve(struct ibuf *buf, size_t len) +{ + void *b; + + if (buf->wpos + len > buf->size) + if (ibuf_realloc(buf, len) == -1) + return (NULL); + + b = buf->buf + buf->wpos; + buf->wpos += len; + return (b); +} + +void * +ibuf_seek(struct ibuf *buf, size_t pos, size_t len) +{ + /* only allowed to seek in already written parts */ + if (pos + len > buf->wpos) + return (NULL); + + return (buf->buf + pos); +} + +size_t +ibuf_size(struct ibuf *buf) +{ + return (buf->wpos); +} + +size_t +ibuf_left(struct ibuf *buf) +{ + return (buf->max - buf->wpos); +} + +void +ibuf_close(struct msgbuf *msgbuf, struct ibuf *buf) +{ + ibuf_enqueue(msgbuf, buf); +} + +int +ibuf_write(struct msgbuf *msgbuf) +{ + struct iovec 
iov[IOV_MAX]; + struct ibuf *buf; + unsigned int i = 0; + ssize_t n; + + memset(&iov, 0, sizeof(iov)); + TAILQ_FOREACH(buf, &msgbuf->bufs, entry) { + if (i >= IOV_MAX) + break; + iov[i].iov_base = buf->buf + buf->rpos; + iov[i].iov_len = buf->wpos - buf->rpos; + i++; + } + +again: + if ((n = writev(msgbuf->fd, iov, i)) == -1) { + if (errno == EINTR) + goto again; + if (errno == ENOBUFS) + errno = EAGAIN; + return (-1); + } + + if (n == 0) { /* connection closed */ + errno = 0; + return (0); + } + + msgbuf_drain(msgbuf, n); + + return (1); +} + +void +ibuf_free(struct ibuf *buf) +{ + if (buf == NULL) + return; + free(buf->buf); + free(buf); +} + +void +msgbuf_init(struct msgbuf *msgbuf) +{ + msgbuf->queued = 0; + msgbuf->fd = -1; + TAILQ_INIT(&msgbuf->bufs); +} + +void +msgbuf_drain(struct msgbuf *msgbuf, size_t n) +{ + struct ibuf *buf, *next; + + for (buf = TAILQ_FIRST(&msgbuf->bufs); buf != NULL && n > 0; + buf = next) { + next = TAILQ_NEXT(buf, entry); + if (buf->rpos + n >= buf->wpos) { + n -= buf->wpos - buf->rpos; + ibuf_dequeue(msgbuf, buf); + } else { + buf->rpos += n; + n = 0; + } + } +} + +void +msgbuf_clear(struct msgbuf *msgbuf) +{ + struct ibuf *buf; + + while ((buf = TAILQ_FIRST(&msgbuf->bufs)) != NULL) + ibuf_dequeue(msgbuf, buf); +} + +int +msgbuf_write(struct msgbuf *msgbuf) +{ + struct iovec iov[IOV_MAX]; + struct ibuf *buf; + unsigned int i = 0; + ssize_t n; + struct msghdr msg; + struct cmsghdr *cmsg; + union { + struct cmsghdr hdr; + char buf[CMSG_SPACE(sizeof(int))]; + } cmsgbuf; + + memset(&iov, 0, sizeof(iov)); + memset(&msg, 0, sizeof(msg)); + memset(&cmsgbuf, 0, sizeof(cmsgbuf)); + TAILQ_FOREACH(buf, &msgbuf->bufs, entry) { + if (i >= IOV_MAX) + break; + iov[i].iov_base = buf->buf + buf->rpos; + iov[i].iov_len = buf->wpos - buf->rpos; + i++; + if (buf->fd != -1) + break; + } + + msg.msg_iov = iov; + msg.msg_iovlen = i; + + if (buf != NULL && buf->fd != -1) { + msg.msg_control = (caddr_t)&cmsgbuf.buf; + msg.msg_controllen = sizeof(cmsgbuf.buf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_len = CMSG_LEN(sizeof(int)); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + memcpy(CMSG_DATA(cmsg), &buf->fd, sizeof(int)); + } + +again: + if ((n = sendmsg(msgbuf->fd, &msg, 0)) == -1) { + if (errno == EINTR) + goto again; + if (errno == ENOBUFS) + errno = EAGAIN; + return (-1); + } + + if (n == 0) { /* connection closed */ + errno = 0; + return (0); + } + + /* + * assumption: fd got sent if sendmsg sent anything + * this works because fds are passed one at a time + */ + if (buf != NULL && buf->fd != -1) { + close(buf->fd); + buf->fd = -1; + } + + msgbuf_drain(msgbuf, n); + + return (1); +} + +void +ibuf_enqueue(struct msgbuf *msgbuf, struct ibuf *buf) +{ + TAILQ_INSERT_TAIL(&msgbuf->bufs, buf, entry); + msgbuf->queued++; +} + +void +ibuf_dequeue(struct msgbuf *msgbuf, struct ibuf *buf) +{ + TAILQ_REMOVE(&msgbuf->bufs, buf, entry); + + if (buf->fd != -1) + close(buf->fd); + + msgbuf->queued--; + ibuf_free(buf); +} diff --git a/lib/imsg.c b/lib/imsg.c new file mode 100644 index 0000000000..246430cdd5 --- /dev/null +++ b/lib/imsg.c @@ -0,0 +1,334 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
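The ibuf/msgbuf routines above are meant to be used by building one ibuf per outgoing message, enqueueing it with ibuf_close(), and flushing the queue when the socket is writable. A minimal sketch of that flow (sock is assumed to be a connected stream socket; error handling is abbreviated):

    static int
    send_u32(int sock, u_int32_t value)
    {
        struct msgbuf wq;
        struct ibuf *buf;

        msgbuf_init(&wq);
        wq.fd = sock;
        if ((buf = ibuf_open(sizeof(value))) == NULL)
            return (-1);
        if (ibuf_add(buf, &value, sizeof(value)) == -1) {
            ibuf_free(buf);
            return (-1);
        }
        ibuf_close(&wq, buf);              /* ownership passes to the queue */
        while (wq.queued)
            if (msgbuf_write(&wq) <= 0)    /* -1 on error, 0 if the peer closed */
                return (-1);
        return (0);
    }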
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "openbsd-queue.h" +#include "imsg.h" + +int imsg_fd_overhead = 0; + +int imsg_get_fd(struct imsgbuf *); + +#ifndef __OpenBSD__ +/* + * The original code calls getdtablecount() which is OpenBSD specific. Use + * available_fds() from OpenSMTPD instead. + */ +static int +available_fds(unsigned int n) +{ + unsigned int i; + int ret, fds[256]; + + if (n > (sizeof(fds)/sizeof(fds[0]))) + return (1); + + ret = 0; + for (i = 0; i < n; i++) { + fds[i] = -1; + if ((fds[i] = socket(AF_INET, SOCK_DGRAM, 0)) < 0) { + if (errno == EAFNOSUPPORT || errno == EPROTONOSUPPORT) + fds[i] = socket(AF_INET6, SOCK_DGRAM, 0); + if (fds[i] < 0) { + ret = 1; + break; + } + } + } + + for (i = 0; i < n && fds[i] >= 0; i++) + close(fds[i]); + + return (ret); +} +#endif + +void +imsg_init(struct imsgbuf *ibuf, int fd) +{ + msgbuf_init(&ibuf->w); + memset(&ibuf->r, 0, sizeof(ibuf->r)); + ibuf->fd = fd; + ibuf->w.fd = fd; + ibuf->pid = getpid(); + TAILQ_INIT(&ibuf->fds); +} + +ssize_t +imsg_read(struct imsgbuf *ibuf) +{ + struct msghdr msg; + struct cmsghdr *cmsg; + union { + struct cmsghdr hdr; + char buf[CMSG_SPACE(sizeof(int) * 1)]; + } cmsgbuf; + struct iovec iov; + ssize_t n = -1; + int fd; + struct imsg_fd *ifd; + + memset(&msg, 0, sizeof(msg)); + memset(&cmsgbuf, 0, sizeof(cmsgbuf)); + + iov.iov_base = ibuf->r.buf + ibuf->r.wpos; + iov.iov_len = sizeof(ibuf->r.buf) - ibuf->r.wpos; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = &cmsgbuf.buf; + msg.msg_controllen = sizeof(cmsgbuf.buf); + + if ((ifd = calloc(1, sizeof(struct imsg_fd))) == NULL) + return (-1); + +again: +#ifdef __OpenBSD__ + if (getdtablecount() + imsg_fd_overhead + + (int)((CMSG_SPACE(sizeof(int))-CMSG_SPACE(0))/sizeof(int)) + >= getdtablesize()) { +#else + if (available_fds(imsg_fd_overhead + + (CMSG_SPACE(sizeof(int))-CMSG_SPACE(0))/sizeof(int))) { +#endif + errno = EAGAIN; + free(ifd); + return (-1); + } + + if ((n = recvmsg(ibuf->fd, &msg, 0)) == -1) { + if (errno == EINTR) + goto again; + goto fail; + } + + ibuf->r.wpos += n; + + for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; + cmsg = CMSG_NXTHDR(&msg, cmsg)) { + if (cmsg->cmsg_level == SOL_SOCKET && + cmsg->cmsg_type == SCM_RIGHTS) { + int i; + int j; + + /* + * We only accept one file descriptor. Due to C + * padding rules, our control buffer might contain + * more than one fd, and we must close them. 
+ */ + j = ((char *)cmsg + cmsg->cmsg_len - + (char *)CMSG_DATA(cmsg)) / sizeof(int); + for (i = 0; i < j; i++) { + fd = ((int *)CMSG_DATA(cmsg))[i]; + if (ifd != NULL) { + ifd->fd = fd; + TAILQ_INSERT_TAIL(&ibuf->fds, ifd, + entry); + ifd = NULL; + } else + close(fd); + } + } + /* we do not handle other ctl data level */ + } + +fail: + free(ifd); + return (n); +} + +ssize_t +imsg_get(struct imsgbuf *ibuf, struct imsg *imsg) +{ + size_t av, left, datalen; + + av = ibuf->r.wpos; + + if (IMSG_HEADER_SIZE > av) + return (0); + + memcpy(&imsg->hdr, ibuf->r.buf, sizeof(imsg->hdr)); + if (imsg->hdr.len < IMSG_HEADER_SIZE || + imsg->hdr.len > MAX_IMSGSIZE) { + errno = ERANGE; + return (-1); + } + if (imsg->hdr.len > av) + return (0); + datalen = imsg->hdr.len - IMSG_HEADER_SIZE; + ibuf->r.rptr = ibuf->r.buf + IMSG_HEADER_SIZE; + if (datalen == 0) + imsg->data = NULL; + else if ((imsg->data = malloc(datalen)) == NULL) + return (-1); + + if (imsg->hdr.flags & IMSGF_HASFD) + imsg->fd = imsg_get_fd(ibuf); + else + imsg->fd = -1; + + memcpy(imsg->data, ibuf->r.rptr, datalen); + + if (imsg->hdr.len < av) { + left = av - imsg->hdr.len; + memmove(&ibuf->r.buf, ibuf->r.buf + imsg->hdr.len, left); + ibuf->r.wpos = left; + } else + ibuf->r.wpos = 0; + + return (datalen + IMSG_HEADER_SIZE); +} + +int +imsg_compose(struct imsgbuf *ibuf, u_int32_t type, u_int32_t peerid, + pid_t pid, int fd, const void *data, u_int16_t datalen) +{ + struct ibuf *wbuf; + + if ((wbuf = imsg_create(ibuf, type, peerid, pid, datalen)) == NULL) + return (-1); + + if (imsg_add(wbuf, data, datalen) == -1) + return (-1); + + wbuf->fd = fd; + + imsg_close(ibuf, wbuf); + + return (1); +} + +int +imsg_composev(struct imsgbuf *ibuf, u_int32_t type, u_int32_t peerid, + pid_t pid, int fd, const struct iovec *iov, int iovcnt) +{ + struct ibuf *wbuf; + int i, datalen = 0; + + for (i = 0; i < iovcnt; i++) + datalen += iov[i].iov_len; + + if ((wbuf = imsg_create(ibuf, type, peerid, pid, datalen)) == NULL) + return (-1); + + for (i = 0; i < iovcnt; i++) + if (imsg_add(wbuf, iov[i].iov_base, iov[i].iov_len) == -1) + return (-1); + + wbuf->fd = fd; + + imsg_close(ibuf, wbuf); + + return (1); +} + +/* ARGSUSED */ +struct ibuf * +imsg_create(struct imsgbuf *ibuf, u_int32_t type, u_int32_t peerid, + pid_t pid, u_int16_t datalen) +{ + struct ibuf *wbuf; + struct imsg_hdr hdr; + + datalen += IMSG_HEADER_SIZE; + if (datalen > MAX_IMSGSIZE) { + errno = ERANGE; + return (NULL); + } + + hdr.type = type; + hdr.flags = 0; + hdr.peerid = peerid; + if ((hdr.pid = pid) == 0) + hdr.pid = ibuf->pid; + if ((wbuf = ibuf_dynamic(datalen, MAX_IMSGSIZE)) == NULL) { + return (NULL); + } + if (imsg_add(wbuf, &hdr, sizeof(hdr)) == -1) + return (NULL); + + return (wbuf); +} + +int +imsg_add(struct ibuf *msg, const void *data, u_int16_t datalen) +{ + if (datalen) + if (ibuf_add(msg, data, datalen) == -1) { + ibuf_free(msg); + return (-1); + } + return (datalen); +} + +void +imsg_close(struct imsgbuf *ibuf, struct ibuf *msg) +{ + struct imsg_hdr *hdr; + + hdr = (struct imsg_hdr *)msg->buf; + + hdr->flags &= ~IMSGF_HASFD; + if (msg->fd != -1) + hdr->flags |= IMSGF_HASFD; + + hdr->len = (u_int16_t)msg->wpos; + + ibuf_close(&ibuf->w, msg); +} + +void +imsg_free(struct imsg *imsg) +{ + free(imsg->data); +} + +int +imsg_get_fd(struct imsgbuf *ibuf) +{ + int fd; + struct imsg_fd *ifd; + + if ((ifd = TAILQ_FIRST(&ibuf->fds)) == NULL) + return (-1); + + fd = ifd->fd; + TAILQ_REMOVE(&ibuf->fds, ifd, entry); + free(ifd); + + return (fd); +} + +int +imsg_flush(struct imsgbuf *ibuf) +{ 
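The calls above are intended to be used pairwise: the writer queues messages with imsg_compose() and pushes them out with imsg_flush() (whose body follows), while the reader calls imsg_read() once per readable event and then drains complete messages with imsg_get(). A rough sketch, assuming sv[] is a connected stream socketpair and MSG_HELLO is an application-defined type:

    struct imsgbuf ibuf_w, ibuf_r;
    struct imsg imsg;
    ssize_t n;

    imsg_init(&ibuf_w, sv[0]);
    imsg_init(&ibuf_r, sv[1]);

    /* writer: queue one message (no fd attached) and flush the queue */
    imsg_compose(&ibuf_w, MSG_HELLO, 0, 0, -1, "hi", 3);
    imsg_flush(&ibuf_w);

    /* reader: pull bytes off the socket, then peel off whole messages */
    if (imsg_read(&ibuf_r) > 0)
        while ((n = imsg_get(&ibuf_r, &imsg)) > 0) {
            /* imsg.hdr.type is MSG_HELLO; payload length is n - IMSG_HEADER_SIZE */
            imsg_free(&imsg);
        }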
+ while (ibuf->w.queued) + if (msgbuf_write(&ibuf->w) <= 0) + return (-1); + return (0); +} + +void +imsg_clear(struct imsgbuf *ibuf) +{ + int fd; + + msgbuf_clear(&ibuf->w); + while ((fd = imsg_get_fd(ibuf)) != -1) + close(fd); +} diff --git a/lib/imsg.h b/lib/imsg.h new file mode 100644 index 0000000000..d053d01956 --- /dev/null +++ b/lib/imsg.h @@ -0,0 +1,112 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2006, 2007 Pierre-Yves Ritschard + * Copyright (c) 2006, 2007, 2008 Reyk Floeter + * Copyright (c) 2003, 2004 Henning Brauer + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _IMSG_H_ +#define _IMSG_H_ + +#define IBUF_READ_SIZE 65535 +#define IMSG_HEADER_SIZE sizeof(struct imsg_hdr) +#define MAX_IMSGSIZE 16384 + +struct ibuf { + TAILQ_ENTRY(ibuf) entry; + u_char *buf; + size_t size; + size_t max; + size_t wpos; + size_t rpos; + int fd; +}; + +struct msgbuf { + TAILQ_HEAD(, ibuf) bufs; + u_int32_t queued; + int fd; +}; + +struct ibuf_read { + u_char buf[IBUF_READ_SIZE]; + u_char *rptr; + size_t wpos; +}; + +struct imsg_fd { + TAILQ_ENTRY(imsg_fd) entry; + int fd; +}; + +struct imsgbuf { + TAILQ_HEAD(, imsg_fd) fds; + struct ibuf_read r; + struct msgbuf w; + int fd; + pid_t pid; +}; + +#define IMSGF_HASFD 1 + +struct imsg_hdr { + u_int32_t type; + u_int16_t len; + u_int16_t flags; + u_int32_t peerid; + u_int32_t pid; +}; + +struct imsg { + struct imsg_hdr hdr; + int fd; + void *data; +}; + + +/* buffer.c */ +struct ibuf *ibuf_open(size_t); +struct ibuf *ibuf_dynamic(size_t, size_t); +int ibuf_add(struct ibuf *, const void *, size_t); +void *ibuf_reserve(struct ibuf *, size_t); +void *ibuf_seek(struct ibuf *, size_t, size_t); +size_t ibuf_size(struct ibuf *); +size_t ibuf_left(struct ibuf *); +void ibuf_close(struct msgbuf *, struct ibuf *); +int ibuf_write(struct msgbuf *); +void ibuf_free(struct ibuf *); +void msgbuf_init(struct msgbuf *); +void msgbuf_clear(struct msgbuf *); +int msgbuf_write(struct msgbuf *); +void msgbuf_drain(struct msgbuf *, size_t); + +/* imsg.c */ +void imsg_init(struct imsgbuf *, int); +ssize_t imsg_read(struct imsgbuf *); +ssize_t imsg_get(struct imsgbuf *, struct imsg *); +int imsg_compose(struct imsgbuf *, u_int32_t, u_int32_t, pid_t, + int, const void *, u_int16_t); +int imsg_composev(struct imsgbuf *, u_int32_t, u_int32_t, pid_t, + int, const struct iovec *, int); +struct ibuf *imsg_create(struct imsgbuf *, u_int32_t, u_int32_t, pid_t, + u_int16_t); +int imsg_add(struct ibuf *, const void *, u_int16_t); +void imsg_close(struct imsgbuf *, struct ibuf *); +void imsg_free(struct imsg *); +int imsg_flush(struct imsgbuf *); +void imsg_clear(struct imsgbuf *); + +#endif diff --git a/lib/json.h b/lib/json.h index 3fcfe340e9..e3d73d9d88 100644 --- a/lib/json.h +++ b/lib/json.h @@ -26,6 +26,12 @@ #include #else #include + +/* + * json_object_to_json_string_ext is only 
available for json-c + * so let's just turn it back to the original usage. + */ +#define json_object_to_json_string_ext(A, B) json_object_to_json_string (A) #endif extern int use_json(const int argc, struct cmd_token *argv[]); diff --git a/lib/keychain.c b/lib/keychain.c index c2d6e45714..f8a3ffc012 100644 --- a/lib/keychain.c +++ b/lib/keychain.c @@ -28,30 +28,40 @@ Boston, MA 02111-1307, USA. */ DEFINE_MTYPE_STATIC(LIB, KEY, "Key") DEFINE_MTYPE_STATIC(LIB, KEYCHAIN, "Key chain") +DEFINE_QOBJ_TYPE(keychain) +DEFINE_QOBJ_TYPE(key) + /* Master list of key chain. */ struct list *keychain_list; static struct keychain * keychain_new (void) { - return XCALLOC (MTYPE_KEYCHAIN, sizeof (struct keychain)); + struct keychain *keychain; + keychain = XCALLOC (MTYPE_KEYCHAIN, sizeof (struct keychain)); + QOBJ_REG (keychain, keychain); + return keychain; } static void keychain_free (struct keychain *keychain) { + QOBJ_UNREG (keychain); XFREE (MTYPE_KEYCHAIN, keychain); } static struct key * key_new (void) { - return XCALLOC (MTYPE_KEY, sizeof (struct key)); + struct key *key = XCALLOC (MTYPE_KEY, sizeof (struct key)); + QOBJ_REG (key, key); + return key; } static void key_free (struct key *key) { + QOBJ_UNREG (key); XFREE (MTYPE_KEY, key); } @@ -241,8 +251,7 @@ DEFUN (key_chain, struct keychain *keychain; keychain = keychain_get (argv[idx_word]->arg); - vty->index = keychain; - vty->node = KEYCHAIN_NODE; + VTY_PUSH_CONTEXT_COMPAT (KEYCHAIN_NODE, keychain); return CMD_SUCCESS; } @@ -278,16 +287,13 @@ DEFUN (key, "Key identifier number\n") { int idx_number = 1; - struct keychain *keychain; + VTY_DECLVAR_CONTEXT (keychain, keychain); struct key *key; u_int32_t index; - keychain = vty->index; - VTY_GET_INTEGER ("key identifier", index, argv[idx_number]->arg); key = key_get (keychain, index); - vty->index_sub = key; - vty->node = KEYCHAIN_KEY_NODE; + VTY_PUSH_CONTEXT_SUB (KEYCHAIN_KEY_NODE, key); return CMD_SUCCESS; } @@ -300,12 +306,10 @@ DEFUN (no_key, "Key identifier number\n") { int idx_number = 2; - struct keychain *keychain; + VTY_DECLVAR_CONTEXT (keychain, keychain); struct key *key; u_int32_t index; - keychain = vty->index; - VTY_GET_INTEGER ("key identifier", index, argv[idx_number]->arg); key = key_lookup (keychain, index); if (! 
key) @@ -328,9 +332,7 @@ DEFUN (key_string, "The key\n") { int idx_line = 1; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); if (key->string) XFREE(MTYPE_KEY, key->string); @@ -346,9 +348,7 @@ DEFUN (no_key_string, "Unset key string\n" "The key\n") { - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); if (key->string) { @@ -565,9 +565,7 @@ DEFUN (accept_lifetime_day_month_day_month, int idx_number_3 = 6; int idx_month_2 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->accept, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -594,9 +592,7 @@ DEFUN (accept_lifetime_day_month_month_day, int idx_month_2 = 6; int idx_number_3 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->accept, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -623,9 +619,7 @@ DEFUN (accept_lifetime_month_day_day_month, int idx_number_3 = 6; int idx_month_2 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->accept, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -652,9 +646,7 @@ DEFUN (accept_lifetime_month_day_month_day, int idx_month_2 = 6; int idx_number_3 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->accept, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -674,9 +666,7 @@ DEFUN (accept_lifetime_infinite_day_month, int idx_number = 2; int idx_month = 3; int idx_number_2 = 4; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_infinite_set (vty, &key->accept, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg); @@ -696,9 +686,7 @@ DEFUN (accept_lifetime_infinite_month_day, int idx_month = 2; int idx_number = 3; int idx_number_2 = 4; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_infinite_set (vty, &key->accept, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg); @@ -720,9 +708,7 @@ DEFUN (accept_lifetime_duration_day_month, int idx_month = 3; int idx_number_2 = 4; int idx_number_3 = 6; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_duration_set (vty, &key->accept, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_number_3]->arg); @@ -744,9 +730,7 @@ DEFUN (accept_lifetime_duration_month_day, int idx_number = 3; int idx_number_2 = 4; int idx_number_3 = 6; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_duration_set (vty, &key->accept, 
argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_number_3]->arg); @@ -773,9 +757,7 @@ DEFUN (send_lifetime_day_month_day_month, int idx_number_3 = 6; int idx_month_2 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -802,9 +784,7 @@ DEFUN (send_lifetime_day_month_month_day, int idx_month_2 = 6; int idx_number_3 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -831,9 +811,7 @@ DEFUN (send_lifetime_month_day_day_month, int idx_number_3 = 6; int idx_month_2 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -860,9 +838,7 @@ DEFUN (send_lifetime_month_day_month_day, int idx_month_2 = 6; int idx_number_3 = 7; int idx_number_4 = 8; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_hhmmss_2]->arg, argv[idx_number_3]->arg, argv[idx_month_2]->arg, argv[idx_number_4]->arg); @@ -882,9 +858,7 @@ DEFUN (send_lifetime_infinite_day_month, int idx_number = 2; int idx_month = 3; int idx_number_2 = 4; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_infinite_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg); @@ -904,9 +878,7 @@ DEFUN (send_lifetime_infinite_month_day, int idx_month = 2; int idx_number = 3; int idx_number_2 = 4; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_infinite_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg); @@ -928,9 +900,7 @@ DEFUN (send_lifetime_duration_day_month, int idx_month = 3; int idx_number_2 = 4; int idx_number_3 = 6; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_duration_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_number_3]->arg); @@ -952,9 +922,7 @@ DEFUN (send_lifetime_duration_month_day, int idx_number = 3; int idx_number_2 = 4; int idx_number_3 = 6; - struct key *key; - - key = vty->index_sub; + VTY_DECLVAR_CONTEXT_SUB (key, key); return key_lifetime_duration_set (vty, &key->send, argv[idx_hhmmss]->arg, argv[idx_number]->arg, argv[idx_month]->arg, argv[idx_number_2]->arg, argv[idx_number_3]->arg); diff --git a/lib/keychain.h b/lib/keychain.h index f962864c5b..d3f9168a0f 100644 --- a/lib/keychain.h +++ b/lib/keychain.h @@ -22,12 +22,17 @@ #ifndef _ZEBRA_KEYCHAIN_H #define _ZEBRA_KEYCHAIN_H 
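The keychain header below gets the same qobj treatment already applied to struct interface, and keychain.c above pairs QOBJ_REG/QOBJ_UNREG with allocation and free; the VTY context macros (VTY_PUSH_CONTEXT_COMPAT, VTY_DECLVAR_CONTEXT and their _SUB variants) then replace the raw vty->index / vty->index_sub accesses, as in the DEFUNs above. The recurring pattern, sketched for a hypothetical struct foo:

    /* foo.h */
    #include "qobj.h"

    struct foo {
        char *name;
        QOBJ_FIELDS
    };
    DECLARE_QOBJ_TYPE(foo)

    /* foo.c */
    DEFINE_QOBJ_TYPE(foo)

    static struct foo *
    foo_new (void)
    {
        struct foo *f = XCALLOC (MTYPE_TMP, sizeof (struct foo));
        QOBJ_REG (f, foo);       /* register while the object is alive */
        return f;
    }

    static void
    foo_free (struct foo *f)
    {
        QOBJ_UNREG (f);          /* unregister before freeing */
        XFREE (MTYPE_TMP, f);
    }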
+#include "qobj.h" + struct keychain { char *name; struct list *key; + + QOBJ_FIELDS }; +DECLARE_QOBJ_TYPE(keychain) struct key_range { @@ -45,7 +50,10 @@ struct key struct key_range send; struct key_range accept; + + QOBJ_FIELDS }; +DECLARE_QOBJ_TYPE(key) extern void keychain_init (void); extern struct keychain *keychain_lookup (const char *); diff --git a/lib/log.c b/lib/log.c index b9edb12fbd..cd1f0bb771 100644 --- a/lib/log.c +++ b/lib/log.c @@ -41,6 +41,10 @@ static int logfile_fd = -1; /* Used in signal handler. */ struct zlog *zlog_default = NULL; +/* + * This must be kept in the same order as the + * zlog_proto_t enum + */ const char *zlog_proto_names[] = { "NONE", @@ -51,9 +55,10 @@ const char *zlog_proto_names[] = "OSPF", "RIPNG", "OSPF6", + "LDP", "ISIS", "PIM", - "MASC", + "RFP", NULL, }; @@ -177,7 +182,7 @@ time_print(FILE *fp, struct timestamp_control *ctl) /* va_list version of zlog. */ -static void +void vzlog (struct zlog *zl, int priority, const char *format, va_list args) { char proto_str[32]; @@ -257,6 +262,44 @@ vzlog (struct zlog *zl, int priority, const char *format, va_list args) errno = original_errno; } +int +vzlog_test (struct zlog *zl, int priority) +{ + /* If zlog is not specified, use default one. */ + if (zl == NULL) + zl = zlog_default; + + /* When zlog_default is also NULL, use stderr for logging. */ + if (zl == NULL) + { + return 1; + } + + /* Syslog output */ + if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG]) + { + return 1; + } + + /* File output. */ + if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp) + { + return 1; + } + + /* stdout output. */ + if (priority <= zl->maxlvl[ZLOG_DEST_STDOUT]) + { + return 1; + } + + /* Terminal monitor. */ + if (priority <= zl->maxlvl[ZLOG_DEST_MONITOR]) + return 1; + + return 0; +} + static char * str_append(char *dst, int len, const char *src) { @@ -679,6 +722,7 @@ _zlog_assert_failed (const char *assertion, const char *file, assertion,file,line,(function ? function : "?")); zlog_backtrace(LOG_CRIT); zlog_thread_info(LOG_CRIT); + log_memstats_stderr ("log"); abort(); } @@ -938,6 +982,12 @@ static const struct zebra_desc_table command_types[] = { DESC_ENTRY (ZEBRA_INTERFACE_ENABLE_RADV), DESC_ENTRY (ZEBRA_INTERFACE_DISABLE_RADV), DESC_ENTRY (ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB), + DESC_ENTRY (ZEBRA_MPLS_LABELS_ADD), + DESC_ENTRY (ZEBRA_MPLS_LABELS_DELETE), + DESC_ENTRY (ZEBRA_IPV4_NEXTHOP_ADD), + DESC_ENTRY (ZEBRA_IPV4_NEXTHOP_DELETE), + DESC_ENTRY (ZEBRA_IPV6_NEXTHOP_ADD), + DESC_ENTRY (ZEBRA_IPV6_NEXTHOP_DELETE), }; #undef DESC_ENTRY @@ -1026,6 +1076,10 @@ proto_redistnum(int afi, const char *s) return ZEBRA_ROUTE_BGP; else if (strncmp (s, "ta", 2) == 0) return ZEBRA_ROUTE_TABLE; + else if (strncmp (s, "v", 1) == 0) + return ZEBRA_ROUTE_VNC; + else if (strncmp (s, "vd", 1) == 0) + return ZEBRA_ROUTE_VNC_DIRECT; } if (afi == AFI_IP6) { @@ -1045,6 +1099,10 @@ proto_redistnum(int afi, const char *s) return ZEBRA_ROUTE_BGP; else if (strncmp (s, "ta", 2) == 0) return ZEBRA_ROUTE_TABLE; + else if (strncmp (s, "v", 1) == 0) + return ZEBRA_ROUTE_VNC; + else if (strncmp (s, "vd", 1) == 0) + return ZEBRA_ROUTE_VNC_DIRECT; } return -1; } diff --git a/lib/log.h b/lib/log.h index b5edc75f16..cd4cd1495a 100644 --- a/lib/log.h +++ b/lib/log.h @@ -41,6 +41,10 @@ * please use LOG_ERR instead. 
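In log.c above, vzlog() is exported and the new vzlog_test() reports whether a given priority would reach any destination (syslog, file, stdout or a terminal monitor), so callers can skip building expensive messages that nobody would see. A small sketch of a varargs wrapper on top of the two (the wrapper name is illustrative):

    #include <stdarg.h>
    #include "log.h"

    void
    my_debug (struct zlog *zl, const char *fmt, ...)
    {
        va_list ap;

        if (!vzlog_test (zl, LOG_DEBUG))   /* nothing listening at this level */
            return;

        va_start (ap, fmt);
        vzlog (zl, LOG_DEBUG, fmt, ap);
        va_end (ap);
    }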
*/ +/* + * This must be kept in the same order as + * zlog_proto_names[] + */ typedef enum { ZLOG_NONE, @@ -51,9 +55,10 @@ typedef enum ZLOG_OSPF, ZLOG_RIPNG, ZLOG_OSPF6, + ZLOG_LDP, ZLOG_ISIS, ZLOG_PIM, - ZLOG_MASC + ZLOG_RFP, } zlog_proto_t; /* If maxlvl is set to ZLOG_DISABLED, then no messages will be sent @@ -115,12 +120,15 @@ extern void zlog (struct zlog *zl, int priority, const char *format, ...) PRINTF_ATTRIBUTE(3, 4); /* Handy zlog functions. */ +extern void vzlog (struct zlog *zl, int priority, const char *format, va_list args); extern void zlog_err (const char *format, ...) PRINTF_ATTRIBUTE(1, 2); extern void zlog_warn (const char *format, ...) PRINTF_ATTRIBUTE(1, 2); extern void zlog_info (const char *format, ...) PRINTF_ATTRIBUTE(1, 2); extern void zlog_notice (const char *format, ...) PRINTF_ATTRIBUTE(1, 2); extern void zlog_debug (const char *format, ...) PRINTF_ATTRIBUTE(1, 2); +extern void vzlog (struct zlog *, int , const char *, va_list ); + extern void zlog_thread_info (int log_level); /* Set logging level for the given destination. If the log_level @@ -180,6 +188,10 @@ extern size_t quagga_timestamp(int timestamp_precision /* # subsecond digits */, extern void zlog_hexdump(const void *mem, unsigned int len); + +extern int +vzlog_test (struct zlog *zl, int priority); + /* structure useful for avoiding repeated rendering of the same timestamp */ struct timestamp_control { size_t len; /* length of rendered timestamp */ diff --git a/lib/memory.c b/lib/memory.c index 38e424da7d..99b191c2be 100644 --- a/lib/memory.c +++ b/lib/memory.c @@ -134,7 +134,7 @@ qmem_exit_walker (void *arg, struct memgroup *mg, struct memtype *mt) char size[32]; eda->error++; snprintf (size, sizeof (size), "%10zu", mt->size); - fprintf (stderr, "%s: %-30s: %6zu * %s\n", + fprintf (stderr, "%s: memstats: %-30s: %6zu * %s\n", eda->prefix, mt->name, mt->n_alloc, mt->size == SIZE_VAR ? "(variably sized)" : size); } diff --git a/lib/memory_vty.c b/lib/memory_vty.c index e4cb295cf0..ff0363d45a 100644 --- a/lib/memory_vty.c +++ b/lib/memory_vty.c @@ -82,13 +82,15 @@ static int qmem_walker(void *arg, struct memgroup *mg, struct memtype *mt) if (!mt) vty_out (vty, "--- qmem %s ---%s", mg->name, VTY_NEWLINE); else { - char size[32]; - snprintf(size, sizeof(size), "%6zu", mt->size); - vty_out (vty, "%-30s: %10zu %s%s", - mt->name, mt->n_alloc, - mt->size == 0 ? "" : - mt->size == SIZE_VAR ? "(variably sized)" : - size, VTY_NEWLINE); + if (mt->n_alloc != 0) { + char size[32]; + snprintf(size, sizeof(size), "%6zu", mt->size); + vty_out (vty, "%-30s: %10zu %s%s", + mt->name, mt->n_alloc, + mt->size == 0 ? "" : + mt->size == SIZE_VAR ? "(variably sized)" : + size, VTY_NEWLINE); + } } return 0; } @@ -111,11 +113,7 @@ DEFUN (show_memory, void memory_init (void) { - install_element (RESTRICTED_NODE, &show_memory_cmd); - install_element (VIEW_NODE, &show_memory_cmd); - - install_element (ENABLE_NODE, &show_memory_cmd); } /* Stats querying from users */ diff --git a/lib/mpls.h b/lib/mpls.h new file mode 100644 index 0000000000..1f77aaa536 --- /dev/null +++ b/lib/mpls.h @@ -0,0 +1,190 @@ +/* + * MPLS definitions + * Copyright 2015 Cumulus Networks, Inc. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2, or (at your + * option) any later version. 
+ * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef _QUAGGA_MPLS_H +#define _QUAGGA_MPLS_H + +/* Well-known MPLS label values (RFC 3032 etc). */ +#define MPLS_V4_EXP_NULL_LABEL 0 +#define MPLS_RA_LABEL 1 +#define MPLS_V6_EXP_NULL_LABEL 2 +#define MPLS_IMP_NULL_LABEL 3 +#define MPLS_ENTROPY_LABEL_INDICATOR 7 +#define MPLS_GAL_LABEL 13 +#define MPLS_OAM_ALERT_LABEL 14 +#define MPLS_EXTENSION_LABEL 15 + +/* Minimum and maximum label values */ +#define MPLS_MIN_RESERVED_LABEL 0 +#define MPLS_MAX_RESERVED_LABEL 15 +#define MPLS_MIN_UNRESERVED_LABEL 16 +#define MPLS_MAX_UNRESERVED_LABEL 1048575 + +#define IS_MPLS_RESERVED_LABEL(label) \ + (label >= MPLS_MIN_RESERVED_LABEL && label <= MPLS_MAX_RESERVED_LABEL) + +#define IS_MPLS_UNRESERVED_LABEL(label) \ + (label >= MPLS_MIN_UNRESERVED_LABEL && label <= MPLS_MAX_UNRESERVED_LABEL) + +/* Definitions for a MPLS label stack entry (RFC 3032). This encodes the + * label, EXP, BOS and TTL fields. + */ +typedef unsigned int mpls_lse_t; + +#define MPLS_LS_LABEL_MASK 0xFFFFF000 +#define MPLS_LS_LABEL_SHIFT 12 +#define MPLS_LS_EXP_MASK 0x00000E00 +#define MPLS_LS_EXP_SHIFT 9 +#define MPLS_LS_S_MASK 0x00000100 +#define MPLS_LS_S_SHIFT 8 +#define MPLS_LS_TTL_MASK 0x000000FF +#define MPLS_LS_TTL_SHIFT 0 + +#define MPLS_LABEL_VALUE(lse) \ + ((lse & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT) +#define MPLS_LABEL_EXP(lse) \ + ((lse & MPLS_LS_EXP_MASK) >> MPLS_LS_EXP_SHIFT) +#define MPLS_LABEL_BOS(lse) \ + ((lse & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT) +#define MPLS_LABEL_TTL(lse) \ + ((lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT) + +#define IS_MPLS_LABEL_BOS(ls) (MPLS_LABEL_BOS(ls) == 1) + +#define MPLS_LABEL_LEN_BITS 20 + +/* MPLS label value as a 32-bit (mostly we only care about the label value). */ +typedef unsigned int mpls_label_t; + +#define MPLS_NO_LABEL 0xFFFFFFFF +#define MPLS_INVALID_LABEL 0xFFFFFFFF + +/* LSP types. */ +enum lsp_types_t +{ + ZEBRA_LSP_NONE = 0, /* No LSP. */ + ZEBRA_LSP_STATIC = 1, /* Static LSP. */ + ZEBRA_LSP_LDP = 2 /* LDP LSP. */ +}; + +/* Functions for basic label operations. */ + +/* Encode a label stack entry from fields; convert to network byte-order as + * the Netlink interface expects MPLS labels to be in this format. + */ +static inline mpls_lse_t +mpls_lse_encode (mpls_label_t label, u_int32_t ttl, + u_int32_t exp, u_int32_t bos) +{ + mpls_lse_t lse; + lse = htonl ((label << MPLS_LS_LABEL_SHIFT) | + (exp << MPLS_LS_EXP_SHIFT) | + (bos ? (1 << MPLS_LS_S_SHIFT) : 0) | + (ttl << MPLS_LS_TTL_SHIFT)); + return lse; +} + +/* Extract the fields from a label stack entry after converting to host-byte + * order. This is expected to be called only for messages received over the + * Netlink interface. 
+ */ +static inline void +mpls_lse_decode (mpls_lse_t lse, mpls_label_t *label, + u_int32_t *ttl, u_int32_t *exp, u_int32_t *bos) +{ + mpls_lse_t local_lse; + + local_lse = ntohl (lse); + *label = MPLS_LABEL_VALUE(local_lse); + *exp = MPLS_LABEL_EXP(local_lse); + *bos = MPLS_LABEL_BOS(local_lse); + *ttl = MPLS_LABEL_TTL(local_lse); +} + + +/* Printable string for labels (with consideration for reserved values). */ +static inline char * +label2str (mpls_label_t label, char *buf, int len) +{ + switch(label) { + case MPLS_V4_EXP_NULL_LABEL: + strncpy(buf, "IPv4 Explicit Null", len); + return(buf); + break; + case MPLS_RA_LABEL: + strncpy(buf, "Router Alert", len); + return(buf); + break; + case MPLS_V6_EXP_NULL_LABEL: + strncpy(buf, "IPv6 Explict Null", len); + return(buf); + break; + case MPLS_IMP_NULL_LABEL: + strncpy(buf, "implicit-null", len); + return(buf); + break; + case MPLS_ENTROPY_LABEL_INDICATOR: + strncpy(buf, "Entropy Label Indicator", len); + return(buf); + break; + case MPLS_GAL_LABEL: + strncpy(buf, "Generic Associated Channel", len); + return(buf); + break; + case MPLS_OAM_ALERT_LABEL: + strncpy(buf, "OAM Alert", len); + return(buf); + break; + case MPLS_EXTENSION_LABEL: + strncpy(buf, "Extension", len); + return(buf); + break; + case 4: + case 5: + case 6: + case 8: + case 9: + case 10: + case 11: + case 12: + strncpy(buf, "Reserved", len); + return(buf); + break; + default: + sprintf(buf, "%u", label); + return(buf); + } + + strncpy(buf, "Error", len); + return(buf); +} + +/* constants used by ldpd */ +#define MPLS_LABEL_IPV4NULL 0 /* IPv4 Explicit NULL Label */ +#define MPLS_LABEL_RTALERT 1 /* Router Alert Label */ +#define MPLS_LABEL_IPV6NULL 2 /* IPv6 Explicit NULL Label */ +#define MPLS_LABEL_IMPLNULL 3 /* Implicit NULL Label */ +/* MPLS_LABEL_RESERVED 4-15 */ /* Values 4-15 are reserved */ +#define MPLS_LABEL_RESERVED_MAX 15 +#define MPLS_LABEL_MAX ((1 << 20) - 1) + +#endif diff --git a/lib/nexthop.c b/lib/nexthop.c index 427f77f87a..23ee28b7dc 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -32,8 +32,10 @@ #include "thread.h" #include "prefix.h" #include "nexthop.h" +#include "mpls.h" -DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop") +DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop") +DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label") /* check if nexthops are same, non-recursive */ int @@ -127,6 +129,9 @@ copy_nexthops (struct nexthop **tnh, struct nexthop *nh) nexthop->ifindex = nh->ifindex; memcpy(&(nexthop->gate), &(nh->gate), sizeof(union g_addr)); memcpy(&(nexthop->src), &(nh->src), sizeof(union g_addr)); + if (nh->nh_label) + nexthop_add_labels (nexthop, nh->nh_label_type, + nh->nh_label->num_labels, &nh->nh_label->label[0]); nexthop_add(tnh, nexthop); if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_RECURSIVE)) @@ -138,6 +143,7 @@ copy_nexthops (struct nexthop **tnh, struct nexthop *nh) void nexthop_free (struct nexthop *nexthop) { + nexthop_del_labels (nexthop); if (nexthop->resolved) nexthops_free(nexthop->resolved); XFREE (MTYPE_NEXTHOP, nexthop); @@ -156,6 +162,34 @@ nexthops_free (struct nexthop *nexthop) } } +/* Update nexthop with label information. 
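mpls_lse_encode() above packs the label, EXP, bottom-of-stack and TTL fields and converts to network byte order for netlink, and mpls_lse_decode() reverses the operation, so a label stack entry survives the round trip. For example:

    mpls_lse_t lse;
    mpls_label_t label;
    u_int32_t ttl, exp, bos;

    /* label 16 (first unreserved value), TTL 64, EXP 0, bottom of stack */
    lse = mpls_lse_encode (16, 64, 0, 1);
    mpls_lse_decode (lse, &label, &ttl, &exp, &bos);
    /* now label == 16, ttl == 64, exp == 0 and bos == 1 */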
*/ +void +nexthop_add_labels (struct nexthop *nexthop, enum lsp_types_t type, + u_int8_t num_labels, mpls_label_t *label) +{ + struct nexthop_label *nh_label; + int i; + + nexthop->nh_label_type = type; + nh_label = XCALLOC (MTYPE_NH_LABEL, sizeof (struct nexthop_label) + + num_labels * sizeof (mpls_label_t)); + nh_label->num_labels = num_labels; + for (i = 0; i < num_labels; i++) + nh_label->label[i] = *(label + i); + nexthop->nh_label = nh_label; +} + +/* Free label information of nexthop, if present. */ +void +nexthop_del_labels (struct nexthop *nexthop) +{ + if (nexthop->nh_label) + { + XFREE (MTYPE_NH_LABEL, nexthop->nh_label); + nexthop->nh_label_type = ZEBRA_LSP_NONE; + } +} + const char * nexthop2str (struct nexthop *nexthop, char *str, int size) { diff --git a/lib/nexthop.h b/lib/nexthop.h index 39e8b5425f..e66e0eee20 100644 --- a/lib/nexthop.h +++ b/lib/nexthop.h @@ -25,6 +25,7 @@ #define _LIB_NEXTHOP_H #include "prefix.h" +#include "mpls.h" /* Maximum next hop string length - gateway + ifindex */ #define NEXTHOP_STRLEN (INET6_ADDRSTRLEN + 30) @@ -44,6 +45,14 @@ enum nexthop_types_t NEXTHOP_TYPE_BLACKHOLE, /* Null0 nexthop. */ }; +/* Nexthop label structure. */ +struct nexthop_label +{ + u_int8_t num_labels; + u_int8_t reserved[3]; + mpls_label_t label[0]; /* 1 or more labels. */ +}; + /* Nexthop structure. */ struct nexthop { @@ -75,6 +84,12 @@ struct nexthop * obtained by recursive resolution will be added to `resolved'. * Only one level of recursive resolution is currently supported. */ struct nexthop *resolved; + + /* Type of label(s), if any */ + enum lsp_types_t nh_label_type; + + /* Label(s) associated with this nexthop. */ + struct nexthop_label *nh_label; }; extern int zebra_rnh_ip_default_route; @@ -97,6 +112,9 @@ void copy_nexthops (struct nexthop **tnh, struct nexthop *nh); void nexthop_free (struct nexthop *nexthop); void nexthops_free (struct nexthop *nexthop); +void nexthop_add_labels (struct nexthop *, enum lsp_types_t, u_int8_t, mpls_label_t *); +void nexthop_del_labels (struct nexthop *); + extern const char *nexthop_type_to_str (enum nexthop_types_t nh_type); extern int nexthop_same_no_recurse (struct nexthop *next1, struct nexthop *next2); diff --git a/lib/openbsd-queue.h b/lib/openbsd-queue.h new file mode 100644 index 0000000000..5e81fdd13d --- /dev/null +++ b/lib/openbsd-queue.h @@ -0,0 +1,533 @@ +/* $OpenBSD: queue.h,v 1.43 2015/12/28 19:38:40 millert Exp $ */ +/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */ + +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
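With the nexthop changes above, label stacks are attached and released through the two helpers rather than by manipulating nh_label directly; copy_nexthops() and nexthop_free() already rely on them. A sketch, assuming nexthop points at an allocated struct nexthop and using illustrative label values:

    mpls_label_t labels[2] = { 100, 200 };

    nexthop_add_labels (nexthop, ZEBRA_LSP_STATIC, 2, labels);
    /* nexthop->nh_label->num_labels is now 2, nh_label_type is ZEBRA_LSP_STATIC */

    nexthop_del_labels (nexthop);   /* frees nh_label, resets the type to ZEBRA_LSP_NONE */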
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef _SYS_QUEUE_H_ +#define _SYS_QUEUE_H_ + +/* + * This file defines five types of data structures: singly-linked lists, + * lists, simple queues, tail queues and XOR simple queues. + * + * + * A singly-linked list is headed by a single forward pointer. The elements + * are singly linked for minimum space and pointer manipulation overhead at + * the expense of O(n) removal for arbitrary elements. New elements can be + * added to the list after an existing element or at the head of the list. + * Elements being removed from the head of the list should use the explicit + * macro for this purpose for optimum efficiency. A singly-linked list may + * only be traversed in the forward direction. Singly-linked lists are ideal + * for applications with large datasets and few or no removals or for + * implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A simple queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * An XOR simple queue is used in the same way as a regular simple queue. + * The difference is that the head structure also includes a "cookie" that + * is XOR'd with the queue pointer (first, last or next) to generate the + * real pointer value. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC)) +#define _Q_INVALIDATE(a) (a) = ((void *)-1) +#else +#define _Q_INVALIDATE(a) +#endif + +/* + * Singly-linked List definitions. 
+ */ +#define SLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define SLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define SLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List access methods. + */ +#define SLIST_FIRST(head) ((head)->slh_first) +#define SLIST_END(head) NULL +#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head)) +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + +#define SLIST_FOREACH(var, head, field) \ + for((var) = SLIST_FIRST(head); \ + (var) != SLIST_END(head); \ + (var) = SLIST_NEXT(var, field)) + +#define SLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = SLIST_FIRST(head); \ + (var) && ((tvar) = SLIST_NEXT(var, field), 1); \ + (var) = (tvar)) + +/* + * Singly-linked List functions. + */ +#define SLIST_INIT(head) { \ + SLIST_FIRST(head) = SLIST_END(head); \ +} + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (0) + +#define SLIST_REMOVE_AFTER(elm, field) do { \ + (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \ +} while (0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (0) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if ((head)->slh_first == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->slh_first; \ + \ + while (curelm->field.sle_next != (elm)) \ + curelm = curelm->field.sle_next; \ + curelm->field.sle_next = \ + curelm->field.sle_next->field.sle_next; \ + } \ + _Q_INVALIDATE((elm)->field.sle_next); \ +} while (0) + +/* + * List definitions. + */ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List access methods. + */ +#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_END(head) NULL +#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head)) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_FOREACH(var, head, field) \ + for((var) = LIST_FIRST(head); \ + (var)!= LIST_END(head); \ + (var) = LIST_NEXT(var, field)) + +#define LIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = LIST_FIRST(head); \ + (var) && ((tvar) = LIST_NEXT(var, field), 1); \ + (var) = (tvar)) + +/* + * List functions. 
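The singly-linked list macros above are used by embedding an SLIST_ENTRY in the element type; insertion at the head is O(1), traversal is forward-only, and the _SAFE iterator permits unlinking the current element. A brief sketch with a hypothetical struct peer:

    struct peer {
        int               id;
        SLIST_ENTRY(peer) entry;
    };
    SLIST_HEAD(peer_list, peer) peers = SLIST_HEAD_INITIALIZER(peers);

    struct peer *p, *tmp;

    SLIST_INSERT_HEAD(&peers, new_peer, entry);   /* new_peer: assumed allocated */

    SLIST_FOREACH_SAFE(p, &peers, entry, tmp) {
        SLIST_REMOVE(&peers, p, peer, entry);     /* O(n) for arbitrary elements */
        free(p);
    }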
+ */ +#define LIST_INIT(head) do { \ + LIST_FIRST(head) = LIST_END(head); \ +} while (0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (0) + +#define LIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ + _Q_INVALIDATE((elm)->field.le_prev); \ + _Q_INVALIDATE((elm)->field.le_next); \ +} while (0) + +#define LIST_REPLACE(elm, elm2, field) do { \ + if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \ + (elm2)->field.le_next->field.le_prev = \ + &(elm2)->field.le_next; \ + (elm2)->field.le_prev = (elm)->field.le_prev; \ + *(elm2)->field.le_prev = (elm2); \ + _Q_INVALIDATE((elm)->field.le_prev); \ + _Q_INVALIDATE((elm)->field.le_next); \ +} while (0) + +/* + * Simple queue definitions. + */ +#define SIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define SIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue access methods. + */ +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_END(head) NULL +#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + +#define SIMPLEQ_FOREACH(var, head, field) \ + for((var) = SIMPLEQ_FIRST(head); \ + (var) != SIMPLEQ_END(head); \ + (var) = SIMPLEQ_NEXT(var, field)) + +#define SIMPLEQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = SIMPLEQ_FIRST(head); \ + (var) && ((tvar) = SIMPLEQ_NEXT(var, field), 1); \ + (var) = (tvar)) + +/* + * Simple queue functions. 
+ */ +#define SIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (0) + +#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (0) + +#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (0) + +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (0) + +#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (0) + +#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \ + if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \ + == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (0) + +#define SIMPLEQ_CONCAT(head1, head2) do { \ + if (!SIMPLEQ_EMPTY((head2))) { \ + *(head1)->sqh_last = (head2)->sqh_first; \ + (head1)->sqh_last = (head2)->sqh_last; \ + SIMPLEQ_INIT((head2)); \ + } \ +} while (0) + +/* + * XOR Simple queue definitions. + */ +#define XSIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqx_first; /* first element */ \ + struct type **sqx_last; /* addr of last next element */ \ + unsigned long sqx_cookie; \ +} + +#define XSIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqx_next; /* next element */ \ +} + +/* + * XOR Simple queue access methods. + */ +#define XSIMPLEQ_XOR(head, ptr) ((__typeof(ptr))((head)->sqx_cookie ^ \ + (unsigned long)(ptr))) +#define XSIMPLEQ_FIRST(head) XSIMPLEQ_XOR(head, ((head)->sqx_first)) +#define XSIMPLEQ_END(head) NULL +#define XSIMPLEQ_EMPTY(head) (XSIMPLEQ_FIRST(head) == XSIMPLEQ_END(head)) +#define XSIMPLEQ_NEXT(head, elm, field) XSIMPLEQ_XOR(head, ((elm)->field.sqx_next)) + + +#define XSIMPLEQ_FOREACH(var, head, field) \ + for ((var) = XSIMPLEQ_FIRST(head); \ + (var) != XSIMPLEQ_END(head); \ + (var) = XSIMPLEQ_NEXT(head, var, field)) + +#define XSIMPLEQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = XSIMPLEQ_FIRST(head); \ + (var) && ((tvar) = XSIMPLEQ_NEXT(head, var, field), 1); \ + (var) = (tvar)) + +/* + * XOR Simple queue functions. 
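A simple queue keeps head and tail pointers but links elements singly, which makes it a natural FIFO: insert at the tail, remove from the head. A compact sketch (struct job is hypothetical):

    struct job {
        int                id;
        SIMPLEQ_ENTRY(job) entry;
    };
    SIMPLEQ_HEAD(, job) jobs = SIMPLEQ_HEAD_INITIALIZER(jobs);

    SIMPLEQ_INSERT_TAIL(&jobs, new_job, entry);   /* enqueue; new_job assumed allocated */

    while (!SIMPLEQ_EMPTY(&jobs)) {
        struct job *j = SIMPLEQ_FIRST(&jobs);
        SIMPLEQ_REMOVE_HEAD(&jobs, entry);        /* dequeue from the head */
        free(j);
    }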
+ */ +#define XSIMPLEQ_INIT(head) do { \ + arc4random_buf(&(head)->sqx_cookie, sizeof((head)->sqx_cookie)); \ + (head)->sqx_first = XSIMPLEQ_XOR(head, NULL); \ + (head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first); \ +} while (0) + +#define XSIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqx_next = (head)->sqx_first) == \ + XSIMPLEQ_XOR(head, NULL)) \ + (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \ + (head)->sqx_first = XSIMPLEQ_XOR(head, (elm)); \ +} while (0) + +#define XSIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqx_next = XSIMPLEQ_XOR(head, NULL); \ + *(XSIMPLEQ_XOR(head, (head)->sqx_last)) = XSIMPLEQ_XOR(head, (elm)); \ + (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \ +} while (0) + +#define XSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqx_next = (listelm)->field.sqx_next) == \ + XSIMPLEQ_XOR(head, NULL)) \ + (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \ + (listelm)->field.sqx_next = XSIMPLEQ_XOR(head, (elm)); \ +} while (0) + +#define XSIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqx_first = XSIMPLEQ_XOR(head, \ + (head)->sqx_first)->field.sqx_next) == XSIMPLEQ_XOR(head, NULL)) \ + (head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first); \ +} while (0) + +#define XSIMPLEQ_REMOVE_AFTER(head, elm, field) do { \ + if (((elm)->field.sqx_next = XSIMPLEQ_XOR(head, \ + (elm)->field.sqx_next)->field.sqx_next) \ + == XSIMPLEQ_XOR(head, NULL)) \ + (head)->sqx_last = \ + XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \ +} while (0) + + +/* + * Tail queue definitions. + */ +#define TAILQ_HEAD(name, type) \ +struct name { \ + struct type *tqh_first; /* first element */ \ + struct type **tqh_last; /* addr of last next element */ \ +} + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} + +/* + * Tail queue access methods. + */ +#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_END(head) NULL +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +/* XXX */ +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) +#define TAILQ_EMPTY(head) \ + (TAILQ_FIRST(head) == TAILQ_END(head)) + +#define TAILQ_FOREACH(var, head, field) \ + for((var) = TAILQ_FIRST(head); \ + (var) != TAILQ_END(head); \ + (var) = TAILQ_NEXT(var, field)) + +#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = TAILQ_FIRST(head); \ + (var) != TAILQ_END(head) && \ + ((tvar) = TAILQ_NEXT(var, field), 1); \ + (var) = (tvar)) + + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for((var) = TAILQ_LAST(head, headname); \ + (var) != TAILQ_END(head); \ + (var) = TAILQ_PREV(var, headname, field)) + +#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ + for ((var) = TAILQ_LAST(head, headname); \ + (var) != TAILQ_END(head) && \ + ((tvar) = TAILQ_PREV(var, headname, field), 1); \ + (var) = (tvar)) + +/* + * Tail queue functions. 
+ */ +#define TAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ + _Q_INVALIDATE((elm)->field.tqe_prev); \ + _Q_INVALIDATE((elm)->field.tqe_next); \ +} while (0) + +#define TAILQ_REPLACE(head, elm, elm2, field) do { \ + if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \ + (elm2)->field.tqe_next->field.tqe_prev = \ + &(elm2)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm2)->field.tqe_next; \ + (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \ + *(elm2)->field.tqe_prev = (elm2); \ + _Q_INVALIDATE((elm)->field.tqe_prev); \ + _Q_INVALIDATE((elm)->field.tqe_next); \ +} while (0) + +#define TAILQ_CONCAT(head1, head2, field) do { \ + if (!TAILQ_EMPTY(head2)) { \ + *(head1)->tqh_last = (head2)->tqh_first; \ + (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ + (head1)->tqh_last = (head2)->tqh_last; \ + TAILQ_INIT((head2)); \ + } \ +} while (0) + +#endif /* !_SYS_QUEUE_H_ */ diff --git a/lib/openbsd-tree.h b/lib/openbsd-tree.h new file mode 100644 index 0000000000..e6502b1e74 --- /dev/null +++ b/lib/openbsd-tree.h @@ -0,0 +1,748 @@ +/* $OpenBSD: tree.h,v 1.14 2015/05/25 03:07:49 deraadt Exp $ */ +/* + * Copyright 2002 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
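TAILQ entries also carry a back pointer, so unlike SIMPLEQ the tail queue supports O(1) removal of an arbitrary element, insertion before or after a given element, and reverse traversal. A minimal usage sketch; struct, field, and include names are illustrative:

/* Minimal TAILQ usage sketch; names and include path are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include "openbsd-queue.h"

struct node {
	int value;
	TAILQ_ENTRY(node) entries;
};

TAILQ_HEAD(nodelist, node);

int main(void)
{
	struct nodelist list;
	struct node *n, *tmp;
	int i;

	TAILQ_INIT(&list);
	for (i = 0; i < 4; i++) {
		n = calloc(1, sizeof(*n));
		n->value = i;
		TAILQ_INSERT_TAIL(&list, n, entries);
	}

	/* reverse traversal needs the head struct's tag ("nodelist") */
	TAILQ_FOREACH_REVERSE(n, &list, nodelist, entries)
		printf("%d\n", n->value);

	/* the _SAFE variant allows unlinking/freeing the current element */
	TAILQ_FOREACH_SAFE(n, &list, entries, tmp) {
		TAILQ_REMOVE(&list, n, entries);
		free(n);
	}
	return 0;
}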
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_TREE_H_ +#define _SYS_TREE_H_ + +/* + * This file defines data structures for different types of trees: + * splay trees and red-black trees. + * + * A splay tree is a self-organizing data structure. Every operation + * on the tree causes a splay to happen. The splay moves the requested + * node to the root of the tree and partly rebalances it. + * + * This has the benefit that request locality causes faster lookups as + * the requested nodes move to the top of the tree. On the other hand, + * every lookup causes memory writes. + * + * The Balance Theorem bounds the total access time for m operations + * and n inserts on an initially empty tree as O((m + n)lg n). The + * amortized cost for a sequence of m accesses to a splay tree is O(lg n); + * + * A red-black tree is a binary search tree with the node color as an + * extra attribute. It fulfills a set of conditions: + * - every search path from the root to a leaf consists of the + * same number of black nodes, + * - each red node (except for the root) has a black parent, + * - each leaf node is black. + * + * Every operation on a red-black tree is bounded as O(lg n). + * The maximum height of a red-black tree is 2lg (n+1). + */ + +#define SPLAY_HEAD(name, type) \ +struct name { \ + struct type *sph_root; /* root of the tree */ \ +} + +#define SPLAY_INITIALIZER(root) \ + { NULL } + +#define SPLAY_INIT(root) do { \ + (root)->sph_root = NULL; \ +} while (0) + +#define SPLAY_ENTRY(type) \ +struct { \ + struct type *spe_left; /* left element */ \ + struct type *spe_right; /* right element */ \ +} + +#define SPLAY_LEFT(elm, field) (elm)->field.spe_left +#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right +#define SPLAY_ROOT(head) (head)->sph_root +#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) + +/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ +#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (0) + +#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (0) + +#define SPLAY_LINKLEFT(head, tmp, field) do { \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ +} while (0) + +#define SPLAY_LINKRIGHT(head, tmp, field) do { \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ +} while (0) + +#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ + SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ + SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ +} 
while (0) + +/* Generates prototypes and inline functions */ + +#define SPLAY_PROTOTYPE(name, type, field, cmp) \ +void name##_SPLAY(struct name *, struct type *); \ +void name##_SPLAY_MINMAX(struct name *, int); \ +struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ +struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ + \ +/* Finds the node with the same key as elm */ \ +static __inline struct type * \ +name##_SPLAY_FIND(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) \ + return(NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) \ + return (head->sph_root); \ + return (NULL); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_NEXT(struct name *head, struct type *elm) \ +{ \ + name##_SPLAY(head, elm); \ + if (SPLAY_RIGHT(elm, field) != NULL) { \ + elm = SPLAY_RIGHT(elm, field); \ + while (SPLAY_LEFT(elm, field) != NULL) { \ + elm = SPLAY_LEFT(elm, field); \ + } \ + } else \ + elm = NULL; \ + return (elm); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_MIN_MAX(struct name *head, int val) \ +{ \ + name##_SPLAY_MINMAX(head, val); \ + return (SPLAY_ROOT(head)); \ +} + +/* Main splay operation. + * Moves node close to the key of elm to top + */ +#define SPLAY_GENERATE(name, type, field, cmp) \ +struct type * \ +name##_SPLAY_INSERT(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) { \ + SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ + } else { \ + int __comp; \ + name##_SPLAY(head, elm); \ + __comp = (cmp)(elm, (head)->sph_root); \ + if(__comp < 0) { \ + SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ + SPLAY_RIGHT(elm, field) = (head)->sph_root; \ + SPLAY_LEFT((head)->sph_root, field) = NULL; \ + } else if (__comp > 0) { \ + SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ + SPLAY_LEFT(elm, field) = (head)->sph_root; \ + SPLAY_RIGHT((head)->sph_root, field) = NULL; \ + } else \ + return ((head)->sph_root); \ + } \ + (head)->sph_root = (elm); \ + return (NULL); \ +} \ + \ +struct type * \ +name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *__tmp; \ + if (SPLAY_EMPTY(head)) \ + return (NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) { \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ + } else { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ + name##_SPLAY(head, elm); \ + SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ + } \ + return (elm); \ + } \ + return (NULL); \ +} \ + \ +void \ +name##_SPLAY(struct name *head, struct type *elm) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ + int __comp; \ +\ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ + __left = __right = &__node; \ +\ + while ((__comp = (cmp)(elm, (head)->sph_root))) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) > 0){ \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + 
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} \ + \ +/* Splay with either the minimum or the maximum element \ + * Used to find minimum or maximum element in tree. \ + */ \ +void name##_SPLAY_MINMAX(struct name *head, int __comp) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ +\ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ + __left = __right = &__node; \ +\ + while (1) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp > 0) { \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} + +#define SPLAY_NEGINF -1 +#define SPLAY_INF 1 + +#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) +#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) +#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) +#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) +#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) +#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) + +#define SPLAY_FOREACH(x, name, head) \ + for ((x) = SPLAY_MIN(name, head); \ + (x) != NULL; \ + (x) = SPLAY_NEXT(name, head, x)) + +/* Macros that define a red-black tree */ +#define RB_HEAD(name, type) \ +struct name { \ + struct type *rbh_root; /* root of the tree */ \ +} + +#define RB_INITIALIZER(root) \ + { NULL } + +#define RB_INIT(root) do { \ + (root)->rbh_root = NULL; \ +} while (0) + +#define RB_BLACK 0 +#define RB_RED 1 +#define RB_ENTRY(type) \ +struct { \ + struct type *rbe_left; /* left element */ \ + struct type *rbe_right; /* right element */ \ + struct type *rbe_parent; /* parent element */ \ + int rbe_color; /* node color */ \ +} + +#define RB_LEFT(elm, field) (elm)->field.rbe_left +#define RB_RIGHT(elm, field) (elm)->field.rbe_right +#define RB_PARENT(elm, field) (elm)->field.rbe_parent +#define RB_COLOR(elm, field) (elm)->field.rbe_color +#define RB_ROOT(head) (head)->rbh_root +#define RB_EMPTY(head) (RB_ROOT(head) == NULL) + +#define RB_SET(elm, parent, field) do { \ + RB_PARENT(elm, field) = parent; \ + RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ + RB_COLOR(elm, field) = RB_RED; \ +} while (0) + +#define RB_SET_BLACKRED(black, red, field) do { \ + RB_COLOR(black, field) = RB_BLACK; \ + RB_COLOR(red, field) = RB_RED; \ +} while (0) + +#ifndef RB_AUGMENT +#define RB_AUGMENT(x) do {} while (0) +#endif + +#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \ + (tmp) = RB_RIGHT(elm, field); \ + if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \ + RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_LEFT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ +} while 
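Unlike the queue macros, the tree macros are code generators: SPLAY_PROTOTYPE emits declarations and SPLAY_GENERATE emits the function bodies for a given node type, link field, and comparison function, and the SPLAY_INSERT/SPLAY_FIND/SPLAY_FOREACH wrappers dispatch to those generated functions by name. A minimal usage sketch; the type, field, and comparator names are illustrative:

/* Minimal splay-tree usage sketch; names are illustrative.  Node cleanup
 * is omitted for brevity. */
#include <stdio.h>
#include <stdlib.h>
#include "openbsd-tree.h"

struct knode {
	int key;
	SPLAY_ENTRY(knode) entry;
};

static int
knode_cmp(struct knode *a, struct knode *b)
{
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

SPLAY_HEAD(ktree, knode);
SPLAY_PROTOTYPE(ktree, knode, entry, knode_cmp)
SPLAY_GENERATE(ktree, knode, entry, knode_cmp)

int main(void)
{
	struct ktree tree = SPLAY_INITIALIZER(&tree);
	struct knode *n, lookup = { .key = 2 };
	int i;

	for (i = 0; i < 5; i++) {
		n = calloc(1, sizeof(*n));
		n->key = i;
		SPLAY_INSERT(ktree, &tree, n);  /* NULL unless the key already exists */
	}

	/* a successful find splays the matching node to the root */
	n = SPLAY_FIND(ktree, &tree, &lookup);
	printf("found %d\n", n ? n->key : -1);

	SPLAY_FOREACH(n, ktree, &tree)          /* in-order traversal */
		printf("%d\n", n->key);
	return 0;
}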
(0) + +#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \ + (tmp) = RB_LEFT(elm, field); \ + if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \ + RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_RIGHT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ +} while (0) + +/* Generates prototypes and inline functions */ +#define RB_PROTOTYPE(name, type, field, cmp) \ + RB_PROTOTYPE_INTERNAL(name, type, field, cmp,) +#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ + RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __attribute__((__unused__)) static) +#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ +attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \ +attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\ +attr struct type *name##_RB_REMOVE(struct name *, struct type *); \ +attr struct type *name##_RB_INSERT(struct name *, struct type *); \ +attr struct type *name##_RB_FIND(struct name *, struct type *); \ +attr struct type *name##_RB_NFIND(struct name *, struct type *); \ +attr struct type *name##_RB_NEXT(struct type *); \ +attr struct type *name##_RB_PREV(struct type *); \ +attr struct type *name##_RB_MINMAX(struct name *, int); \ + \ + +/* Main rb operation. + * Moves node close to the key of elm to top + */ +#define RB_GENERATE(name, type, field, cmp) \ + RB_GENERATE_INTERNAL(name, type, field, cmp,) +#define RB_GENERATE_STATIC(name, type, field, cmp) \ + RB_GENERATE_INTERNAL(name, type, field, cmp, __attribute__((__unused__)) static) +#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \ +attr void \ +name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ +{ \ + struct type *parent, *gparent, *tmp; \ + while ((parent = RB_PARENT(elm, field)) && \ + RB_COLOR(parent, field) == RB_RED) { \ + gparent = RB_PARENT(parent, field); \ + if (parent == RB_LEFT(gparent, field)) { \ + tmp = RB_RIGHT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field);\ + elm = gparent; \ + continue; \ + } \ + if (RB_RIGHT(parent, field) == elm) { \ + RB_ROTATE_LEFT(head, parent, tmp, field);\ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_RIGHT(head, gparent, tmp, field); \ + } else { \ + tmp = RB_LEFT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field);\ + elm = gparent; \ + continue; \ + } \ + if (RB_LEFT(parent, field) == elm) { \ + RB_ROTATE_RIGHT(head, parent, tmp, field);\ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_LEFT(head, gparent, tmp, field); \ + } \ + } \ + RB_COLOR(head->rbh_root, field) = RB_BLACK; \ +} \ + \ +attr void \ +name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \ +{ \ + struct type *tmp; \ + while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \ + elm != RB_ROOT(head)) { \ + if (RB_LEFT(parent, field) == elm) { \ + tmp = RB_RIGHT(parent, field); \ + if (RB_COLOR(tmp, 
field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_LEFT(head, parent, tmp, field);\ + tmp = RB_RIGHT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\ + struct type *oleft; \ + if ((oleft = RB_LEFT(tmp, field)))\ + RB_COLOR(oleft, field) = RB_BLACK;\ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_RIGHT(head, tmp, oleft, field);\ + tmp = RB_RIGHT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ + RB_COLOR(parent, field) = RB_BLACK; \ + if (RB_RIGHT(tmp, field)) \ + RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\ + RB_ROTATE_LEFT(head, parent, tmp, field);\ + elm = RB_ROOT(head); \ + break; \ + } \ + } else { \ + tmp = RB_LEFT(parent, field); \ + if (RB_COLOR(tmp, field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_RIGHT(head, parent, tmp, field);\ + tmp = RB_LEFT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\ + struct type *oright; \ + if ((oright = RB_RIGHT(tmp, field)))\ + RB_COLOR(oright, field) = RB_BLACK;\ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_LEFT(head, tmp, oright, field);\ + tmp = RB_LEFT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ + RB_COLOR(parent, field) = RB_BLACK; \ + if (RB_LEFT(tmp, field)) \ + RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\ + RB_ROTATE_RIGHT(head, parent, tmp, field);\ + elm = RB_ROOT(head); \ + break; \ + } \ + } \ + } \ + if (elm) \ + RB_COLOR(elm, field) = RB_BLACK; \ +} \ + \ +attr struct type * \ +name##_RB_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *child, *parent, *old = elm; \ + int color; \ + if (RB_LEFT(elm, field) == NULL) \ + child = RB_RIGHT(elm, field); \ + else if (RB_RIGHT(elm, field) == NULL) \ + child = RB_LEFT(elm, field); \ + else { \ + struct type *left; \ + elm = RB_RIGHT(elm, field); \ + while ((left = RB_LEFT(elm, field))) \ + elm = left; \ + child = RB_RIGHT(elm, field); \ + parent = RB_PARENT(elm, field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ + if (RB_PARENT(elm, field) == old) \ + parent = elm; \ + (elm)->field = (old)->field; \ + if (RB_PARENT(old, field)) { \ + if (RB_LEFT(RB_PARENT(old, field), field) == old)\ + RB_LEFT(RB_PARENT(old, field), field) = elm;\ + else \ + RB_RIGHT(RB_PARENT(old, field), field) = elm;\ + RB_AUGMENT(RB_PARENT(old, field)); \ + } else \ + RB_ROOT(head) = elm; \ + RB_PARENT(RB_LEFT(old, field), field) = elm; \ + if (RB_RIGHT(old, field)) \ + RB_PARENT(RB_RIGHT(old, field), field) = elm; \ + if (parent) { \ + left = parent; \ + do { \ + RB_AUGMENT(left); \ + } while ((left = RB_PARENT(left, field))); \ + } \ 
+ goto color; \ + } \ + parent = RB_PARENT(elm, field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ +color: \ + if (color == RB_BLACK) \ + name##_RB_REMOVE_COLOR(head, parent, child); \ + return (old); \ +} \ + \ +/* Inserts a node into the RB tree */ \ +attr struct type * \ +name##_RB_INSERT(struct name *head, struct type *elm) \ +{ \ + struct type *tmp; \ + struct type *parent = NULL; \ + int comp = 0; \ + tmp = RB_ROOT(head); \ + while (tmp) { \ + parent = tmp; \ + comp = (cmp)(elm, parent); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + RB_SET(elm, parent, field); \ + if (parent != NULL) { \ + if (comp < 0) \ + RB_LEFT(parent, field) = elm; \ + else \ + RB_RIGHT(parent, field) = elm; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = elm; \ + name##_RB_INSERT_COLOR(head, elm); \ + return (NULL); \ +} \ + \ +/* Finds the node with the same key as elm */ \ +attr struct type * \ +name##_RB_FIND(struct name *head, struct type *elm) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (NULL); \ +} \ + \ +/* Finds the first node greater than or equal to the search key */ \ +attr struct type * \ +name##_RB_NFIND(struct name *head, struct type *elm) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + struct type *res = NULL; \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) { \ + res = tmp; \ + tmp = RB_LEFT(tmp, field); \ + } \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (res); \ +} \ + \ +/* ARGSUSED */ \ +attr struct type * \ +name##_RB_NEXT(struct type *elm) \ +{ \ + if (RB_RIGHT(elm, field)) { \ + elm = RB_RIGHT(elm, field); \ + while (RB_LEFT(elm, field)) \ + elm = RB_LEFT(elm, field); \ + } else { \ + if (RB_PARENT(elm, field) && \ + (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + else { \ + while (RB_PARENT(elm, field) && \ + (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\ + elm = RB_PARENT(elm, field); \ + elm = RB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ +} \ + \ +/* ARGSUSED */ \ +attr struct type * \ +name##_RB_PREV(struct type *elm) \ +{ \ + if (RB_LEFT(elm, field)) { \ + elm = RB_LEFT(elm, field); \ + while (RB_RIGHT(elm, field)) \ + elm = RB_RIGHT(elm, field); \ + } else { \ + if (RB_PARENT(elm, field) && \ + (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + else { \ + while (RB_PARENT(elm, field) && \ + (elm == RB_LEFT(RB_PARENT(elm, field), field)))\ + elm = RB_PARENT(elm, field); \ + elm = RB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ +} \ + \ +attr struct type * \ +name##_RB_MINMAX(struct name *head, int val) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + struct type *parent = NULL; \ + while (tmp) { \ + parent = tmp; \ + if (val < 0) \ + tmp = RB_LEFT(tmp, field); \ + else \ + tmp = RB_RIGHT(tmp, field); \ + } \ + return (parent); \ +} + +#define RB_NEGINF -1 +#define RB_INF 1 + +#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) +#define RB_REMOVE(name, x, y) 
name##_RB_REMOVE(x, y) +#define RB_FIND(name, x, y) name##_RB_FIND(x, y) +#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) +#define RB_NEXT(name, x, y) name##_RB_NEXT(y) +#define RB_PREV(name, x, y) name##_RB_PREV(y) +#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) +#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) + +#define RB_FOREACH(x, name, head) \ + for ((x) = RB_MIN(name, head); \ + (x) != NULL; \ + (x) = name##_RB_NEXT(x)) + +#define RB_FOREACH_SAFE(x, name, head, y) \ + for ((x) = RB_MIN(name, head); \ + ((x) != NULL) && ((y) = name##_RB_NEXT(x), 1); \ + (x) = (y)) + +#define RB_FOREACH_REVERSE(x, name, head) \ + for ((x) = RB_MAX(name, head); \ + (x) != NULL; \ + (x) = name##_RB_PREV(x)) + +#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ + for ((x) = RB_MAX(name, head); \ + ((x) != NULL) && ((y) = name##_RB_PREV(x), 1); \ + (x) = (y)) + +#endif /* _SYS_TREE_H_ */ diff --git a/lib/plist.c b/lib/plist.c index 23d580bd65..a854ad52b3 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -899,7 +899,7 @@ vty_prefix_list_install (struct vty *vty, afi_t afi, const char *name, struct prefix_list *plist; struct prefix_list_entry *pentry; struct prefix_list_entry *dup; - struct prefix p; + struct prefix p, p_tmp; int any = 0; int seqnum = -1; int lenum = 0; @@ -945,6 +945,11 @@ vty_prefix_list_install (struct vty *vty, afi_t afi, const char *name, vty_out (vty, "%% Malformed IPv4 prefix%s", VTY_NEWLINE); return CMD_WARNING; } + + /* make a copy to verify prefix matches mask length */ + prefix_copy (&p_tmp, &p); + apply_mask_ipv4 ((struct prefix_ipv4 *) &p_tmp); + break; case AFI_IP6: if (strncmp ("any", prefix, strlen (prefix)) == 0) @@ -962,6 +967,11 @@ vty_prefix_list_install (struct vty *vty, afi_t afi, const char *name, vty_out (vty, "%% Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } + + /* make a copy to verify prefix matches mask length */ + prefix_copy (&p_tmp, &p); + apply_mask_ipv6 ((struct prefix_ipv6 *) &p_tmp); + break; case AFI_ETHER: default: @@ -970,6 +980,18 @@ vty_prefix_list_install (struct vty *vty, afi_t afi, const char *name, break; } + /* If prefix has bits not under the mask, adjust it to fit */ + if (!prefix_same (&p_tmp, &p)) + { + char buf[PREFIX2STR_BUFFER]; + char buf_tmp[PREFIX2STR_BUFFER]; + prefix2str(&p, buf, sizeof(buf)); + prefix2str(&p_tmp, buf_tmp, sizeof(buf_tmp)); + zlog_warn ("Prefix-list %s prefix changed from %s to %s to match length", + name, buf, buf_tmp); + p = p_tmp; + } + /* ge and le check. */ if (genum && (genum <= p.prefixlen)) return vty_invalid_prefix_range (vty, prefix); @@ -995,14 +1017,6 @@ vty_prefix_list_install (struct vty *vty, afi_t afi, const char *name, if (dup) { prefix_list_entry_free (pentry); - vty_out (vty, "%% Insertion failed - prefix-list entry exists:%s", - VTY_NEWLINE); - vty_out (vty, " seq %u %s %s", dup->seq, typestr, prefix); - if (! any && genum) - vty_out (vty, " ge %d", genum); - if (! 
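The red-black half of openbsd-tree.h follows the same generator pattern as the splay macros but gives worst-case O(lg n) insert, find, and remove, plus RB_NFIND for lower-bound ("first key greater than or equal") lookups. A minimal usage sketch with illustrative names:

/* Minimal red-black tree usage sketch; names are illustrative.  Node
 * cleanup is omitted for brevity. */
#include <stdio.h>
#include <stdlib.h>
#include "openbsd-tree.h"

struct rbnode {
	int key;
	RB_ENTRY(rbnode) entry;
};

static int
rbnode_cmp(struct rbnode *a, struct rbnode *b)
{
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

RB_HEAD(rbtree, rbnode);
RB_PROTOTYPE(rbtree, rbnode, entry, rbnode_cmp)
RB_GENERATE(rbtree, rbnode, entry, rbnode_cmp)

int main(void)
{
	struct rbtree tree = RB_INITIALIZER(&tree);
	struct rbnode *n, query = { .key = 3 };
	int i;

	for (i = 0; i < 10; i += 2) {           /* insert 0, 2, 4, 6, 8 */
		n = calloc(1, sizeof(*n));
		n->key = i;
		RB_INSERT(rbtree, &tree, n);    /* returns NULL on success */
	}

	/* lower-bound lookup: first node with key >= 3, i.e. 4 */
	n = RB_NFIND(rbtree, &tree, &query);
	printf("nfind(3) -> %d\n", n ? n->key : -1);

	RB_FOREACH(n, rbtree, &tree)
		printf("%d\n", n->key);
	return 0;
}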
any && lenum) - vty_out (vty, " le %d", lenum); - vty_out (vty, "%s", VTY_NEWLINE); return CMD_SUCCESS; } @@ -3072,7 +3086,7 @@ prefix_bgp_show_prefix_list (struct vty *vty, afi_t afi, char *name, u_char use_ else json_object_object_add(json, "ipv6PrefixList", json_prefix); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -3195,17 +3209,6 @@ prefix_list_init_ipv4 (void) install_element (VIEW_NODE, &show_ip_prefix_list_detail_cmd); install_element (VIEW_NODE, &show_ip_prefix_list_detail_name_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_name_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_name_seq_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_prefix_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_prefix_longer_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_prefix_first_match_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_summary_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_summary_name_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_detail_cmd); - install_element (ENABLE_NODE, &show_ip_prefix_list_detail_name_cmd); - install_element (ENABLE_NODE, &clear_ip_prefix_list_cmd); install_element (ENABLE_NODE, &clear_ip_prefix_list_name_cmd); install_element (ENABLE_NODE, &clear_ip_prefix_list_name_prefix_cmd); @@ -3272,17 +3275,6 @@ prefix_list_init_ipv6 (void) install_element (VIEW_NODE, &show_ipv6_prefix_list_detail_cmd); install_element (VIEW_NODE, &show_ipv6_prefix_list_detail_name_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_name_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_name_seq_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_prefix_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_prefix_longer_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_prefix_first_match_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_summary_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_summary_name_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_detail_cmd); - install_element (ENABLE_NODE, &show_ipv6_prefix_list_detail_name_cmd); - install_element (ENABLE_NODE, &clear_ipv6_prefix_list_cmd); install_element (ENABLE_NODE, &clear_ipv6_prefix_list_name_cmd); install_element (ENABLE_NODE, &clear_ipv6_prefix_list_name_prefix_cmd); diff --git a/lib/prefix.c b/lib/prefix.c index 34bb1a493a..112dae5822 100644 --- a/lib/prefix.c +++ b/lib/prefix.c @@ -240,6 +240,8 @@ afi2str(afi_t afi) return "IPv6"; case AFI_ETHER: return "ethernet"; + case AFI_MAX: + return "bad-value"; default: break; } diff --git a/lib/privs.c b/lib/privs.c index 9228a56d35..6cf87c18d4 100644 --- a/lib/privs.c +++ b/lib/privs.c @@ -250,12 +250,6 @@ zprivs_caps_init (struct zebra_privs_t *zprivs) exit(1); } - if ( !zprivs_state.syscaps_p ) - { - fprintf (stderr, "privs_init: capabilities enabled, " - "but no capabilities supplied\n"); - } - /* we have caps, we have no need to ever change back the original user */ if (zprivs_state.zuid) { @@ -266,6 +260,9 @@ zprivs_caps_init (struct zebra_privs_t *zprivs) exit (1); } } + + if ( !zprivs_state.syscaps_p ) + return; if ( !(zprivs_state.caps = cap_init()) ) { diff --git a/lib/qobj.c b/lib/qobj.c new file mode 100644 index 
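The plist.c changes above alter two user-visible behaviors: a prefix whose host bits extend past its mask length is now normalized to fit the mask (and logged with zlog_warn) instead of being stored verbatim, and re-adding an identical prefix-list entry now succeeds silently instead of printing an "entry exists" error. A condensed sketch of the normalization, using only the lib/prefix primitives that appear in the hunk; the IPv6 branch does the same with apply_mask_ipv6():

/* Sketch of the mask-length normalization performed in
 * vty_prefix_list_install(); vty/zlog plumbing is omitted. */
#include "prefix.h"

static void
normalize_v4(struct prefix *p)
{
	struct prefix p_tmp;

	prefix_copy(&p_tmp, p);
	apply_mask_ipv4((struct prefix_ipv4 *)&p_tmp);  /* zero the host bits */
	if (!prefix_same(&p_tmp, p))
		*p = p_tmp;     /* e.g. 10.1.1.1/24 becomes 10.1.1.0/24 */
}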
0000000000..65b537f961 --- /dev/null +++ b/lib/qobj.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc. + * + * This file is part of Quagga + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include + +#include "thread.h" +#include "memory.h" +#include "hash.h" +#include "log.h" +#include "qobj.h" + +static struct hash *nodes = NULL; + +static unsigned int qobj_key (void *data) +{ + struct qobj_node *node = data; + return (unsigned int)node->nid; +} + +static int qobj_cmp (const void *a, const void *b) +{ + const struct qobj_node *na = a, *nb = b; + return na->nid == nb->nid; +} + +void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type) +{ + node->type = type; + do + { + node->nid = (uint64_t)random(); + node->nid ^= (uint64_t)random() << 32; + } + while (hash_get (nodes, node, hash_alloc_intern) != node); +} + +void qobj_unreg(struct qobj_node *node) +{ + hash_release (nodes, node); +} + +struct qobj_node *qobj_get(uint64_t id) +{ + struct qobj_node dummy = { .nid = id }; + return hash_lookup (nodes, &dummy); +} + +void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type) +{ + struct qobj_node *node = qobj_get(id); + if (!node || node->type != type) + return NULL; + return (char *)node - node->type->node_member_offset; +} + +void qobj_init (void) +{ + nodes = hash_create (qobj_key, qobj_cmp); +} + +void qobj_finish (void) +{ + hash_free (nodes); + nodes = NULL; +} diff --git a/lib/qobj.h b/lib/qobj.h new file mode 100644 index 0000000000..4a5c0c01ed --- /dev/null +++ b/lib/qobj.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _QOBJ_H +#define _QOBJ_H + +#include +#include +#include + +/* reserve a specific amount of bytes for a struct, which can grow up to + * that size (or be dummy'd out if not needed) + * + * note the padding's array size will be an error if it gets negative or zero; + * this is intentional to prevent the struct from growing beyond the allocated + * space. + */ +#define RESERVED_SPACE_STRUCT(name, fieldname, size) \ + struct { \ + struct name fieldname; \ + char padding ## fieldname[size - sizeof(struct name)]; \ + }; + +/* don't need struct definitions for these here. code actually using + * these needs to define the struct *before* including this header. + * HAVE_QOBJ_xxx should be defined to +1 in that case, like this: + * + * #if defined(HAVE_QOBJ_NODETYPE_CLI) && HAVE_QOBJ_NODETYPE_CLI < 0 + * #error include files are in wrong order + * #else + * #define HAVE_QOBJ_NODETYPE_CLI 1 + * struct qobj_nodetype_cli { ... } + * #endif + */ +#ifndef HAVE_QOBJ_NODETYPE_CLI +#define HAVE_QOBJ_NODETYPE_CLI -1 +struct qobj_nodetype_cli { int dummy; }; +#endif + +#ifndef HAVE_QOBJ_NODETYPE_CAPNP +#define HAVE_QOBJ_NODETYPE_CAPNP -1 +struct qobj_nodetype_capnp { int dummy; }; +#endif + +/* each different kind of object will have a global variable of this type, + * which can be used by various other pieces to store type-related bits. + * type equality can be tested as pointer equality. (cf. QOBJ_GET_TYPESAFE) + */ +struct qobj_nodetype { + ptrdiff_t node_member_offset; + RESERVED_SPACE_STRUCT(qobj_nodetype_cli, cli, 256) + RESERVED_SPACE_STRUCT(qobj_nodetype_capnp, capnp, 256) +}; + +/* anchor to be embedded somewhere in the object's struct */ +struct qobj_node { + uint64_t nid; + struct qobj_nodetype *type; +}; + +#define QOBJ_FIELDS \ + struct qobj_node qobj_node; + +/* call these at the end of any _create function (QOBJ_REG) + * and beginning of any _destroy function (QOBJ_UNREG) */ +#define QOBJ_REG(n, structname) \ + qobj_reg(&n->qobj_node, &qobj_t_ ## structname) +#define QOBJ_UNREG(n) \ + qobj_unreg(&n->qobj_node) + +/* internals - should not be directly used without a good reason*/ +void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type); +void qobj_unreg(struct qobj_node *node); +struct qobj_node *qobj_get(uint64_t id); +void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type); + +/* type declarations */ +#define DECLARE_QOBJ_TYPE(structname) \ + extern struct qobj_nodetype qobj_t_ ## structname; +#define DEFINE_QOBJ_TYPE(structname) \ + struct qobj_nodetype qobj_t_ ## structname = { \ + .node_member_offset = \ + (ptrdiff_t)offsetof(struct structname, qobj_node) \ + }; +#define DEFINE_QOBJ_TYPE_INIT(structname, ...) \ + struct qobj_nodetype qobj_t_ ## structname = { \ + .node_member_offset = \ + (ptrdiff_t)offsetof(struct structname, qobj_node), \ + __VA_ARGS__ \ + }; + +/* ID dereference with typecheck. + * will return NULL if id not found or wrong type. 
*/ +#define QOBJ_GET_TYPESAFE(id, structname) \ + ((struct structname *)qobj_get_typed((id), &qobj_t_ ## structname)) + +#define QOBJ_ID(ptr) \ + ((ptr)->qobj_node.nid) + +void qobj_init(void); +void qobj_finish(void); + +#endif /* _QOBJ_H */ diff --git a/lib/route_types.txt b/lib/route_types.txt index 42f3b8f324..eed4d85036 100644 --- a/lib/route_types.txt +++ b/lib/route_types.txt @@ -60,6 +60,17 @@ ZEBRA_ROUTE_PIM, pim, pimd, 'P', 1, 0, "PIM" ZEBRA_ROUTE_HSLS, hsls, hslsd, 'H', 0, 0, "HSLS" ZEBRA_ROUTE_OLSR, olsr, olsrd, 'o', 0, 0, "OLSR" ZEBRA_ROUTE_TABLE, table, zebra, 'T', 1, 1, "Table" +ZEBRA_ROUTE_LDP, ldp, ldpd, 'L', 0, 0, "LDP" +#vnc when sent to zebra +ZEBRA_ROUTE_VNC, vnc, NULL, 'v', 1, 1, "VNC" +# vnc when sent to bgp +ZEBRA_ROUTE_VNC_DIRECT, vpn, NULL, 'V', 1, 1, "VPN" +# vnc when sent to bgp (remote next hop?) +ZEBRA_ROUTE_VNC_DIRECT_RH, vpn-rh, NULL, 'V', 0, 0, "VPN" +# bgp unicast -> vnc +ZEBRA_ROUTE_BGP_DIRECT, bgp-direct, NULL, 'b', 0, 0, "BGP-Direct" +# bgp unicast -> vnc +ZEBRA_ROUTE_BGP_DIRECT_EXT, bgp-direct-to-nve-groups, NULL, 'e', 0, 0, "BGP2VNC" ## help strings ZEBRA_ROUTE_SYSTEM, "Reserved route type, for internal use only" @@ -74,5 +85,8 @@ ZEBRA_ROUTE_ISIS, "Intermediate System to Intermediate System (IS-IS)" ZEBRA_ROUTE_BGP, "Border Gateway Protocol (BGP)" ZEBRA_ROUTE_PIM, "Protocol Independent Multicast (PIM)" ZEBRA_ROUTE_HSLS, "Hazy-Sighted Link State Protocol (HSLS)" +ZEBRA_ROUTE_VNC, "Virtual Network Control (VNC)" ZEBRA_ROUTE_OLSR, "Optimised Link State Routing (OLSR)" ZEBRA_ROUTE_TABLE, "Non-main Kernel Routing Table" +ZEBRA_ROUTE_LDP, "Label Distribution Protocol (LDP)" +ZEBRA_ROUTE_VNC_DIRECT, "VPN routes(VPN)" diff --git a/lib/routemap.c b/lib/routemap.c index a68b6210b3..fc2e6b7e52 100644 --- a/lib/routemap.c +++ b/lib/routemap.c @@ -38,6 +38,9 @@ DEFINE_MTYPE_STATIC(LIB, ROUTE_MAP_RULE_STR, "Route map rule str") DEFINE_MTYPE( LIB, ROUTE_MAP_COMPILED, "Route map compiled") DEFINE_MTYPE_STATIC(LIB, ROUTE_MAP_DEP, "Route map dependency") +DEFINE_QOBJ_TYPE(route_map_index) +DEFINE_QOBJ_TYPE(route_map) + /* Vector for route match rules. */ static vector route_match_vec; @@ -743,6 +746,7 @@ route_map_new (const char *name) new = XCALLOC (MTYPE_ROUTE_MAP, sizeof (struct route_map)); new->name = XSTRDUP (MTYPE_ROUTE_MAP_NAME, name); + QOBJ_REG (new, route_map); return new; } @@ -803,6 +807,8 @@ route_map_free_map (struct route_map *map) if (map != NULL) { + QOBJ_UNREG (map); + if (map->next) map->next->prev = map->prev; else @@ -1070,6 +1076,7 @@ route_map_index_new (void) new = XCALLOC (MTYPE_ROUTE_MAP_INDEX, sizeof (struct route_map_index)); new->exitpolicy = RMAP_EXIT; /* Default to Cisco-style */ + QOBJ_REG (new, route_map_index); return new; } @@ -1079,6 +1086,8 @@ route_map_index_delete (struct route_map_index *index, int notify) { struct route_map_rule *rule; + QOBJ_UNREG (index); + /* Free route match. 
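The qobj layer gives every registered object a random 64-bit ID plus a type tag, so an ID handed out through an external interface (the CLI context code below, or a future RPC transport) can be turned back into a typed pointer without trusting the caller. The intended embedding pattern, sketched with an illustrative struct; MTYPE_TMP stands in for whatever memory type the real caller uses, and qobj_init() is assumed to have run once at startup:

/* Sketch of the qobj embedding pattern; "struct widget" and MTYPE_TMP
 * are illustrative.  DECLARE_QOBJ_TYPE normally lives in a header. */
#include <stdint.h>
#include "memory.h"
#include "qobj.h"

struct widget {
	char name[32];
	QOBJ_FIELDS     /* embeds struct qobj_node */
};
DECLARE_QOBJ_TYPE(widget)
DEFINE_QOBJ_TYPE(widget)

static struct widget *
widget_create(void)
{
	struct widget *w = XCALLOC(MTYPE_TMP, sizeof(*w));

	QOBJ_REG(w, widget);    /* assigns a random ID and registers it */
	return w;
}

static void
widget_destroy(struct widget *w)
{
	QOBJ_UNREG(w);
	XFREE(MTYPE_TMP, w);
}

static struct widget *
widget_lookup(uint64_t id)
{
	/* NULL if the ID is unknown or belongs to a different type */
	return QOBJ_GET_TYPESAFE(id, widget);
}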
*/ while ((rule = index->match_list.head) != NULL) route_map_rule_delete (&index->match_list, rule); @@ -1929,7 +1938,7 @@ static void route_map_process_dependency (struct hash_backet *backet, void *data) { char *rmap_name; - route_map_event_t type = (route_map_event_t )data; + route_map_event_t type = (route_map_event_t)(ptrdiff_t)data; rmap_name = (char *)backet->data; @@ -1994,9 +2003,10 @@ DEFUN (match_interface, "Interface name\n") { int idx_word = 2; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.match_interface) - return rmap_match_set_hook.match_interface (vty, vty->index, "interface", argv[idx_word]->arg, RMAP_EVENT_MATCH_ADDED); + return rmap_match_set_hook.match_interface (vty, index, "interface", argv[idx_word]->arg, RMAP_EVENT_MATCH_ADDED); return CMD_SUCCESS; } @@ -2009,9 +2019,10 @@ DEFUN (no_match_interface, "Interface name\n") { char *iface = (argc == 4) ? argv[3]->arg : NULL; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.no_match_interface) - return rmap_match_set_hook.no_match_interface (vty, vty->index, "interface", iface, RMAP_EVENT_MATCH_DELETED); + return rmap_match_set_hook.no_match_interface (vty, index, "interface", iface, RMAP_EVENT_MATCH_DELETED); return CMD_SUCCESS; } @@ -2027,9 +2038,10 @@ DEFUN (match_ip_address, "IP Access-list name\n") { int idx_acl = 3; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.match_ip_address) - return rmap_match_set_hook.match_ip_address (vty, vty->index, "ip address", argv[idx_acl]->arg, + return rmap_match_set_hook.match_ip_address (vty, index, "ip address", argv[idx_acl]->arg, RMAP_EVENT_FILTER_ADDED); return CMD_SUCCESS; } @@ -2047,13 +2059,14 @@ DEFUN (no_match_ip_address, "IP Access-list name\n") { int idx_word = 4; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.no_match_ip_address) { if (argc <= idx_word) - return rmap_match_set_hook.no_match_ip_address (vty, vty->index, "ip address", NULL, + return rmap_match_set_hook.no_match_ip_address (vty, index, "ip address", NULL, RMAP_EVENT_FILTER_DELETED); - return rmap_match_set_hook.no_match_ip_address (vty, vty->index, "ip address", argv[idx_word]->arg, + return rmap_match_set_hook.no_match_ip_address (vty, index, "ip address", argv[idx_word]->arg, RMAP_EVENT_FILTER_DELETED); } return CMD_SUCCESS; @@ -2070,8 +2083,10 @@ DEFUN (match_ip_address_prefix_list, "IP prefix-list name\n") { int idx_word = 4; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.match_ip_address_prefix_list) - return rmap_match_set_hook.match_ip_address_prefix_list (vty, vty->index, "ip address prefix-list", + return rmap_match_set_hook.match_ip_address_prefix_list (vty, index, "ip address prefix-list", argv[idx_word]->arg, RMAP_EVENT_PLIST_ADDED); return CMD_SUCCESS; } @@ -2088,13 +2103,14 @@ DEFUN (no_match_ip_address_prefix_list, "IP prefix-list name\n") { int idx_word = 5; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.no_match_ip_address_prefix_list) { if (argc <= idx_word) - return rmap_match_set_hook.no_match_ip_address_prefix_list (vty, vty->index, "ip address prefix-list", + return rmap_match_set_hook.no_match_ip_address_prefix_list (vty, index, "ip address prefix-list", NULL, RMAP_EVENT_PLIST_DELETED); - return rmap_match_set_hook.no_match_ip_address_prefix_list(vty, vty->index, "ip address prefix-list", + return rmap_match_set_hook.no_match_ip_address_prefix_list(vty, index, "ip address prefix-list", argv[idx_word]->arg, RMAP_EVENT_PLIST_DELETED); } 
return CMD_SUCCESS; @@ -2112,8 +2128,10 @@ DEFUN (match_ip_next_hop, "IP Access-list name\n") { int idx_acl = 3; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.match_ip_next_hop) - return rmap_match_set_hook.match_ip_next_hop (vty, vty->index, "ip next-hop", argv[idx_acl]->arg, + return rmap_match_set_hook.match_ip_next_hop (vty, index, "ip next-hop", argv[idx_acl]->arg, RMAP_EVENT_FILTER_ADDED); return CMD_SUCCESS; } @@ -2130,14 +2148,15 @@ DEFUN (no_match_ip_next_hop, "IP access-list number (expanded range)\n" "IP Access-list name\n") { - int idx_word = 4; + int idx_word = 4; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.no_match_ip_next_hop) { if (argc <= idx_word) - return rmap_match_set_hook.no_match_ip_next_hop (vty, vty->index, "ip next-hop", NULL, + return rmap_match_set_hook.no_match_ip_next_hop (vty, index, "ip next-hop", NULL, RMAP_EVENT_FILTER_DELETED); - return rmap_match_set_hook.no_match_ip_next_hop (vty, vty->index, "ip next-hop", argv[idx_word]->arg, + return rmap_match_set_hook.no_match_ip_next_hop (vty, index, "ip next-hop", argv[idx_word]->arg, RMAP_EVENT_FILTER_DELETED); } return CMD_SUCCESS; @@ -2154,8 +2173,10 @@ DEFUN (match_ip_next_hop_prefix_list, "IP prefix-list name\n") { int idx_word = 4; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.match_ip_next_hop_prefix_list) - return rmap_match_set_hook.match_ip_next_hop_prefix_list (vty, vty->index, "ip next-hop prefix-list", + return rmap_match_set_hook.match_ip_next_hop_prefix_list (vty, index, "ip next-hop prefix-list", argv[idx_word]->arg, RMAP_EVENT_PLIST_ADDED); return CMD_SUCCESS; } @@ -2171,13 +2192,14 @@ DEFUN (no_match_ip_next_hop_prefix_list, "IP prefix-list name\n") { int idx_word = 5; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.no_match_ip_next_hop) { if (argc <= idx_word) - return rmap_match_set_hook.no_match_ip_next_hop (vty, vty->index, "ip next-hop prefix-list", + return rmap_match_set_hook.no_match_ip_next_hop (vty, index, "ip next-hop prefix-list", NULL, RMAP_EVENT_PLIST_DELETED); - return rmap_match_set_hook.no_match_ip_next_hop (vty, vty->index, "ip next-hop prefix-list", + return rmap_match_set_hook.no_match_ip_next_hop (vty, index, "ip next-hop prefix-list", argv[idx_word]->arg, RMAP_EVENT_PLIST_DELETED); } return CMD_SUCCESS; @@ -2193,8 +2215,10 @@ DEFUN (match_ipv6_address, "IPv6 access-list name\n") { int idx_word = 3; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.match_ipv6_address) - return rmap_match_set_hook.match_ipv6_address (vty, vty->index, "ipv6 address", argv[idx_word]->arg, + return rmap_match_set_hook.match_ipv6_address (vty, index, "ipv6 address", argv[idx_word]->arg, RMAP_EVENT_FILTER_ADDED); return CMD_SUCCESS; } @@ -2209,8 +2233,10 @@ DEFUN (no_match_ipv6_address, "IPv6 access-list name\n") { int idx_word = 4; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.no_match_ipv6_address) - return rmap_match_set_hook.no_match_ipv6_address (vty, vty->index, "ipv6 address", argv[idx_word]->arg, + return rmap_match_set_hook.no_match_ipv6_address (vty, index, "ipv6 address", argv[idx_word]->arg, RMAP_EVENT_FILTER_DELETED); return CMD_SUCCESS; } @@ -2226,8 +2252,10 @@ DEFUN (match_ipv6_address_prefix_list, "IP prefix-list name\n") { int idx_word = 4; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.match_ipv6_address_prefix_list) - return rmap_match_set_hook.match_ipv6_address_prefix_list (vty, vty->index, 
"ipv6 address prefix-list", + return rmap_match_set_hook.match_ipv6_address_prefix_list (vty, index, "ipv6 address prefix-list", argv[idx_word]->arg, RMAP_EVENT_PLIST_ADDED); return CMD_SUCCESS; } @@ -2243,8 +2271,10 @@ DEFUN (no_match_ipv6_address_prefix_list, "IP prefix-list name\n") { int idx_word = 5; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.no_match_ipv6_address_prefix_list) - return rmap_match_set_hook.no_match_ipv6_address_prefix_list(vty, vty->index, "ipv6 address prefix-list", + return rmap_match_set_hook.no_match_ipv6_address_prefix_list(vty, index, "ipv6 address prefix-list", argv[idx_word]->arg, RMAP_EVENT_PLIST_DELETED); return CMD_SUCCESS; } @@ -2258,8 +2288,10 @@ DEFUN (match_metric, "Metric value\n") { int idx_number = 2; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.match_metric) - return rmap_match_set_hook.match_metric(vty, vty->index, "metric", argv[idx_number]->arg, + return rmap_match_set_hook.match_metric(vty, index, "metric", argv[idx_number]->arg, RMAP_EVENT_MATCH_ADDED); return CMD_SUCCESS; } @@ -2274,12 +2306,14 @@ DEFUN (no_match_metric, "Metric value\n") { int idx_number = 3; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.no_match_metric) { if (argc <= idx_number) - return rmap_match_set_hook.no_match_metric (vty, vty->index, "metric", + return rmap_match_set_hook.no_match_metric (vty, index, "metric", NULL, RMAP_EVENT_MATCH_DELETED); - return rmap_match_set_hook.no_match_metric(vty, vty->index, "metric", + return rmap_match_set_hook.no_match_metric(vty, index, "metric", argv[idx_number]->arg, RMAP_EVENT_MATCH_DELETED); } @@ -2289,14 +2323,16 @@ DEFUN (no_match_metric, DEFUN (match_tag, match_tag_cmd, - "match tag (1-65535)", + "match tag (1-4294967295)", MATCH_STR "Match tag of route\n" "Tag value\n") { int idx_number = 2; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.match_tag) - return rmap_match_set_hook.match_tag(vty, vty->index, "tag", argv[idx_number]->arg, + return rmap_match_set_hook.match_tag(vty, index, "tag", argv[idx_number]->arg, RMAP_EVENT_MATCH_ADDED); return CMD_SUCCESS; } @@ -2304,14 +2340,16 @@ DEFUN (match_tag, DEFUN (no_match_tag, no_match_tag_cmd, - "no match tag [(1-65535)]", + "no match tag [(1-4294967295)]", NO_STR MATCH_STR "Match tag of route\n" "Tag value\n") { + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.no_match_tag) - return rmap_match_set_hook.no_match_tag (vty, vty->index, "tag", argv[3]->arg, + return rmap_match_set_hook.no_match_tag (vty, index, "tag", argv[3]->arg, RMAP_EVENT_MATCH_DELETED); return CMD_SUCCESS; } @@ -2328,6 +2366,7 @@ DEFUN (set_ip_nexthop, int idx_ipv4 = 3; union sockunion su; int ret; + VTY_DECLVAR_CONTEXT (route_map_index, index); ret = str2sockunion (argv[idx_ipv4]->arg, &su); if (ret < 0) @@ -2344,7 +2383,7 @@ DEFUN (set_ip_nexthop, } if (rmap_match_set_hook.set_ip_nexthop) - return rmap_match_set_hook.set_ip_nexthop(vty, vty->index, "ip next-hop", argv[idx_ipv4]->arg); + return rmap_match_set_hook.set_ip_nexthop(vty, index, "ip next-hop", argv[idx_ipv4]->arg); return CMD_SUCCESS; } @@ -2359,12 +2398,13 @@ DEFUN (no_set_ip_nexthop, "IP address of next hop\n") { int idx_peer = 4; + VTY_DECLVAR_CONTEXT (route_map_index, index); if (rmap_match_set_hook.no_set_ip_nexthop) { if (argc <= idx_peer) - return rmap_match_set_hook.no_set_ip_nexthop (vty, vty->index, "ip next-hop", NULL); - return rmap_match_set_hook.no_set_ip_nexthop (vty, vty->index, "ip next-hop", 
argv[idx_peer]->arg); + return rmap_match_set_hook.no_set_ip_nexthop (vty, index, "ip next-hop", NULL); + return rmap_match_set_hook.no_set_ip_nexthop (vty, index, "ip next-hop", argv[idx_peer]->arg); } return CMD_SUCCESS; } @@ -2382,6 +2422,7 @@ DEFUN (set_ipv6_nexthop_local, int idx_ipv6 = 4; struct in6_addr addr; int ret; + VTY_DECLVAR_CONTEXT (route_map_index, index); ret = inet_pton (AF_INET6, argv[idx_ipv6]->arg, &addr); if (!ret) @@ -2396,7 +2437,7 @@ DEFUN (set_ipv6_nexthop_local, } if (rmap_match_set_hook.set_ipv6_nexthop_local) - return rmap_match_set_hook.set_ipv6_nexthop_local (vty, vty->index, "ipv6 next-hop local", argv[idx_ipv6]->arg); + return rmap_match_set_hook.set_ipv6_nexthop_local (vty, index, "ipv6 next-hop local", argv[idx_ipv6]->arg); return CMD_SUCCESS; } @@ -2412,11 +2453,13 @@ DEFUN (no_set_ipv6_nexthop_local, "IPv6 address of next hop\n") { int idx_ipv6 = 5; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.no_set_ipv6_nexthop_local) { if (argc <= idx_ipv6) - return rmap_match_set_hook.no_set_ipv6_nexthop_local (vty, vty->index, "ipv6 next-hop local", NULL); - return rmap_match_set_hook.no_set_ipv6_nexthop_local (vty, vty->index, "ipv6 next-hop local", argv[5]->arg); + return rmap_match_set_hook.no_set_ipv6_nexthop_local (vty, index, "ipv6 next-hop local", NULL); + return rmap_match_set_hook.no_set_ipv6_nexthop_local (vty, index, "ipv6 next-hop local", argv[5]->arg); } return CMD_SUCCESS; } @@ -2434,8 +2477,10 @@ DEFUN (set_metric, "Subtract metric\n") { int idx_number = 2; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.set_metric) - return rmap_match_set_hook.set_metric (vty, vty->index, "metric", argv[idx_number]->arg); + return rmap_match_set_hook.set_metric (vty, index, "metric", argv[idx_number]->arg); return CMD_SUCCESS; } @@ -2449,11 +2494,13 @@ DEFUN (no_set_metric, "Metric value\n") { int idx_number = 3; + VTY_DECLVAR_CONTEXT (route_map_index, index); + if (rmap_match_set_hook.no_set_metric) { if (argc <= idx_number) - return rmap_match_set_hook.no_set_metric (vty, vty->index, "metric", NULL); - return rmap_match_set_hook.no_set_metric (vty, vty->index, "metric", argv[idx_number]->arg); + return rmap_match_set_hook.no_set_metric (vty, index, "metric", NULL); + return rmap_match_set_hook.no_set_metric (vty, index, "metric", argv[idx_number]->arg); } return CMD_SUCCESS; } @@ -2461,32 +2508,36 @@ DEFUN (no_set_metric, DEFUN (set_tag, set_tag_cmd, - "set tag (1-65535)", + "set tag (1-4294967295)", SET_STR "Tag value for routing protocol\n" "Tag value\n") { + VTY_DECLVAR_CONTEXT (route_map_index, index); + int idx_number = 2; if (rmap_match_set_hook.set_tag) - return rmap_match_set_hook.set_tag (vty, vty->index, "tag", argv[idx_number]->arg); + return rmap_match_set_hook.set_tag (vty, index, "tag", argv[idx_number]->arg); return CMD_SUCCESS; } DEFUN (no_set_tag, no_set_tag_cmd, - "no set tag [(1-65535)]", + "no set tag [(1-4294967295)]", NO_STR SET_STR "Tag value for routing protocol\n" "Tag value\n") { + VTY_DECLVAR_CONTEXT (route_map_index, index); + int idx_number = 3; if (rmap_match_set_hook.no_set_tag) { if (argc <= idx_number) - return rmap_match_set_hook.no_set_tag (vty, vty->index, "tag", NULL); - return rmap_match_set_hook.no_set_tag (vty, vty->index, "tag", argv[idx_number]->arg); + return rmap_match_set_hook.no_set_tag (vty, index, "tag", NULL); + return rmap_match_set_hook.no_set_tag (vty, index, "tag", argv[idx_number]->arg); } return CMD_SUCCESS; } @@ -2516,8 +2567,7 @@ DEFUN (route_map, map = 
route_map_get (mapname); index = route_map_index_get (map, permit, pref); - vty->index = index; - vty->node = RMAP_NODE; + VTY_PUSH_CONTEXT_COMPAT (RMAP_NODE, index); return CMD_SUCCESS; } @@ -2598,9 +2648,7 @@ DEFUN (rmap_onmatch_next, "Exit policy on matches\n" "Next clause\n") { - struct route_map_index *index; - - index = vty->index; + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); if (index) { @@ -2623,9 +2671,7 @@ DEFUN (no_rmap_onmatch_next, "Exit policy on matches\n" "Next clause\n") { - struct route_map_index *index; - - index = vty->index; + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); if (index) index->exitpolicy = RMAP_EXIT; @@ -2643,8 +2689,9 @@ DEFUN (rmap_onmatch_goto, int idx_number = 2; char *num = NULL; num = argv[idx_number]->arg; - - struct route_map_index *index = vty->index; + + + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); int d = 0; if (index) @@ -2684,9 +2731,7 @@ DEFUN (no_rmap_onmatch_goto, "Exit policy on matches\n" "Goto Clause number\n") { - struct route_map_index *index; - - index = vty->index; + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); if (index) index->exitpolicy = RMAP_EXIT; @@ -2736,10 +2781,9 @@ DEFUN (rmap_call, "Target route-map name\n") { int idx_word = 1; - struct route_map_index *index; + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); const char *rmap = argv[idx_word]->arg; - index = vty->index; if (index) { if (index->nextrm) @@ -2765,9 +2809,7 @@ DEFUN (no_rmap_call, NO_STR "Jump to another Route-Map after match+set\n") { - struct route_map_index *index; - - index = vty->index; + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); if (index->nextrm) { @@ -2788,9 +2830,8 @@ DEFUN (rmap_description, "Comment describing this route-map rule\n") { int idx_line = 1; - struct route_map_index *index; + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); - index = vty->index; if (index) { if (index->description) @@ -2806,9 +2847,8 @@ DEFUN (no_rmap_description, NO_STR "Route-map comment\n") { - struct route_map_index *index; + struct route_map_index *index = VTY_GET_CONTEXT (route_map_index); - index = vty->index; if (index) { if (index->description) @@ -2883,6 +2923,32 @@ route_map_init_dep_hashes (void) route_map_dep_hash_cmp); } +/* Common route map rules */ + +void * +route_map_rule_tag_compile (const char *arg) +{ + unsigned long int tmp; + char *endptr; + route_tag_t *tag; + + errno = 0; + tmp = strtoul(arg, &endptr, 0); + if (arg[0] == '\0' || *endptr != '\0' || errno || tmp > ROUTE_TAG_MAX) + return NULL; + + tag = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(*tag)); + *tag = tmp; + + return tag; +} + +void +route_map_rule_tag_free (void *rule) +{ + XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); +} + /* Initialization of route map vector. */ void route_map_init_vty (void) diff --git a/lib/routemap.h b/lib/routemap.h index a6d3123335..86d72ce474 100644 --- a/lib/routemap.h +++ b/lib/routemap.h @@ -24,6 +24,7 @@ #include "prefix.h" #include "memory.h" +#include "qobj.h" DECLARE_MTYPE(ROUTE_MAP_NAME) DECLARE_MTYPE(ROUTE_MAP_RULE) DECLARE_MTYPE(ROUTE_MAP_COMPILED) @@ -152,7 +153,10 @@ struct route_map_index /* Make linked list. */ struct route_map_index *next; struct route_map_index *prev; + + QOBJ_FIELDS }; +DECLARE_QOBJ_TYPE(route_map_index) /* Route map list structure. 
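Two related route-map changes run through the hunks above. First, the DEFUN bodies stop reaching into vty->index and instead obtain a typed pointer from the VTY context via VTY_DECLVAR_CONTEXT / VTY_GET_CONTEXT (their definitions live in vty.h, outside this hunk), which is what the new qobj registration of route_map and route_map_index enables. Second, the tag CLI range widens from (1-65535) to (1-4294967295), and the new route_map_rule_tag_compile()/route_map_rule_tag_free() helpers let each daemon's "match tag"/"set tag" rules share one strtoul-based parser. A sketch of a rule wired to the shared helpers; the apply function and the struct route_map_rule_cmd layout follow the usual lib/routemap.h convention and are illustrative here:

/* Sketch of a "set tag" rule built on the shared tag helpers; only
 * route_map_rule_tag_compile/route_map_rule_tag_free come from this
 * patch, the rest follows the existing route_map_rule_cmd pattern. */
#include "routemap.h"

static route_map_result_t
route_set_tag(void *rule, struct prefix *prefix,
	      route_map_object_t type, void *object)
{
	route_tag_t *tag = rule;

	/* a real rule would copy *tag into the route's attributes here */
	(void)tag; (void)prefix; (void)type; (void)object;
	return RMAP_OKAY;
}

static struct route_map_rule_cmd route_set_tag_cmd = {
	"tag",
	route_set_tag,
	route_map_rule_tag_compile,     /* shared (1-4294967295) parser */
	route_map_rule_tag_free,        /* shared free of the compiled tag */
};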
*/
struct route_map
@@ -171,7 +175,10 @@ struct route_map
  /* Maintain update info */
  int to_be_processed; /* True if modification isn't acted on yet */
  int deleted;         /* If 1, then this node will be deleted */
+
+  QOBJ_FIELDS
};
+DECLARE_QOBJ_TYPE(route_map)
/* Prototypes. */
extern void route_map_init (void);
@@ -393,4 +400,8 @@ extern void route_map_no_set_tag_hook (int (*func) (struct vty *vty,
                                        struct route_map_index *index,
                                        const char *command,
                                        const char *arg));
+
+extern void *route_map_rule_tag_compile (const char *arg);
+extern void route_map_rule_tag_free (void *rule);
+
#endif /* _ZEBRA_ROUTEMAP_H */
diff --git a/lib/skiplist.c b/lib/skiplist.c
new file mode 100644
index 0000000000..2a90b2c7c6
--- /dev/null
+++ b/lib/skiplist.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright 1990 William Pugh
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Permission to include in quagga provided on March 31, 2016
+ */
+
+/*
+ * Skip List implementation based on code from William Pugh.
+ * ftp://ftp.cs.umd.edu/pub/skipLists/
+ *
+ * Skip Lists are a probabilistic alternative to balanced trees, as
+ * described in the June 1990 issue of CACM and were invented by
+ * William Pugh in 1987.
+ *
+ * This file contains source code to implement a dictionary using
+ * skip lists and a test driver to test the routines.
+ *
+ * A couple of comments about this implementation:
+ * The routine randomLevel has been hard-coded to generate random
+ * levels using p=0.25. It can be easily changed.
+ *
+ * The insertion routine has been implemented so as to use the
+ * dirty hack described in the CACM paper: if a random level is
+ * generated that is more than the current maximum level, the
+ * current maximum level plus one is used instead.
+ *
+ * Levels start at zero and go up to MaxLevel (which is equal to
+ * (MaxNumberOfLevels-1)).
+ *
+ * The run-time flag SKIPLIST_FLAG_ALLOW_DUPLICATES determines whether or
+ * not duplicates are allowed for a given list. If set, duplicates are
+ * allowed and act in a FIFO manner. If not set, an insertion of a value
+ * already in the list updates the previously existing binding.
+ *
+ * BitsInRandom is defined to be the number of bits returned by a call to
+ * random(). For most all machines with 32-bit integers, this is 31 bits
+ * as currently set.
+ */
+
+
+#include <zebra.h>
+
+#include "memory.h"
+#include "log.h"
+#include "vty.h"
+#include "skiplist.h"
+
+DEFINE_MTYPE_STATIC(LIB, SKIP_LIST, "Skip List")
+DEFINE_MTYPE_STATIC(LIB, SKIP_LIST_NODE, "Skip Node")
+
+#define BitsInRandom 31
+
+#define MaxNumberOfLevels 16
+#define MaxLevel (MaxNumberOfLevels-1)
+#define newNodeOfLevel(l) XCALLOC(MTYPE_SKIP_LIST_NODE, sizeof(struct skiplistnode)+(l)*sizeof(struct skiplistnode *))
+
+static int randomsLeft;
+static int randomBits;
+static struct skiplist *skiplist_last_created; /* debugging hack */
+
+#if 1
+#define CHECKLAST(sl) do {\
+  if ((sl)->header->forward[0] && !(sl)->last) assert(0); \
+  if (!(sl)->header->forward[0] && (sl)->last) assert(0); \
+} while (0)
+#else
+#define CHECKLAST(sl)
+#endif
+
+
+static int
+randomLevel()
+{
+  register int level = 0;
+  register int b;
+
+  do {
+    if (randomsLeft <= 0) {
+      randomBits = random();
+      randomsLeft = BitsInRandom/2;
+    }
+    b = randomBits&3;
+    randomBits>>=2;
+    --randomsLeft;
+
+    if (!b) {
+      level++;
+      if (level >= MaxLevel)
+        return MaxLevel;
+    }
+  } while (!b);
+
+  return level;
+}
+
+static int
+default_cmp(void *key1, void *key2)
+{
+  if (key1 < key2)
+    return -1;
+  if (key1 > key2)
+    return 1;
+  return 0;
+}
+
+unsigned int
+skiplist_count(struct skiplist *l)
+{
+  return l->count;
+}
+
+struct skiplist *
+skiplist_new(
+  int flags,
+  int (*cmp) (void *key1, void *key2),
+  void (*del) (void *val))
+{
+  struct skiplist *new;
+
+  new = XCALLOC (MTYPE_SKIP_LIST, sizeof (struct skiplist));
+  assert(new);
+
+  new->level = 0;
+  new->count = 0;
+  new->header = newNodeOfLevel(MaxNumberOfLevels);
+  new->stats = newNodeOfLevel(MaxNumberOfLevels);
+
+  new->flags = flags;
+  if (cmp)
+    new->cmp = cmp;
+  else
+    new->cmp = default_cmp;
+
+  if (del)
+    new->del = del;
+
+  skiplist_last_created = new; /* debug */
+
+  return new;
+}
+
+void
+skiplist_free(struct skiplist *l)
+{
+  register struct skiplistnode *p, *q;
+
+  p = l->header;
+
+  do {
+    q = p->forward[0];
+    if (l->del && p != l->header)
+      (*l->del)(p->value);
+    XFREE(MTYPE_SKIP_LIST_NODE, p);
+    p = q;
+  } while (p);
+
+  XFREE(MTYPE_SKIP_LIST_NODE, l->stats);
+  XFREE(MTYPE_SKIP_LIST, l);
+}
+
+
+int
+skiplist_insert(
+  register struct skiplist *l,
+  register void *key,
+  register void *value)
+{
+  register int k;
+  struct skiplistnode *update[MaxNumberOfLevels];
+  register struct skiplistnode *p, *q;
+
+  CHECKLAST(l);
+
+/* DEBUG */
+  if (!key) {
+    zlog_err("%s: key is 0, value is %p", __func__, value);
+  }
+
+  p = l->header;
+  k = l->level;
+  do {
+    while (q = p->forward[k], q && (*l->cmp)(q->key, key) < 0) p = q;
+    update[k] = p;
+  } while (--k >= 0);
+
+  if (!(l->flags & SKIPLIST_FLAG_ALLOW_DUPLICATES) &&
+      q && ((*l->cmp)(q->key, key) == 0)) {
+
+    return -1;
+  }
+
+  k = randomLevel();
+  if (k>l->level) {
+    k = ++l->level;
+    update[k] = l->header;
+  }
+
+  q = newNodeOfLevel(k);
+  q->key = key;
+  q->value = value;
+#if SKIPLIST_0TIMER_DEBUG
+  q->flags = SKIPLIST_NODE_FLAG_INSERTED; /* debug */
+#endif
+
+  ++(l->stats->forward[k]);
+#if SKIPLIST_DEBUG
+  zlog_debug("%s: incremented stats @%p:%d, now %ld", __func__, l, k,
+    l->stats->forward[k] - (struct skiplistnode *)NULL);
+#endif
+
+  do {
+    p = update[k];
+    q->forward[k] = p->forward[k];
+    p->forward[k] = q;
+  } while(--k>=0);
+
+  /*
+   * If this is the last item in the list, update the "last" pointer
+   */
+  if (!q->forward[0]) {
+    l->last = q;
+  }
+
+  ++(l->count);
+
+  CHECKLAST(l);
+
+  return 0;
+}
+
+int
+skiplist_delete(
+  register struct skiplist *l,
+  register void *key,
+  register void *value)
/* used only if duplicates allowed */ +{ + register int k,m; + struct skiplistnode *update[MaxNumberOfLevels]; + register struct skiplistnode *p, *q; + + CHECKLAST(l); + + /* to make debugging easier */ + for (k = 0; k < MaxNumberOfLevels; ++k) + update[k] = NULL; + + p = l->header; + k = m = l->level; + do { + while (q = p->forward[k], q && (*l->cmp)(q->key, key) < 0) p = q; + update[k] = p; + } while(--k>=0); + + if (l->flags & SKIPLIST_FLAG_ALLOW_DUPLICATES) { + while (q && ((*l->cmp)(q->key, key) == 0) && (q->value != value)) { + int i; + for (i = 0; i <= l->level; ++i) { + if (update[i]->forward[i] == q) + update[i] = q; + } + q = q->forward[0]; + } + } + + if (q && (*l->cmp)(q->key, key) == 0) { + if (!(l->flags & SKIPLIST_FLAG_ALLOW_DUPLICATES) || + (q->value == value)) { + + /* + * found node to delete + */ +#if SKIPLIST_0TIMER_DEBUG + q->flags &= ~SKIPLIST_NODE_FLAG_INSERTED; +#endif + /* + * If we are deleting the last element of the list, + * update the list's "last" pointer. + */ + if (l->last == q) { + if (update[0] == l->header) + l->last = NULL; + else + l->last = update[0]; + } + + for(k=0; k<=m && (p=update[k])->forward[k] == q; k++) { + p->forward[k] = q->forward[k]; + } + --(l->stats->forward[k-1]); +#if SKIPLIST_DEBUG + zlog_debug("%s: decremented stats @%p:%d, now %ld", + __func__, l, k-1, + l->stats->forward[k-1] - (struct skiplistnode *)NULL); +#endif + if (l->del) + (*l->del)(q->value); + XFREE(MTYPE_SKIP_LIST_NODE, q); + while( l->header->forward[m] == NULL && m > 0 ) + m--; + l->level = m; + CHECKLAST(l); + --(l->count); + return 0; + } + } + + CHECKLAST(l); + return -1; +} + +/* + * Obtain first value matching "key". Unless SKIPLIST_FLAG_ALLOW_DUPLICATES + * is set, this will also be the only value matching "key". + * + * Also set a cursor for use with skiplist_next_value. + */ +int +skiplist_first_value( + register struct skiplist *l, /* in */ + register void *key, /* in */ + void **valuePointer, /* out */ + void **cursor) /* out */ +{ + register int k; + register struct skiplistnode *p, *q; + + p = l->header; + k = l->level; + + do { + while (q = p->forward[k], q && (*l->cmp)(q->key, key) < 0) + p = q; + + } while (--k >= 0); + + if (!q || (*l->cmp)(q->key, key)) + return -1; + + if (valuePointer) + *valuePointer = q->value; + + if (cursor) + *cursor = q; + + return 0; +} + +int +skiplist_search( + register struct skiplist *l, + register void *key, + void **valuePointer) +{ + return skiplist_first_value(l, key, valuePointer, NULL); +} + + +/* + * Caller supplies key and value of an existing item in the list. + * Function returns the value of the next list item that has the + * same key (useful when SKIPLIST_FLAG_ALLOW_DUPLICATES is set). + * + * Returns 0 on success. If the caller-supplied key and value + * do not correspond to a list element, or if they specify the + * last element with the given key, -1 is returned. 
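Illustrative sketch (not part of the change; visit_all_values() and use_value() are invented names): with SKIPLIST_FLAG_ALLOW_DUPLICATES set, the cursor protocol described above lets a caller visit every value bound to a single key by seeding the cursor with skiplist_first_value() and advancing it with skiplist_next_value().

    static void
    visit_all_values (struct skiplist *list, void *key)
    {
      void *cursor = NULL;
      void *val;

      if (skiplist_first_value (list, key, &val, &cursor) != 0)
        return;                 /* no binding for this key */
      do
        use_value (val);        /* placeholder consumer */
      while (skiplist_next_value (list, key, &val, &cursor) == 0);
    }

On a list created without the duplicates flag the loop simply runs once, since skiplist_next_value() refuses to advance.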
+ */ +int +skiplist_next_value( + register struct skiplist *l, /* in */ + register void *key, /* in */ + void **valuePointer, /* in/out */ + void **cursor) /* in/out */ +{ + register int k,m; + register struct skiplistnode *p, *q; + + CHECKLAST(l); + + if (!(l->flags & SKIPLIST_FLAG_ALLOW_DUPLICATES)) { + return -1; + } + + if (!cursor || !*cursor) { + p = l->header; + k = m = l->level; + + /* + * Find matching key + */ + do { + while (q = p->forward[k], q && (*l->cmp)(q->key, key) < 0) + p = q; + } while(--k>=0); + + /* + * Find matching value + */ + while (q && ((*l->cmp)(q->key, key) == 0) && (q->value != *valuePointer)) { + q = q->forward[0]; + } + + if (!q || ((*l->cmp)(q->key, key) != 0) || (q->value != *valuePointer)) { + /* + * No matching value + */ + CHECKLAST(l); + return -1; + } + } else { + q = (struct skiplistnode *)*cursor; + } + + /* + * Advance cursor + */ + q = q->forward[0]; + + /* + * If we reached end-of-list or if the key is no longer the same, + * then return error + */ + if (!q || ((*l->cmp)(q->key, key) != 0)) + return -1; + + *valuePointer = q->value; + *cursor = q; + CHECKLAST(l); + return 0; +} + +int +skiplist_first( + register struct skiplist *l, + void **keyPointer, + void **valuePointer) +{ + register struct skiplistnode *p; + + CHECKLAST(l); + p = l->header->forward[0]; + if (!p) + return -1; + + if (keyPointer) + *keyPointer = p->key; + + if (valuePointer) + *valuePointer = p->value; + + CHECKLAST(l); + + return 0; +} + +int +skiplist_last( + register struct skiplist *l, + void **keyPointer, + void **valuePointer) +{ + CHECKLAST(l); + if (l->last) { + if (keyPointer) + *keyPointer = l->last->key; + if (valuePointer) + *valuePointer = l->last->value; + return 0; + } + return -1; +} + +/* + * true = empty + */ +int +skiplist_empty( + register struct skiplist *l) +{ + CHECKLAST(l); + if (l->last) + return 0; + return 1; +} + +/* + * Use this to walk the list. Caller sets *cursor to NULL to obtain + * first element. Return value of 0 indicates valid cursor/element + * returned, otherwise NULL cursor arg or EOL. + */ +int +skiplist_next( + register struct skiplist *l, /* in */ + void **keyPointer, /* out */ + void **valuePointer, /* out */ + void **cursor) /* in/out */ +{ + struct skiplistnode *p; + + if (!cursor) + return -1; + + CHECKLAST(l); + + if (!*cursor) { + p = l->header->forward[0]; + } else { + p = *cursor; + p = p->forward[0]; + } + *cursor = p; + + if (!p) + return -1; + + if (keyPointer) + *keyPointer = p->key; + + if (valuePointer) + *valuePointer = p->value; + + CHECKLAST(l); + + return 0; +} + +int +skiplist_delete_first( + register struct skiplist *l) +{ + register int k; + register struct skiplistnode *p, *q; + int nodelevel = 0; + + CHECKLAST(l); + + p = l->header; + q = l->header->forward[0]; + + if (!q) + return -1; + + for (k = l->level; k >= 0; --k) { + if (p->forward[k] == q) { + p->forward[k] = q->forward[k]; + if ((k == l->level) && (p->forward[k] == NULL) && (l->level > 0)) + --(l->level); + if (!nodelevel) + nodelevel = k; + } + } + +#if SKIPLIST_0TIMER_DEBUG + q->flags &= ~SKIPLIST_NODE_FLAG_INSERTED; +#endif + /* + * If we are deleting the last element of the list, + * update the list's "last" pointer. 
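A second small sketch (again not part of the change; dump_list() is an invented name) of the skiplist_next() walk described above: the cursor starts out NULL and the first non-zero return marks end-of-list, yielding the nodes in the list's sort order.

    static void
    dump_list (struct skiplist *list)
    {
      void *cursor = NULL;
      void *key, *val;

      while (skiplist_next (list, &key, &val, &cursor) == 0)
        zlog_debug ("key=%p value=%p", key, val);   /* placeholder loop body */
    }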
+ */ + if (l->last == q) { + l->last = NULL; + } + + --(l->stats->forward[nodelevel]); +#if SKIPLIST_DEBUG + zlog_debug("%s: decremented stats @%p:%d, now %ld", __func__, l, nodelevel, + l->stats->forward[nodelevel] - (struct skiplistnode *)NULL); +#endif + + if (l->del) + (*l->del)(q->value); + + XFREE(MTYPE_SKIP_LIST_NODE, q); + + CHECKLAST(l); + + --(l->count); + + return 0; +} + +void +skiplist_debug(struct vty *vty, struct skiplist *l) +{ + int i; + + if (!l) + l = skiplist_last_created; + vty_out(vty, "Skiplist %p has max level %d%s", l, l->level, VTY_NEWLINE); + for (i = l->level; i >= 0; --i) + vty_out(vty, " @%d: %ld%s", + i, (long)((l->stats->forward[i]) - (struct skiplistnode *)NULL), + VTY_NEWLINE); +} + +static void * +scramble(int i) +{ + uintptr_t result; + + result = (i & 0xff) << 24; + result |= (i >> 8); + + return (void *)result; +} + +#define sampleSize 65536 +void +skiplist_test(struct vty *vty) { + struct skiplist *l; + register int i,k; + void *keys[sampleSize]; + void *v; + + zlog_debug("%s: entry", __func__); + + l= skiplist_new(SKIPLIST_FLAG_ALLOW_DUPLICATES, NULL, NULL); + + zlog_debug("%s: skiplist_new returned %p", __func__, l); + + for (i=0; i < 4; i++) { + + for (k=0; k < sampleSize; k++) { + if (!(k%1000)) { + zlog_debug("%s: (%d:%d)", __func__, i, k); + } + //keys[k] = (void *)random(); + keys[k] = (void *)scramble(k); + if (skiplist_insert(l, keys[k], keys[k])) + zlog_debug("error in insert #%d,#%d",i,k); + } + + zlog_debug("%s: inserts done", __func__); + + for (k=0; k < sampleSize; k++) { + + if (!(k % 1000)) + zlog_debug("[%d:%d]", i, k); + if (skiplist_search(l, keys[k], &v)) + zlog_debug("error in search #%d,#%d",i,k); + + if (v != keys[k]) + zlog_debug("search returned wrong value"); + } + + + + for (k=0; k < sampleSize; k++) { + + if (!(k % 1000)) + zlog_debug("<%d:%d>", i, k); + if (skiplist_delete(l, keys[k], keys[k])) + zlog_debug("error in delete"); + keys[k] = (void *)scramble(k ^ 0xf0f0f0f0); + if (skiplist_insert(l, keys[k], keys[k])) + zlog_debug("error in insert #%d,#%d",i,k); + } + + for (k=0; k < sampleSize; k++) { + + if (!(k % 1000)) + zlog_debug("{%d:%d}", i, k); + if (skiplist_delete_first(l)) + zlog_debug("error in delete_first"); + } + } + + skiplist_free(l); +} + diff --git a/lib/skiplist.h b/lib/skiplist.h new file mode 100644 index 0000000000..25775f7543 --- /dev/null +++ b/lib/skiplist.h @@ -0,0 +1,159 @@ +/* + * Copyright 1990 William Pugh + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Permission to include in quagga provide on March 31, 2016 + */ + +/* + * Skip List impementation based on code from William Pugh. 
+ * ftp://ftp.cs.umd.edu/pub/skipLists/ + */ + +/* skiplist.h */ + + +#ifndef _ZEBRA_SKIPLIST_H +#define _ZEBRA_SKIPLIST_H + +#define SKIPLIST_0TIMER_DEBUG 1 + +/* + * skiplistnodes must always contain data to be valid. Adding an + * empty node to a list is invalid + */ +struct skiplistnode +{ + void *key; + void *value; +#if SKIPLIST_0TIMER_DEBUG + int flags; +#define SKIPLIST_NODE_FLAG_INSERTED 0x00000001 +#endif + + struct skiplistnode *forward[1]; /* variable sized */ +}; + +struct skiplist +{ + int flags; + +#define SKIPLIST_FLAG_ALLOW_DUPLICATES 0x00000001 + + int level; /* max lvl (1 + current # of levels in list) */ + unsigned int count; + struct skiplistnode *header; + struct skiplistnode *stats; + struct skiplistnode *last; /* last real list item (NULL if empty list) */ + + /* + * Returns -1 if val1 < val2, 0 if equal?, 1 if val1 > val2. + * Used as definition of sorted for listnode_add_sort + */ + int (*cmp) (void *val1, void *val2); + + /* callback to free user-owned data when listnode is deleted. supplying + * this callback is very much encouraged! + */ + void (*del) (void *val); +}; + + +/* Prototypes. */ +extern struct skiplist * +skiplist_new( /* encouraged: set list.del callback on new lists */ + int flags, + int (*cmp) (void *key1, void *key2), /* NULL => default cmp */ + void (*del) (void *val)); /* NULL => no auto val free */ + +extern void +skiplist_free (struct skiplist *); + +extern int +skiplist_insert( + register struct skiplist *l, + register void *key, + register void *value); + +extern int +skiplist_delete( + register struct skiplist *l, + register void *key, + register void *value); + +extern int +skiplist_search( + register struct skiplist *l, + register void *key, + void **valuePointer); + +extern int +skiplist_first_value( + register struct skiplist *l, /* in */ + register void *key, /* in */ + void **valuePointer, /* in/out */ + void **cursor); /* out */ + +extern int +skiplist_next_value( + register struct skiplist *l, /* in */ + register void *key, /* in */ + void **valuePointer, /* in/out */ + void **cursor); /* in/out */ + +extern int +skiplist_first( + register struct skiplist *l, + void **keyPointer, + void **valuePointer); + +extern int +skiplist_last( + register struct skiplist *l, + void **keyPointer, + void **valuePointer); + +extern int +skiplist_delete_first( + register struct skiplist *l); + +extern int +skiplist_next( + register struct skiplist *l, /* in */ + void **keyPointer, /* out */ + void **valuePointer, /* out */ + void **cursor); /* in/out */ + +extern int +skiplist_empty( + register struct skiplist *l); /* in */ + +extern unsigned int +skiplist_count( + register struct skiplist *l); /* in */ + +extern void +skiplist_debug( + struct vty *vty, + struct skiplist *l); + +extern void +skiplist_test( + struct vty *vty); + +#endif /* _ZEBRA_SKIPLIST_H */ diff --git a/lib/sockopt.c b/lib/sockopt.c index 31b2edbacf..c480cee0d7 100644 --- a/lib/sockopt.c +++ b/lib/sockopt.c @@ -20,6 +20,11 @@ */ #include + +#ifdef SUNOS_5 +#include +#endif + #include "log.h" #include "sockopt.h" #include "sockunion.h" @@ -346,6 +351,35 @@ setsockopt_ipv4_multicast_if(int sock, struct in_addr if_addr, #endif return setsockopt (sock, IPPROTO_IP, IP_MULTICAST_IF, (void *)&m, sizeof(m)); +#elif defined(SUNOS_5) + char ifname[IF_NAMESIZE]; + struct ifaddrs *ifa, *ifap; + struct in_addr ifaddr; + + if (if_indextoname(ifindex, ifname) == NULL) + return -1; + + if (getifaddrs(&ifa) != 0) + return -1; + + for (ifap = ifa; ifap != NULL; ifap = ifap->ifa_next) + { + 
struct sockaddr_in *sa; + + if (strcmp(ifap->ifa_name, ifname) != 0) + continue; + if (ifap->ifa_addr->sa_family != AF_INET) + continue; + sa = (struct sockaddr_in*)ifap->ifa_addr; + memcpy(&ifaddr, &sa->sin_addr, sizeof(ifaddr)); + break; + } + + freeifaddrs(ifa); + if (!ifap) /* This means we did not find an IP */ + return -1; + + return setsockopt(sock, IPPROTO_IP, IP_MULTICAST_IF, (void *)&ifaddr, sizeof(ifaddr)); #else #error "Unsupported multicast API" #endif diff --git a/lib/sockunion.h b/lib/sockunion.h index 105b11a24c..7dbd247dca 100644 --- a/lib/sockunion.h +++ b/lib/sockunion.h @@ -25,12 +25,18 @@ #include "privs.h" #include "if.h" +#ifdef __OpenBSD__ +#include +#endif union sockunion { struct sockaddr sa; struct sockaddr_in sin; struct sockaddr_in6 sin6; +#ifdef __OpenBSD__ + struct sockaddr_mpls smpls; +#endif }; enum connect_result diff --git a/lib/table.c b/lib/table.c index 8858aea0fd..d0e084ead2 100644 --- a/lib/table.c +++ b/lib/table.c @@ -523,6 +523,12 @@ static route_table_delegate_t default_delegate = { .destroy_node = route_node_destroy }; +route_table_delegate_t * +route_table_get_default_delegate(void) +{ + return &default_delegate; +} + /* * route_table_init */ diff --git a/lib/table.h b/lib/table.h index 34c196aa47..e6cdcfef1e 100644 --- a/lib/table.h +++ b/lib/table.h @@ -144,6 +144,9 @@ extern struct route_table *route_table_init (void); extern struct route_table * route_table_init_with_delegate (route_table_delegate_t *); +extern route_table_delegate_t * +route_table_get_default_delegate(void); + extern void route_table_finish (struct route_table *); extern void route_unlock_node (struct route_node *node); extern struct route_node *route_top (struct route_table *); diff --git a/lib/thread.c b/lib/thread.c index 76acd07789..5dc296f2cc 100644 --- a/lib/thread.c +++ b/lib/thread.c @@ -222,12 +222,12 @@ vty_out_cpu_thread_history(struct vty* vty, struct cpu_thread_history *a) { #ifdef HAVE_RUSAGE - vty_out(vty, "%7ld.%03ld %9d %8ld %9ld %8ld %9ld", + vty_out(vty, "%10ld.%03ld %9d %8ld %9ld %8ld %9ld", a->cpu.total/1000, a->cpu.total%1000, a->total_calls, a->cpu.total/a->total_calls, a->cpu.max, a->real.total/a->total_calls, a->real.max); #else - vty_out(vty, "%7ld.%03ld %9d %8ld %9ld", + vty_out(vty, "%10ld.%03ld %9d %8ld %9ld", a->real.total/1000, a->real.total%1000, a->total_calls, a->real.total/a->total_calls, a->real.max); #endif diff --git a/lib/vrf.c b/lib/vrf.c index 6de224f8c3..63adea4aec 100644 --- a/lib/vrf.c +++ b/lib/vrf.c @@ -33,6 +33,8 @@ DEFINE_MTYPE_STATIC(LIB, VRF, "VRF") DEFINE_MTYPE_STATIC(LIB, VRF_BITMAP, "VRF bit-map") +DEFINE_QOBJ_TYPE(vrf) + /* * Turn on/off debug code * for vrf. 
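For orientation, an illustrative sketch only (struct widget, widget_new/widget_del, WIDGET_NODE and the show command are invented names, not part of Quagga) of the registration pattern this patch applies to route_map, route_map_index and vrf: a structure embeds QOBJ_FIELDS, is registered and unregistered with QOBJ_REG/QOBJ_UNREG around its lifetime, and CLI handlers later locate it through the vty context macros from lib/vty.h instead of dereferencing vty->index.

    /* header: embed the qobj handle and declare the type */
    struct widget
    {
      char *name;
      QOBJ_FIELDS
    };
    DECLARE_QOBJ_TYPE(widget)

    /* implementation file */
    DEFINE_QOBJ_TYPE(widget)

    static struct widget *
    widget_new (const char *name)
    {
      struct widget *w = XCALLOC (MTYPE_TMP, sizeof (*w));
      w->name = XSTRDUP (MTYPE_TMP, name);
      QOBJ_REG (w, widget);                     /* register at creation */
      return w;
    }

    static void
    widget_del (struct widget *w)
    {
      QOBJ_UNREG (w);                           /* unregister before freeing */
      XFREE (MTYPE_TMP, w->name);
      XFREE (MTYPE_TMP, w);
    }

    /* a command entering the (invented) WIDGET_NODE remembers the object
     * by id with VTY_PUSH_CONTEXT (WIDGET_NODE, w); commands running
     * inside that node then resolve it again safely: */
    DEFUN (show_widget, show_widget_cmd, "show widget", "Show\n" "Widget\n")
    {
      VTY_DECLVAR_CONTEXT (widget, w);          /* CMD_WARNING if w was deleted */
      vty_out (vty, "widget %s%s", w->name, VTY_NEWLINE);
      return CMD_SUCCESS;
    }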
@@ -124,6 +126,7 @@ vrf_get (vrf_id_t vrf_id, const char *name) strcpy (vrf->name, name); listnode_add_sort (vrf_list, vrf); if_init (&vrf->iflist); + QOBJ_REG (vrf, vrf); if (vrf_master.vrf_new_hook) { (*vrf_master.vrf_new_hook) (vrf_id, name, &vrf->info); @@ -212,6 +215,7 @@ vrf_get (vrf_id_t vrf_id, const char *name) strcpy (vrf->name, name); listnode_add_sort (vrf_list, vrf); if_init (&vrf->iflist); + QOBJ_REG (vrf, vrf); if (vrf_master.vrf_new_hook) { (*vrf_master.vrf_new_hook) (vrf_id, name, &vrf->info); @@ -249,6 +253,7 @@ vrf_get (vrf_id_t vrf_id, const char *name) vrf->node = rn; vrf->vrf_id = vrf_id; if_init (&vrf->iflist); + QOBJ_REG (vrf, vrf); if (debug_vrf) zlog_debug("Vrf Created: %p", vrf); return vrf; @@ -275,6 +280,7 @@ vrf_delete (struct vrf *vrf) if (vrf_master.vrf_delete_hook) (*vrf_master.vrf_delete_hook) (vrf->vrf_id, vrf->name, &vrf->info); + QOBJ_UNREG (vrf); if_terminate (&vrf->iflist); if (vrf->node) @@ -738,7 +744,7 @@ vrf_socket (int domain, int type, int protocol, vrf_id_t vrf_id) * Debug CLI for vrf's */ DEFUN (vrf_debug, - vrf_debug_cmd, + vrf_debug_cmd, "debug vrf", DEBUG_STR "VRF Debugging\n") @@ -749,7 +755,7 @@ DEFUN (vrf_debug, } DEFUN (no_vrf_debug, - no_vrf_debug_cmd, + no_vrf_debug_cmd, "no debug vrf", NO_STR DEBUG_STR diff --git a/lib/vrf.h b/lib/vrf.h index e0fd25b9b2..f1fbad9ff5 100644 --- a/lib/vrf.h +++ b/lib/vrf.h @@ -24,6 +24,7 @@ #define _ZEBRA_VRF_H #include "linklist.h" +#include "qobj.h" /* The default NS ID */ #define NS_DEFAULT 0 @@ -81,7 +82,10 @@ struct vrf /* User data */ void *info; + + QOBJ_FIELDS }; +DECLARE_QOBJ_TYPE(vrf) extern struct list *vrf_list; diff --git a/lib/vty.c b/lib/vty.c index ee7ea579a7..78bf0e720d 100644 --- a/lib/vty.c +++ b/lib/vty.c @@ -83,14 +83,11 @@ char *vty_cwd = NULL; /* Configure lock. */ static int vty_config; +static int vty_config_is_lockless = 0; /* Login password check. */ static int no_password_check = 0; -/* Restrict unauthenticated logins? */ -static const u_char restricted_mode_default = 0; -static u_char restricted_mode = 0; - /* Integrated configuration file path */ char integrate_default[] = SYSCONFDIR INTEGRATE_DEFAULT_CONFIG; @@ -394,7 +391,7 @@ vty_auth (struct vty *vty, char *buf) /* AUTH_ENABLE_NODE */ vty->fail = 0; vty_out (vty, "%% Bad enable passwords, too many failures!%s", VTY_NEWLINE); - vty->node = restricted_mode ? RESTRICTED_NODE : VIEW_NODE; + vty->status = VTY_CLOSE; } } } @@ -734,7 +731,6 @@ vty_end_config (struct vty *vty) { case VIEW_NODE: case ENABLE_NODE: - case RESTRICTED_NODE: /* Nothing to do. */ break; case CONFIG_NODE: @@ -747,6 +743,9 @@ vty_end_config (struct vty *vty) case BGP_VPNV6_NODE: case BGP_ENCAP_NODE: case BGP_ENCAPV6_NODE: + case BGP_VNC_DEFAULTS_NODE: + case BGP_VNC_NVE_GROUP_NODE: + case BGP_VNC_L2_GROUP_NODE: case BGP_IPV4_NODE: case BGP_IPV4M_NODE: case BGP_IPV6_NODE: @@ -754,6 +753,13 @@ vty_end_config (struct vty *vty) case RMAP_NODE: case OSPF_NODE: case OSPF6_NODE: + case LDP_NODE: + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + case LDP_IPV4_IFACE_NODE: + case LDP_IPV6_IFACE_NODE: + case LDP_L2VPN_NODE: + case LDP_PSEUDOWIRE_NODE: case ISIS_NODE: case KEYCHAIN_NODE: case KEYCHAIN_KEY_NODE: @@ -1144,7 +1150,6 @@ vty_stop_input (struct vty *vty) { case VIEW_NODE: case ENABLE_NODE: - case RESTRICTED_NODE: /* Nothing to do. 
*/ break; case CONFIG_NODE: @@ -1156,6 +1161,13 @@ vty_stop_input (struct vty *vty) case RMAP_NODE: case OSPF_NODE: case OSPF6_NODE: + case LDP_NODE: + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + case LDP_IPV4_IFACE_NODE: + case LDP_IPV6_IFACE_NODE: + case LDP_L2VPN_NODE: + case LDP_PSEUDOWIRE_NODE: case ISIS_NODE: case KEYCHAIN_NODE: case KEYCHAIN_KEY_NODE: @@ -1697,9 +1709,7 @@ vty_create (int vty_sock, union sockunion *su) strcpy (vty->address, buf); if (no_password_check) { - if (restricted_mode) - vty->node = RESTRICTED_NODE; - else if (host.advanced) + if (host.advanced) vty->node = ENABLE_NODE; else vty->node = VIEW_NODE; @@ -2620,6 +2630,8 @@ vty_log_fixed (char *buf, size_t len) int vty_config_lock (struct vty *vty) { + if (vty_config_is_lockless) + return 1; if (vty_config == 0) { vty->config = 1; @@ -2631,6 +2643,8 @@ vty_config_lock (struct vty *vty) int vty_config_unlock (struct vty *vty) { + if (vty_config_is_lockless) + return 0; if (vty_config == 1 && vty->config == 1) { vty->config = 0; @@ -2639,6 +2653,12 @@ vty_config_unlock (struct vty *vty) return vty->config; } +void +vty_config_lockless (void) +{ + vty_config_is_lockless = 1; +} + /* Master of the threads. */ static struct thread_master *vty_master; @@ -2884,26 +2904,6 @@ DEFUN (no_vty_login, return CMD_SUCCESS; } -/* initial mode. */ -DEFUN (vty_restricted_mode, - vty_restricted_mode_cmd, - "anonymous restricted", - "Restrict view commands available in anonymous, unauthenticated vty\n") -{ - restricted_mode = 1; - return CMD_SUCCESS; -} - -DEFUN (vty_no_restricted_mode, - vty_no_restricted_mode_cmd, - "no anonymous restricted", - NO_STR - "Enable password checking\n") -{ - restricted_mode = 0; - return CMD_SUCCESS; -} - DEFUN (service_advanced_vty, service_advanced_vty_cmd, "service advanced-vty", @@ -3017,14 +3017,6 @@ vty_config_write (struct vty *vty) if (no_password_check) vty_out (vty, " no login%s", VTY_NEWLINE); - if (restricted_mode != restricted_mode_default) - { - if (restricted_mode_default) - vty_out (vty, " no anonymous restricted%s", VTY_NEWLINE); - else - vty_out (vty, " anonymous restricted%s", VTY_NEWLINE); - } - if (do_log_commands) vty_out (vty, "log commands%s", VTY_NEWLINE); @@ -3153,11 +3145,8 @@ vty_init (struct thread_master *master_thread) /* Install bgp top node. 
*/ install_node (&vty_node, vty_config_write); - install_element (RESTRICTED_NODE, &config_who_cmd); - install_element (RESTRICTED_NODE, &show_history_cmd); install_element (VIEW_NODE, &config_who_cmd); install_element (VIEW_NODE, &show_history_cmd); - install_element (ENABLE_NODE, &config_who_cmd); install_element (CONFIG_NODE, &line_vty_cmd); install_element (CONFIG_NODE, &service_advanced_vty_cmd); install_element (CONFIG_NODE, &no_service_advanced_vty_cmd); @@ -3166,7 +3155,6 @@ vty_init (struct thread_master *master_thread) install_element (ENABLE_NODE, &terminal_monitor_cmd); install_element (ENABLE_NODE, &terminal_no_monitor_cmd); install_element (ENABLE_NODE, &no_terminal_monitor_cmd); - install_element (ENABLE_NODE, &show_history_cmd); install_default (VTY_NODE); install_element (VTY_NODE, &exec_timeout_min_cmd); @@ -3176,8 +3164,6 @@ vty_init (struct thread_master *master_thread) install_element (VTY_NODE, &no_vty_access_class_cmd); install_element (VTY_NODE, &vty_login_cmd); install_element (VTY_NODE, &no_vty_login_cmd); - install_element (VTY_NODE, &vty_restricted_mode_cmd); - install_element (VTY_NODE, &vty_no_restricted_mode_cmd); #ifdef HAVE_IPV6 install_element (VTY_NODE, &vty_ipv6_access_class_cmd); install_element (VTY_NODE, &no_vty_ipv6_access_class_cmd); @@ -3197,3 +3183,29 @@ vty_terminate (void) vector_free (Vvty_serv_thread); } } + +/* Utility functions to get arguments from commands generated + by the xml2cli.pl script. */ +const char * +vty_get_arg_value (struct vty_arg *args[], const char *arg) +{ + while (*args) + { + if (strcmp ((*args)->name, arg) == 0) + return (*args)->value; + args++; + } + return NULL; +} + +struct vty_arg * +vty_get_arg (struct vty_arg *args[], const char *arg) +{ + while (*args) + { + if (strcmp ((*args)->name, arg) == 0) + return *args; + args++; + } + return NULL; +} diff --git a/lib/vty.h b/lib/vty.h index fe051f053a..3870e59b72 100644 --- a/lib/vty.h +++ b/lib/vty.h @@ -24,12 +24,20 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #include "thread.h" #include "log.h" #include "sockunion.h" +#include "qobj.h" #define VTY_BUFSIZ 512 #define VTY_MAXHIST 20 +#if defined(VTY_DEPRECATE_INDEX) && defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define INDEX_WARNING __attribute__((deprecated)) +#else +#define INDEX_WARNING +#endif + /* VTY struct. */ -struct vty +struct vty { /* File descripter of this vty. */ int fd; @@ -75,10 +83,13 @@ struct vty /* For current referencing point of interface, route-map, access-list etc... */ - void *index; + void *index INDEX_WARNING; - /* For multiple level index treatment such as key chain and key. */ - void *index_sub; + /* qobj object ID (replacement for "index") */ + uint64_t qobj_index; + + /* qobj second-level object ID (replacement for "index_sub") */ + uint64_t qobj_index_sub; /* For escape character. 
*/ unsigned char escape; @@ -127,6 +138,64 @@ struct vty char address[SU_ADDRSTRLEN]; }; +#undef INDEX_WARNING + +static inline void vty_push_context(struct vty *vty, + int node, uint64_t id, void *idx) +{ + vty->node = node; + vty->qobj_index = id; +#if defined(VTY_DEPRECATE_INDEX) && defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + vty->index = idx; +#pragma GCC diagnostic pop +#else + vty->index = idx; +#endif +} + +#define VTY_PUSH_CONTEXT(nodeval, ptr) \ + vty_push_context(vty, nodeval, QOBJ_ID(ptr), NULL) +#define VTY_PUSH_CONTEXT_COMPAT(nodeval, ptr) \ + vty_push_context(vty, nodeval, QOBJ_ID(ptr), ptr) +#define VTY_PUSH_CONTEXT_SUB(nodeval, ptr) do { \ + vty->node = nodeval; \ + /* qobj_index stays untouched */ \ + vty->qobj_index_sub = QOBJ_ID(ptr); \ + } while (0) + +/* can return NULL if context is invalid! */ +#define VTY_GET_CONTEXT(structname) \ + QOBJ_GET_TYPESAFE(vty->qobj_index, structname) +#define VTY_GET_CONTEXT_SUB(structname) \ + QOBJ_GET_TYPESAFE(vty->qobj_index_sub, structname) + +/* will return if ptr is NULL. */ +#define VTY_CHECK_CONTEXT(ptr) \ + if (!ptr) { \ + vty_out (vty, "Current configuration object was deleted " \ + "by another process.%s", VTY_NEWLINE); \ + return CMD_WARNING; \ + } + +/* struct structname *ptr = ; ptr will never be NULL. */ +#define VTY_DECLVAR_CONTEXT(structname, ptr) \ + struct structname *ptr = VTY_GET_CONTEXT(structname); \ + VTY_CHECK_CONTEXT(ptr); +#define VTY_DECLVAR_CONTEXT_SUB(structname, ptr) \ + struct structname *ptr = VTY_GET_CONTEXT_SUB(structname); \ + VTY_CHECK_CONTEXT(ptr); + +struct vty_arg +{ + const char *name; + const char *value; + const char **argv; + int argc; +}; + /* Integrated configuration file. */ #define INTEGRATE_DEFAULT_CONFIG "Quagga.conf" @@ -231,33 +300,33 @@ do { \ VTY_GET_INTEGER_RANGE_HEART(NAME,tmpl,STR,MIN,MAX); \ } while (0) -#define VTY_GET_INTEGER(NAME,V,STR) \ +#define VTY_GET_INTEGER(NAME,V,STR) \ VTY_GET_INTEGER_RANGE(NAME,V,STR,0U,UINT32_MAX) -#define VTY_GET_IPV4_ADDRESS(NAME,V,STR) \ -do { \ - int retv; \ - retv = inet_aton ((STR), &(V)); \ - if (!retv) \ - { \ - vty_out (vty, "%% Invalid %s value%s", NAME, VTY_NEWLINE); \ - return CMD_WARNING; \ - } \ +#define VTY_GET_IPV4_ADDRESS(NAME,V,STR) \ +do { \ + int retv; \ + retv = inet_aton ((STR), &(V)); \ + if (!retv) \ + { \ + vty_out (vty, "%% Invalid %s value%s", NAME, VTY_NEWLINE); \ + return CMD_WARNING; \ + } \ } while (0) -#define VTY_GET_IPV4_PREFIX(NAME,V,STR) \ -do { \ - int retv; \ - retv = str2prefix_ipv4 ((STR), &(V)); \ - if (retv <= 0) \ - { \ - vty_out (vty, "%% Invalid %s value%s", NAME, VTY_NEWLINE); \ - return CMD_WARNING; \ - } \ +#define VTY_GET_IPV4_PREFIX(NAME,V,STR) \ +do { \ + int retv; \ + retv = str2prefix_ipv4 ((STR), &(V)); \ + if (retv <= 0) \ + { \ + vty_out (vty, "%% Invalid %s value%s", NAME, VTY_NEWLINE); \ + return CMD_WARNING; \ + } \ } while (0) -#define VTY_WARN_EXPERIMENTAL() \ -do { \ +#define VTY_WARN_EXPERIMENTAL() \ +do { \ vty_out (vty, "%% WARNING: this command is experimental. 
Both its name and" \ " parameters may%s%% change in a future version of Quagga," \ " possibly breaking your configuration!%s", \ @@ -280,10 +349,11 @@ extern void vty_time_print (struct vty *, int); extern void vty_serv_sock (const char *, unsigned short, const char *); extern void vty_close (struct vty *); extern char *vty_get_cwd (void); -extern void vty_log (const char *level, const char *proto, +extern void vty_log (const char *level, const char *proto, const char *fmt, struct timestamp_control *, va_list); extern int vty_config_lock (struct vty *); extern int vty_config_unlock (struct vty *); +extern void vty_config_lockless (void); extern int vty_shell (struct vty *); extern int vty_shell_serv (struct vty *); extern void vty_hello (struct vty *); @@ -292,4 +362,7 @@ extern void vty_hello (struct vty *); an async-signal-safe function. */ extern void vty_log_fixed (char *buf, size_t len); +extern const char *vty_get_arg_value (struct vty_arg **, const char *); +extern struct vty_arg *vty_get_arg (struct vty_arg **, const char *); + #endif /* _ZEBRA_VTY_H */ diff --git a/lib/zclient.c b/lib/zclient.c index 057fa77580..fa8150c5a1 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -140,6 +140,9 @@ redist_del_instance (struct redist_proto *red, u_short instance) void zclient_stop (struct zclient *zclient) { + afi_t afi; + int i; + if (zclient_debug) zlog_debug ("zclient stopped"); @@ -162,6 +165,15 @@ zclient_stop (struct zclient *zclient) zclient->sock = -1; } zclient->fail = 0; + + for (afi = AFI_IP; afi < AFI_MAX; afi++) + for (i = 0; i < ZEBRA_ROUTE_MAX; i++) + { + vrf_bitmap_free(zclient->redist[afi][i]); + zclient->redist[afi][i] = VRF_BITMAP_NULL; + } + vrf_bitmap_free(zclient->default_information); + zclient->default_information = VRF_BITMAP_NULL; } void @@ -710,7 +722,7 @@ zclient_connect (struct thread *t) * If ZAPI_MESSAGE_METRIC is set, the metric value is written as an 8 * byte value. * - * If ZAPI_MESSAGE_TAG is set, the tag value is written as a 2 byte value + * If ZAPI_MESSAGE_TAG is set, the tag value is written as a 4 byte value * * If ZAPI_MESSAGE_MTU is set, the mtu value is written as a 4 byte value * @@ -733,7 +745,7 @@ zapi_ipv4_route (u_char cmd, struct zclient *zclient, struct prefix_ipv4 *p, /* Put type and nexthop. */ stream_putc (s, api->type); stream_putw (s, api->instance); - stream_putc (s, api->flags); + stream_putl (s, api->flags); stream_putc (s, api->message); stream_putw (s, api->safi); @@ -773,7 +785,7 @@ zapi_ipv4_route (u_char cmd, struct zclient *zclient, struct prefix_ipv4 *p, if (CHECK_FLAG (api->message, ZAPI_MESSAGE_METRIC)) stream_putl (s, api->metric); if (CHECK_FLAG (api->message, ZAPI_MESSAGE_TAG)) - stream_putw (s, api->tag); + stream_putl (s, api->tag); if (CHECK_FLAG (api->message, ZAPI_MESSAGE_MTU)) stream_putl (s, api->mtu); @@ -801,7 +813,7 @@ zapi_ipv4_route_ipv6_nexthop (u_char cmd, struct zclient *zclient, /* Put type and nexthop. 
*/ stream_putc (s, api->type); stream_putw (s, api->instance); - stream_putc (s, api->flags); + stream_putl (s, api->flags); stream_putc (s, api->message); stream_putw (s, api->safi); @@ -840,7 +852,7 @@ zapi_ipv4_route_ipv6_nexthop (u_char cmd, struct zclient *zclient, if (CHECK_FLAG (api->message, ZAPI_MESSAGE_METRIC)) stream_putl (s, api->metric); if (CHECK_FLAG (api->message, ZAPI_MESSAGE_TAG)) - stream_putw (s, api->tag); + stream_putl (s, api->tag); if (CHECK_FLAG (api->message, ZAPI_MESSAGE_MTU)) stream_putl (s, api->mtu); @@ -867,7 +879,7 @@ zapi_ipv6_route (u_char cmd, struct zclient *zclient, struct prefix_ipv6 *p, /* Put type and nexthop. */ stream_putc (s, api->type); stream_putw (s, api->instance); - stream_putc (s, api->flags); + stream_putl (s, api->flags); stream_putc (s, api->message); stream_putw (s, api->safi); @@ -906,7 +918,7 @@ zapi_ipv6_route (u_char cmd, struct zclient *zclient, struct prefix_ipv6 *p, if (CHECK_FLAG (api->message, ZAPI_MESSAGE_METRIC)) stream_putl (s, api->metric); if (CHECK_FLAG (api->message, ZAPI_MESSAGE_TAG)) - stream_putw (s, api->tag); + stream_putl (s, api->tag); if (CHECK_FLAG (api->message, ZAPI_MESSAGE_MTU)) stream_putl (s, api->mtu); @@ -942,18 +954,30 @@ zebra_redistribute_send (int command, struct zclient *zclient, afi_t afi, int ty return zclient_send_message(zclient); } +/* Get prefix in ZServ format; family should be filled in on prefix */ +static void +zclient_stream_get_prefix (struct stream *s, struct prefix *p) +{ + size_t plen = prefix_blen (p); + u_char c; + p->prefixlen = 0; + + if (plen == 0) + return; + + stream_get (&p->u.prefix, s, plen); + c = stream_getc(s); + p->prefixlen = MIN(plen * 8, c); +} + /* Router-id update from zebra daemon. */ void zebra_router_id_update_read (struct stream *s, struct prefix *rid) { - int plen; - /* Fetch interface address. */ rid->family = stream_getc (s); - - plen = prefix_blen (rid); - stream_get (&rid->u.prefix, s, plen); - rid->prefixlen = stream_getc (s); + + zclient_stream_get_prefix (s, rid); } /* Interface addition from zebra daemon. */ @@ -1263,8 +1287,7 @@ zebra_interface_address_read (int type, struct stream *s, vrf_id_t vrf_id) ifindex_t ifindex; struct interface *ifp; struct connected *ifc; - struct prefix p, d; - int family; + struct prefix p, d, *dp; int plen; u_char ifc_flags; @@ -1288,24 +1311,24 @@ zebra_interface_address_read (int type, struct stream *s, vrf_id_t vrf_id) ifc_flags = stream_getc (s); /* Fetch interface address. */ - family = p.family = stream_getc (s); - - plen = prefix_blen (&p); - stream_get (&p.u.prefix, s, plen); - p.prefixlen = stream_getc (s); + d.family = p.family = stream_getc (s); + plen = prefix_blen (&d); + + zclient_stream_get_prefix (s, &p); /* Fetch destination address. */ stream_get (&d.u.prefix, s, plen); - d.family = family; - + + /* N.B. NULL destination pointers are encoded as all zeroes */ + dp = memconstant(&d.u.prefix,0,plen) ? NULL : &d; + if (type == ZEBRA_INTERFACE_ADDRESS_ADD) { ifc = connected_lookup_prefix_exact (ifp, &p); if (!ifc) { /* N.B. NULL destination pointers are encoded as all zeroes */ - ifc = connected_add_by_prefix(ifp, &p, (memconstant(&d.u.prefix,0,plen) ? 
- NULL : &d)); + ifc = connected_add_by_prefix(ifp, &p, dp); } if (ifc) { @@ -1583,22 +1606,6 @@ zclient_read (struct thread *thread) if (zclient->interface_vrf_update) (*zclient->interface_vrf_update) (command, zclient, length, vrf_id); break; - case ZEBRA_IPV4_ROUTE_ADD: - if (zclient->ipv4_route_add) - (*zclient->ipv4_route_add) (command, zclient, length, vrf_id); - break; - case ZEBRA_IPV4_ROUTE_DELETE: - if (zclient->ipv4_route_delete) - (*zclient->ipv4_route_delete) (command, zclient, length, vrf_id); - break; - case ZEBRA_IPV6_ROUTE_ADD: - if (zclient->ipv6_route_add) - (*zclient->ipv6_route_add) (command, zclient, length, vrf_id); - break; - case ZEBRA_IPV6_ROUTE_DELETE: - if (zclient->ipv6_route_delete) - (*zclient->ipv6_route_delete) (command, zclient, length, vrf_id); - break; case ZEBRA_NEXTHOP_UPDATE: if (zclient_debug) zlog_debug("zclient rcvd nexthop update\n"); diff --git a/lib/zclient.h b/lib/zclient.h index b95d18ec1a..f122b233b9 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -98,10 +98,6 @@ struct zclient int (*interface_nbr_address_add) (int, struct zclient *, uint16_t, vrf_id_t); int (*interface_nbr_address_delete) (int, struct zclient *, uint16_t, vrf_id_t); int (*interface_vrf_update) (int, struct zclient *, uint16_t, vrf_id_t); - int (*ipv4_route_add) (int, struct zclient *, uint16_t, vrf_id_t); - int (*ipv4_route_delete) (int, struct zclient *, uint16_t, vrf_id_t); - int (*ipv6_route_add) (int, struct zclient *, uint16_t, vrf_id_t); - int (*ipv6_route_delete) (int, struct zclient *, uint16_t, vrf_id_t); int (*nexthop_update) (int, struct zclient *, uint16_t, vrf_id_t); int (*import_check_update) (int, struct zclient *, uint16_t, vrf_id_t); int (*bfd_dest_replay) (int, struct zclient *, uint16_t, vrf_id_t); @@ -138,7 +134,7 @@ struct zapi_ipv4 u_char type; u_short instance; - u_char flags; + u_int32_t flags; u_char message; @@ -154,7 +150,7 @@ struct zapi_ipv4 u_int32_t metric; - u_short tag; + route_tag_t tag; u_int32_t mtu; @@ -226,7 +222,7 @@ struct zapi_ipv6 u_char type; u_short instance; - u_char flags; + u_int32_t flags; u_char message; @@ -242,7 +238,7 @@ struct zapi_ipv6 u_int32_t metric; - u_short tag; + route_tag_t tag; u_int32_t mtu; diff --git a/lib/zebra.h b/lib/zebra.h index d7a441c2e9..08c50c68bc 100644 --- a/lib/zebra.h +++ b/lib/zebra.h @@ -352,18 +352,21 @@ struct in_pktinfo #endif /* ndef BYTE_ORDER */ /* MAX / MIN are not commonly defined, but useful */ -#ifndef MAX +/* note: glibc sys/param.h has #define MIN(a,b) (((a)<(b))?(a):(b)) */ +#ifdef MAX +#undef MAX +#endif #define MAX(a, b) \ ({ typeof (a) _a = (a); \ typeof (b) _b = (b); \ _a > _b ? _a : _b; }) +#ifdef MIN +#undef MIN #endif -#ifndef MIN #define MIN(a, b) \ ({ typeof (a) _a = (a); \ typeof (b) _b = (b); \ _a < _b ? _a : _b; }) -#endif #define ZEBRA_NUM_OF(x) (sizeof (x) / sizeof (x[0])) @@ -422,6 +425,12 @@ typedef enum { ZEBRA_INTERFACE_DISABLE_RADV, ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB, ZEBRA_INTERFACE_LINK_PARAMS, + ZEBRA_MPLS_LABELS_ADD, + ZEBRA_MPLS_LABELS_DELETE, + ZEBRA_IPV4_NEXTHOP_ADD, + ZEBRA_IPV4_NEXTHOP_DELETE, + ZEBRA_IPV6_NEXTHOP_ADD, + ZEBRA_IPV6_NEXTHOP_DELETE, } zebra_message_types_t; /* Marker value used in new Zserv, in the byte location corresponding @@ -469,6 +478,7 @@ extern const char *zserv_command_string (unsigned int command); #define ZEBRA_FLAG_STATIC 0x40 #define ZEBRA_FLAG_REJECT 0x80 #define ZEBRA_FLAG_SCOPE_LINK 0x100 +#define ZEBRA_FLAG_FIB_OVERRIDE 0x200 #ifndef INADDR_LOOPBACK #define INADDR_LOOPBACK 0x7f000001 /* Internet address 127.0.0.1. 
*/ @@ -518,4 +528,8 @@ typedef u_int16_t zebra_command_t; /* VRF ID type. */ typedef u_int16_t vrf_id_t; +typedef uint32_t route_tag_t; +#define ROUTE_TAG_MAX UINT32_MAX +#define ROUTE_TAG_PRI PRIu32 + #endif /* _ZEBRA_H */ diff --git a/ospf6d/ospf6_area.c b/ospf6d/ospf6_area.c index 3449ec3ff7..de395e8b57 100644 --- a/ospf6d/ospf6_area.c +++ b/ospf6d/ospf6_area.c @@ -1053,10 +1053,6 @@ ospf6_area_init (void) install_element (VIEW_NODE, &show_ipv6_ospf6_area_spf_tree_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_simulate_spf_tree_root_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_spf_tree_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_area_spf_tree_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_simulate_spf_tree_root_cmd); - install_element (OSPF6_NODE, &area_range_cmd); install_element (OSPF6_NODE, &no_area_range_cmd); install_element (OSPF6_NODE, &ospf6_area_stub_no_summary_cmd); diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c index 07ddb9cc88..3ade69c30f 100644 --- a/ospf6d/ospf6_asbr.c +++ b/ospf6d/ospf6_asbr.c @@ -93,6 +93,9 @@ ospf6_as_external_lsa_originate (struct ospf6_route *route) UNSET_FLAG (as_external_lsa->bits_metric, OSPF6_ASBR_BIT_F); /* external route tag */ + if (info->tag) + SET_FLAG (as_external_lsa->bits_metric, OSPF6_ASBR_BIT_T); + else UNSET_FLAG (as_external_lsa->bits_metric, OSPF6_ASBR_BIT_T); /* Set metric */ @@ -123,7 +126,10 @@ ospf6_as_external_lsa_originate (struct ospf6_route *route) /* External Route Tag */ if (CHECK_FLAG (as_external_lsa->bits_metric, OSPF6_ASBR_BIT_T)) { - /* xxx */ + route_tag_t network_order = htonl(info->tag); + + memcpy (p, &network_order, sizeof(network_order)); + p += sizeof(network_order); } /* Fill LSA Header */ @@ -146,6 +152,29 @@ ospf6_as_external_lsa_originate (struct ospf6_route *route) ospf6_lsa_originate_process (lsa, ospf6); } +static route_tag_t +ospf6_as_external_lsa_get_tag (struct ospf6_lsa *lsa) +{ + struct ospf6_as_external_lsa *external; + ptrdiff_t tag_offset; + route_tag_t network_order; + + if (!lsa) + return 0; + + external = (struct ospf6_as_external_lsa *) + OSPF6_LSA_HEADER_END (lsa->header); + + if (!CHECK_FLAG (external->bits_metric, OSPF6_ASBR_BIT_T)) + return 0; + + tag_offset = sizeof(*external) + OSPF6_PREFIX_SPACE(external->prefix.prefix_length); + if (CHECK_FLAG (external->bits_metric, OSPF6_ASBR_BIT_F)) + tag_offset += sizeof(struct in6_addr); + + memcpy(&network_order, (caddr_t)external + tag_offset, sizeof(network_order)); + return ntohl(network_order); +} void ospf6_asbr_lsa_add (struct ospf6_lsa *lsa) @@ -222,6 +251,8 @@ ospf6_asbr_lsa_add (struct ospf6_lsa *lsa) route->path.u.cost_e2 = 0; } + route->path.tag = ospf6_as_external_lsa_get_tag (lsa); + ospf6_route_copy_nexthops (route, asbr_entry); if (IS_OSPF6_DEBUG_EXAMIN (AS_EXTERNAL)) @@ -427,7 +458,7 @@ ospf6_asbr_send_externals_to_area (struct ospf6_area *oa) void ospf6_asbr_redistribute_add (int type, ifindex_t ifindex, struct prefix *prefix, - u_int nexthop_num, struct in6_addr *nexthop) + u_int nexthop_num, struct in6_addr *nexthop, route_tag_t tag) { int ret; struct ospf6_route troute; @@ -469,6 +500,7 @@ ospf6_asbr_redistribute_add (int type, ifindex_t ifindex, struct prefix *prefix, memset (&tinfo, 0, sizeof (tinfo)); troute.route_option = &tinfo; tinfo.ifindex = ifindex; + tinfo.tag = tag; ret = route_map_apply (ospf6->rmap[type].map, prefix, RMAP_OSPF6, &troute); @@ -495,6 +527,12 @@ ospf6_asbr_redistribute_add (int type, ifindex_t ifindex, struct prefix *prefix, if (! 
IN6_IS_ADDR_UNSPECIFIED (&tinfo.forwarding)) memcpy (&info->forwarding, &tinfo.forwarding, sizeof (struct in6_addr)); + info->tag = tinfo.tag; + } + else + { + /* If there is no route-map, simply update the tag */ + info->tag = tag; } info->type = type; @@ -542,6 +580,12 @@ ospf6_asbr_redistribute_add (int type, ifindex_t ifindex, struct prefix *prefix, if (! IN6_IS_ADDR_UNSPECIFIED (&tinfo.forwarding)) memcpy (&info->forwarding, &tinfo.forwarding, sizeof (struct in6_addr)); + info->tag = tinfo.tag; + } + else + { + /* If there is no route-map, simply set the tag */ + info->tag = tag; } info->type = type; @@ -644,8 +688,7 @@ DEFUN (ospf6_redistribute, ospf6_redistribute_cmd, "redistribute ", "Redistribute\n" - QUAGGA_REDIST_HELP_STR_OSPF6D - ) + QUAGGA_REDIST_HELP_STR_OSPF6D) { int type; @@ -664,8 +707,7 @@ DEFUN (ospf6_redistribute_routemap, "Redistribute\n" QUAGGA_REDIST_HELP_STR_OSPF6D "Route map reference\n" - "Route map name\n" - ) + "Route map name\n") { int idx_protocol = 1; int idx_word = 3; @@ -857,6 +899,30 @@ ospf6_routemap_rule_match_interface_cmd = ospf6_routemap_rule_match_interface_free }; +/* Match function for matching route tags */ +static route_map_result_t +ospf6_routemap_rule_match_tag (void *rule, struct prefix *prefix, + route_map_object_t type, void *object) +{ + route_tag_t *tag = rule; + struct ospf6_route *route = object; + struct ospf6_external_info *info = route->route_option; + + if (type == RMAP_OSPF6 && info->tag == *tag) + return RMAP_MATCH; + + return RMAP_NOMATCH; +} + +static struct route_map_rule_cmd +ospf6_routemap_rule_match_tag_cmd = +{ + "tag", + ospf6_routemap_rule_match_tag, + route_map_rule_tag_compile, + route_map_rule_tag_free, +}; + static route_map_result_t ospf6_routemap_rule_set_metric_type (void *rule, struct prefix *prefix, route_map_object_t type, void *object) @@ -982,6 +1048,30 @@ ospf6_routemap_rule_set_forwarding_cmd = ospf6_routemap_rule_set_forwarding_free, }; +static route_map_result_t +ospf6_routemap_rule_set_tag (void *rule, struct prefix *prefix, + route_map_object_t type, void *object) +{ + route_tag_t *tag = rule; + struct ospf6_route *route = object; + struct ospf6_external_info *info = route->route_option; + + if (type != RMAP_OSPF6) + return RMAP_OKAY; + + info->tag = *tag; + return RMAP_OKAY; +} + +static struct route_map_rule_cmd +ospf6_routemap_rule_set_tag_cmd = +{ + "tag", + ospf6_routemap_rule_set_tag, + route_map_rule_tag_compile, + route_map_rule_tag_free, +}; + static int route_map_command_status (struct vty *vty, int ret) { @@ -1053,8 +1143,8 @@ DEFUN (ospf6_routemap_match_interface, DEFUN (ospf6_routemap_no_match_interface, ospf6_routemap_no_match_interface_cmd, "no match interface [WORD]", - MATCH_STR NO_STR + MATCH_STR "Match first hop interface of route\n" "Interface name\n") { @@ -1130,6 +1220,34 @@ DEFUN (ospf6_routemap_no_set_forwarding, return route_map_command_status (vty, ret); } +/* add "set tag" */ +DEFUN (ospf6_routemap_set_tag, + ospf6_routemap_set_tag_cmd, + "set tag (1-4294967295)", + "Set value\n" + "Tag value for routing protocol\n" + "Tag value\n") +{ + int ret = route_map_add_set ((struct route_map_index *) vty->index, + "tag", argv[2]->arg); + return route_map_command_status (vty, ret); +} + +/* delete "set tag" */ +DEFUN (ospf6_routemap_no_set_tag, + ospf6_routemap_no_set_tag_cmd, + "no set tag [(1-4294967295)]", + NO_STR + "Set value\n" + "Tag value for routing protocol\n" + "Tag value\n") +{ + char *tag = (argc == 4) ? 
argv[3]->arg : NULL; + VTY_DECLVAR_CONTEXT (route_map_index, index); + int ret = route_map_delete_set (index, "tag", tag); + return route_map_command_status (vty, ret); +} + static void ospf6_routemap_init (void) { @@ -1143,10 +1261,12 @@ ospf6_routemap_init (void) route_map_install_match (&ospf6_routemap_rule_match_address_prefixlist_cmd); route_map_install_match (&ospf6_routemap_rule_match_interface_cmd); + route_map_install_match (&ospf6_routemap_rule_match_tag_cmd); route_map_install_set (&ospf6_routemap_rule_set_metric_type_cmd); route_map_install_set (&ospf6_routemap_rule_set_metric_cmd); route_map_install_set (&ospf6_routemap_rule_set_forwarding_cmd); + route_map_install_set (&ospf6_routemap_rule_set_tag_cmd); /* Match address prefix-list */ install_element (RMAP_NODE, &ospf6_routemap_match_address_prefixlist_cmd); @@ -1163,6 +1283,10 @@ ospf6_routemap_init (void) /* ASE Metric */ install_element (RMAP_NODE, &ospf6_routemap_set_forwarding_cmd); install_element (RMAP_NODE, &ospf6_routemap_no_set_forwarding_cmd); + + /* Tag */ + install_element (RMAP_NODE, &ospf6_routemap_set_tag_cmd); + install_element (RMAP_NODE, &ospf6_routemap_no_set_tag_cmd); } @@ -1240,6 +1364,13 @@ ospf6_as_external_lsa_show (struct vty *vty, struct ospf6_lsa *lsa) VNL); } + /* Tag */ + if (CHECK_FLAG (external->bits_metric, OSPF6_ASBR_BIT_T)) + { + vty_out (vty, " Tag: %"ROUTE_TAG_PRI"%s", + ospf6_as_external_lsa_get_tag (lsa), VNL); + } + return 0; } @@ -1306,7 +1437,6 @@ ospf6_asbr_init (void) ospf6_install_lsa_handler (&as_external_handler); install_element (VIEW_NODE, &show_ipv6_ospf6_redistribute_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_redistribute_cmd); install_element (OSPF6_NODE, &ospf6_redistribute_cmd); install_element (OSPF6_NODE, &ospf6_redistribute_routemap_cmd); diff --git a/ospf6d/ospf6_asbr.h b/ospf6d/ospf6_asbr.h index 645e8fd9cf..da6bbdd9c3 100644 --- a/ospf6d/ospf6_asbr.h +++ b/ospf6d/ospf6_asbr.h @@ -47,7 +47,8 @@ struct ospf6_external_info u_int32_t id; struct in6_addr forwarding; - /* u_int32_t tag; */ + + route_tag_t tag; ifindex_t ifindex; }; @@ -82,7 +83,8 @@ extern int ospf6_asbr_is_asbr (struct ospf6 *o); extern void ospf6_asbr_redistribute_add (int type, ifindex_t ifindex, struct prefix *prefix, u_int nexthop_num, - struct in6_addr *nexthop); + struct in6_addr *nexthop, + route_tag_t tag); extern void ospf6_asbr_redistribute_remove (int type, ifindex_t ifindex, struct prefix *prefix); diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c index 17c701f10f..45977c616e 100644 --- a/ospf6d/ospf6_interface.c +++ b/ospf6d/ospf6_interface.c @@ -1850,9 +1850,6 @@ ospf6_interface_init (void) install_element (VIEW_NODE, &show_ipv6_ospf6_interface_prefix_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_interface_ifname_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_interface_ifname_prefix_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_interface_prefix_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_interface_ifname_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_interface_ifname_prefix_cmd); install_element (CONFIG_NODE, &interface_cmd); install_default (INTERFACE_NODE); diff --git a/ospf6d/ospf6_neighbor.c b/ospf6d/ospf6_neighbor.c index 91c7f7c62c..385232f7f8 100644 --- a/ospf6d/ospf6_neighbor.c +++ b/ospf6d/ospf6_neighbor.c @@ -913,7 +913,6 @@ void ospf6_neighbor_init (void) { install_element (VIEW_NODE, &show_ipv6_ospf6_neighbor_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_neighbor_cmd); } DEFUN (debug_ospf6_neighbor, diff 
--git a/ospf6d/ospf6_route.h b/ospf6d/ospf6_route.h index d0126b30d6..c7c21a6579 100644 --- a/ospf6d/ospf6_route.h +++ b/ospf6d/ospf6_route.h @@ -100,6 +100,7 @@ struct ospf6_path u_int32_t cost_e2; u_int32_t cost_config; } u; + u_int32_t tag; }; #define OSPF6_PATH_TYPE_NONE 0 diff --git a/ospf6d/ospf6_snmp.c b/ospf6d/ospf6_snmp.c index 382cf62f72..86cfd17c83 100644 --- a/ospf6d/ospf6_snmp.c +++ b/ospf6d/ospf6_snmp.c @@ -625,7 +625,8 @@ ospfv3WwLsdbEntry (struct variable *v, oid *name, size_t *length, int exact, size_t *var_len, WriteMethod **write_method) { struct ospf6_lsa *lsa = NULL; - ifindex_t ifindex, area_id, id, instid, adv_router; + ifindex_t ifindex; + uint32_t area_id, id, instid, adv_router; u_int16_t type; int len; oid *offset; diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c index 48b6cb949a..31cdbd3c5d 100644 --- a/ospf6d/ospf6_top.c +++ b/ospf6d/ospf6_top.c @@ -924,7 +924,6 @@ ospf6_top_init (void) install_node (&ospf6_node, config_write_ospf6); install_element (VIEW_NODE, &show_ipv6_ospf6_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_cmd); install_element (CONFIG_NODE, &router_ospf6_cmd); install_element (CONFIG_NODE, &no_router_ospf6_cmd); @@ -932,10 +931,6 @@ ospf6_top_init (void) install_element (VIEW_NODE, &show_ipv6_ospf6_route_match_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_route_match_detail_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_route_type_detail_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_route_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_route_match_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_route_match_detail_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_route_type_detail_cmd); install_default (OSPF6_NODE); install_element (OSPF6_NODE, &ospf6_router_id_cmd); diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c index fd87c5a56f..c3b8739532 100644 --- a/ospf6d/ospf6_zebra.c +++ b/ospf6d/ospf6_zebra.c @@ -229,13 +229,13 @@ ospf6_zebra_read_ipv6 (int command, struct zclient *zclient, /* Type, flags, message. */ api.type = stream_getc (s); api.instance = stream_getw (s); - api.flags = stream_getc (s); + api.flags = stream_getl (s); api.message = stream_getc (s); /* IPv6 prefix. */ memset (&p, 0, sizeof (struct prefix_ipv6)); p.family = AF_INET6; - p.prefixlen = stream_getc (s); + p.prefixlen = MIN(IPV6_MAX_PREFIXLEN, stream_getc (s)); stream_get (&p.prefix, s, PSIZE (p.prefixlen)); /* Nexthop, ifindex, distance, metric. */ @@ -260,6 +260,11 @@ ospf6_zebra_read_ipv6 (int command, struct zclient *zclient, else api.metric = 0; + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG)) + api.tag = stream_getl (s); + else + api.tag = 0; + if (IS_OSPF6_DEBUG_ZEBRA (RECV)) { char prefixstr[PREFIX2STR_BUFFER], nexthopstr[128]; @@ -269,14 +274,14 @@ ospf6_zebra_read_ipv6 (int command, struct zclient *zclient, else snprintf (nexthopstr, sizeof (nexthopstr), "::"); - zlog_debug ("Zebra Receive route %s: %s %s nexthop %s ifindex %ld", + zlog_debug ("Zebra Receive route %s: %s %s nexthop %s ifindex %ld tag %"ROUTE_TAG_PRI, (command == ZEBRA_IPV6_ROUTE_ADD ? 
"add" : "delete"), - zebra_route_string(api.type), prefixstr, nexthopstr, ifindex); + zebra_route_string(api.type), prefixstr, nexthopstr, ifindex, api.tag); } if (command == ZEBRA_REDISTRIBUTE_IPV6_ADD) ospf6_asbr_redistribute_add (api.type, ifindex, (struct prefix *) &p, - api.nexthop_num, nexthop); + api.nexthop_num, nexthop, api.tag); else ospf6_asbr_redistribute_remove (api.type, ifindex, (struct prefix *) &p); @@ -444,6 +449,11 @@ ospf6_zebra_route_update (int type, struct ospf6_route *request) SET_FLAG (api.message, ZAPI_MESSAGE_METRIC); api.metric = (request->path.metric_type == 2 ? request->path.u.cost_e2 : request->path.cost); + if (request->path.tag) + { + SET_FLAG (api.message, ZAPI_MESSAGE_TAG); + api.tag = request->path.tag; + } dest = (struct prefix_ipv6 *) &request->prefix; if (type == REM) @@ -653,12 +663,8 @@ ospf6_zebra_init (struct thread_master *master) zclient->interface_down = ospf6_zebra_if_state_update; zclient->interface_address_add = ospf6_zebra_if_address_update_add; zclient->interface_address_delete = ospf6_zebra_if_address_update_delete; - zclient->ipv4_route_add = NULL; - zclient->ipv4_route_delete = NULL; zclient->redistribute_route_ipv4_add = NULL; zclient->redistribute_route_ipv4_del = NULL; - zclient->ipv6_route_add = ospf6_zebra_read_ipv6; - zclient->ipv6_route_delete = ospf6_zebra_read_ipv6; zclient->redistribute_route_ipv6_add = ospf6_zebra_read_ipv6; zclient->redistribute_route_ipv6_del = ospf6_zebra_read_ipv6; @@ -670,7 +676,6 @@ ospf6_zebra_init (struct thread_master *master) /* Install command element for zebra node. */ install_element (VIEW_NODE, &show_zebra_cmd); - install_element (ENABLE_NODE, &show_zebra_cmd); install_default (ZEBRA_NODE); install_element (ZEBRA_NODE, &redistribute_ospf6_cmd); install_element (ZEBRA_NODE, &no_redistribute_ospf6_cmd); diff --git a/ospf6d/ospf6d.c b/ospf6d/ospf6d.c index 8042c73225..349dae5c76 100644 --- a/ospf6d/ospf6d.c +++ b/ospf6d/ospf6d.c @@ -1235,15 +1235,11 @@ ospf6_init (void) install_element_ospf6_clear_interface (); install_element (VIEW_NODE, &show_version_ospf6_cmd); - install_element (ENABLE_NODE, &show_version_ospf6_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_border_routers_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_border_routers_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_linkstate_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_linkstate_detail_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_linkstate_cmd); - install_element (ENABLE_NODE, &show_ipv6_ospf6_linkstate_detail_cmd); #define INSTALL(n,c) \ install_element (n ## _NODE, &show_ipv6_ospf6_ ## c) @@ -1263,21 +1259,6 @@ ospf6_init (void) INSTALL (VIEW, database_type_id_self_originated_cmd); INSTALL (VIEW, database_type_self_originated_linkstate_id_cmd); - INSTALL (ENABLE, database_cmd); - INSTALL (ENABLE, database_type_cmd); - INSTALL (ENABLE, database_id_cmd); - INSTALL (ENABLE, database_router_cmd); - INSTALL (ENABLE, database_type_id_cmd); - INSTALL (ENABLE, database_type_router_cmd); - INSTALL (ENABLE, database_adv_router_linkstate_id_cmd); - INSTALL (ENABLE, database_id_router_cmd); - INSTALL (ENABLE, database_type_id_router_cmd); - INSTALL (ENABLE, database_type_adv_router_linkstate_id_cmd); - INSTALL (ENABLE, database_self_originated_cmd); - INSTALL (ENABLE, database_type_self_originated_cmd); - INSTALL (ENABLE, database_type_id_self_originated_cmd); - INSTALL (ENABLE, database_type_self_originated_linkstate_id_cmd); - /* Make ospf protocol socket. 
*/ ospf6_serv_sock (); thread_add_read (master, ospf6_receive, NULL, ospf6_sock); diff --git a/ospfd/ospf_asbr.c b/ospfd/ospf_asbr.c index 21f99af128..284c564688 100644 --- a/ospfd/ospf_asbr.c +++ b/ospfd/ospf_asbr.c @@ -137,7 +137,7 @@ ospf_route_map_set_compare (struct route_map_set_values *values1, struct external_info * ospf_external_info_add (u_char type, u_short instance, struct prefix_ipv4 p, ifindex_t ifindex, struct in_addr nexthop, - u_short tag) + route_tag_t tag) { struct external_info *new; struct route_node *rn; diff --git a/ospfd/ospf_asbr.h b/ospfd/ospf_asbr.h index 25a53aad4f..e2fa367455 100644 --- a/ospfd/ospf_asbr.h +++ b/ospfd/ospf_asbr.h @@ -47,7 +47,7 @@ struct external_info struct in_addr nexthop; /* Additional Route tag. */ - u_int32_t tag; + route_tag_t tag; struct route_map_set_values route_map_set; #define ROUTEMAP_METRIC(E) (E)->route_map_set.metric @@ -65,7 +65,7 @@ extern struct external_info *ospf_external_info_add (u_char, u_short, struct prefix_ipv4, ifindex_t, struct in_addr, - u_short); + route_tag_t); extern void ospf_external_info_delete (u_char, u_short, struct prefix_ipv4); extern struct external_info *ospf_external_info_lookup (u_char, u_short, struct prefix_ipv4 *); diff --git a/ospfd/ospf_ase.c b/ospfd/ospf_ase.c index 74c1711ef1..fe40b10171 100644 --- a/ospfd/ospf_ase.c +++ b/ospfd/ospf_ase.c @@ -598,6 +598,10 @@ ospf_ase_route_match_same (struct route_table *rt, struct prefix *prefix, if (op->ifindex != newop->ifindex) return 0; } + + if (or->u.ext.tag != newor->u.ext.tag) + return 0; + return 1; } diff --git a/ospfd/ospf_dump.c b/ospfd/ospf_dump.c index 07a55ebaa8..0b3267a233 100644 --- a/ospfd/ospf_dump.c +++ b/ospfd/ospf_dump.c @@ -493,7 +493,7 @@ ospf_as_external_lsa_dump (struct stream *s, u_int16_t length) IS_EXTERNAL_METRIC (al->e[i].tos) ? "E" : "-", al->e[i].tos & 0x7f, GET_METRIC (al->e[i].metric)); zlog_debug (" Forwarding address %s", inet_ntoa (al->e[i].fwd_addr)); - zlog_debug (" External Route Tag %d", al->e[i].route_tag); + zlog_debug (" External Route Tag %"ROUTE_TAG_PRI, al->e[i].route_tag); } } diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c index b96ed452c1..916d4d01c9 100644 --- a/ospfd/ospf_lsa.c +++ b/ospfd/ospf_lsa.c @@ -1673,7 +1673,7 @@ ospf_external_lsa_body_set (struct stream *s, struct external_info *ei, /* Put forwarding address. */ stream_put_ipv4 (s, fwd_addr.s_addr); - /* Put route tag -- only first 16bits are used for compatibility */ + /* Put route tag */ stream_putl (s, ei->tag); } diff --git a/ospfd/ospf_main.c b/ospfd/ospf_main.c index 0ed39af411..43aa683fa4 100644 --- a/ospfd/ospf_main.c +++ b/ospfd/ospf_main.c @@ -203,6 +203,8 @@ main (int argc, char **argv) ospf_apiserver_enable = 0; #endif /* SUPPORT_OSPF_API */ + strcpy(pid_file, PATH_OSPFD_PID); + /* get program name */ progname = ((p = strrchr (argv[0], '/')) ? ++p : argv[0]); @@ -363,7 +365,6 @@ main (int argc, char **argv) } else { - strcpy(pid_file, PATH_OSPFD_PID); strcpy(vty_path, OSPF_VTYSH_PATH); } /* Process id file create. 
*/ diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c index bcb1cd8e20..b3d20dca75 100644 --- a/ospfd/ospf_ri.c +++ b/ospfd/ospf_ri.c @@ -1621,8 +1621,6 @@ ospf_router_info_register_vty (void) { install_element (VIEW_NODE, &show_ip_ospf_router_info_cmd); install_element (VIEW_NODE, &show_ip_ospf_router_info_pce_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_router_info_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_router_info_pce_cmd); install_element (OSPF_NODE, &router_info_area_cmd); install_element (OSPF_NODE, &no_router_info_cmd); diff --git a/ospfd/ospf_routemap.c b/ospfd/ospf_routemap.c index ebe426cee0..6e88515d3e 100644 --- a/ospfd/ospf_routemap.c +++ b/ospfd/ospf_routemap.c @@ -397,7 +397,7 @@ static route_map_result_t route_match_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct external_info *ei; if (type == RMAP_OSPF) @@ -411,45 +411,13 @@ route_match_tag (void *rule, struct prefix *prefix, return RMAP_NOMATCH; } -/* Route map `match tag' match statement. `arg' is TAG value */ -static void * -route_match_tag_compile (const char *arg) -{ - u_short *tag; - u_short tmp; - - /* tag value shoud be integer. */ - if (! all_digit (arg)) - return NULL; - - tmp = atoi(arg); - if (tmp < 1) - return NULL; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - - if (!tag) - return tag; - - *tag = tmp; - - return tag; -} - -/* Free route map's compiled 'match tag' value. */ -static void -route_match_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - /* Route map commands for tag matching. */ -struct route_map_rule_cmd route_match_tag_cmd = +static struct route_map_rule_cmd route_match_tag_cmd = { "tag", route_match_tag, - route_match_tag_compile, - route_match_tag_free, + route_map_rule_tag_compile, + route_map_rule_tag_free, }; @@ -587,7 +555,7 @@ static route_map_result_t route_set_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct external_info *ei; if (type == RMAP_OSPF) @@ -602,46 +570,13 @@ route_set_tag (void *rule, struct prefix *prefix, return RMAP_OKAY; } -/* Route map `tag' compile function. Given string is converted to u_short. */ -static void * -route_set_tag_compile (const char *arg) -{ - u_short *tag; - u_short tmp; - - /* tag value shoud be integer. */ - if (! all_digit (arg)) - return NULL; - - tmp = atoi(arg); - - if (tmp < 1) - return NULL; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - - if (!tag) - return tag; - - *tag = tmp; - - return tag; -} - -/* Free route map's tag value. */ -static void -route_set_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - /* Route map commands for tag set. */ -struct route_map_rule_cmd route_set_tag_cmd = +static struct route_map_rule_cmd route_set_tag_cmd = { "tag", route_set_tag, - route_set_tag_compile, - route_set_tag_free, + route_map_rule_tag_compile, + route_map_rule_tag_free, }; DEFUN (match_ip_nexthop, @@ -673,7 +608,6 @@ DEFUN (no_match_ip_nexthop, return ospf_route_match_delete (vty, vty->index, "ip next-hop", al); } - DEFUN (set_metric_type, set_metric_type_cmd, "set metric-type ", diff --git a/ospfd/ospf_spf.c b/ospfd/ospf_spf.c index 5f8ef6f993..5dfd41dd1e 100644 --- a/ospfd/ospf_spf.c +++ b/ospfd/ospf_spf.c @@ -1283,7 +1283,7 @@ ospf_spf_calculate (struct ospf_area *area, struct route_table *new_table, area->ts_spf = area->ospf->ts_spf; if (IS_DEBUG_OSPF_EVENT) - zlog_debug ("ospf_spf_calculate: Stop. 
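The removed route_match_tag_compile()/route_set_tag_compile() pairs above (and their ripd counterparts later in this patch) are superseded by route_map_rule_tag_compile() and route_map_rule_tag_free() from the shared route-map code. Those library helpers are not shown in this excerpt; the sketch below, using hypothetical *_sketch names, only illustrates what such helpers have to do now that tags are route_tag_t values rather than u_short, and the real validation may differ in detail.

    static void *
    route_map_rule_tag_compile_sketch (const char *arg)
    {
      char *endptr;
      unsigned long tmp;
      route_tag_t *tag;

      tmp = strtoul (arg, &endptr, 10);
      if (*arg == '\0' || *endptr != '\0' || tmp > ROUTE_TAG_MAX)
        return NULL;            /* reject non-numeric or oversized tags */

      tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (*tag));
      *tag = tmp;
      return tag;
    }

    static void
    route_map_rule_tag_free_sketch (void *rule)
    {
      XFREE (MTYPE_ROUTE_MAP_COMPILED, rule);
    }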
%ld vertices", + zlog_debug ("ospf_spf_calculate: Stop. %zd vertices", mtype_stats_alloc(MTYPE_OSPF_VERTEX)); /* Free SPF vertices, but not the list. List has ospf_vertex_free diff --git a/ospfd/ospf_te.c b/ospfd/ospf_te.c index 26034fd360..43b0b0ebfa 100644 --- a/ospfd/ospf_te.c +++ b/ospfd/ospf_te.c @@ -2647,8 +2647,6 @@ ospf_mpls_te_register_vty (void) { install_element (VIEW_NODE, &show_ip_ospf_mpls_te_router_cmd); install_element (VIEW_NODE, &show_ip_ospf_mpls_te_link_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_mpls_te_router_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_mpls_te_link_cmd); install_element (OSPF_NODE, &ospf_mpls_te_on_cmd); install_element (OSPF_NODE, &no_ospf_mpls_te_cmd); diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 359189b345..8261a96a20 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -3450,7 +3450,7 @@ show_ip_ospf_common (struct vty *vty, struct ospf *ospf, u_char use_json) if (use_json) { json_object_object_add(json, "areas", json_areas); - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -3859,7 +3859,7 @@ show_ip_ospf_interface_common (struct vty *vty, struct ospf *ospf, int argc, if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -4017,7 +4017,7 @@ show_ip_ospf_neighbor_common (struct vty *vty, struct ospf *ospf, u_char use_jso if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -4129,7 +4129,7 @@ show_ip_ospf_neighbor_all_common (struct vty *vty, struct ospf *ospf, u_char use if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -4224,7 +4224,7 @@ show_ip_ospf_neighbor_int_common (struct vty *vty, struct ospf *ospf, int arg_ba if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -4591,7 +4591,7 @@ show_ip_ospf_neighbor_id_common (struct vty *vty, struct ospf *ospf, if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -4683,7 +4683,7 @@ show_ip_ospf_neighbor_detail_common (struct vty *vty, struct ospf *ospf, u_char if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -4779,7 +4779,7 @@ show_ip_ospf_neighbor_detail_all_common (struct vty *vty, struct ospf *ospf, u_c if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -4884,7 +4884,7 @@ show_ip_ospf_neighbor_int_detail_common (struct vty *vty, struct ospf 
*ospf, if (use_json) { - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -5234,8 +5234,8 @@ show_as_external_lsa_detail (struct vty *vty, struct ospf_lsa *lsa) vty_out (vty, " Forward Address: %s%s", inet_ntoa (al->e[0].fwd_addr), VTY_NEWLINE); - vty_out (vty, " External Route Tag: %lu%s%s", - (u_long)ntohl (al->e[0].route_tag), VTY_NEWLINE, VTY_NEWLINE); + vty_out (vty, " External Route Tag: %"ROUTE_TAG_PRI"%s%s", + (route_tag_t)ntohl (al->e[0].route_tag), VTY_NEWLINE, VTY_NEWLINE); } return 0; @@ -5259,8 +5259,8 @@ show_as_external_lsa_stdvty (struct ospf_lsa *lsa) zlog_debug( " Forward Address: %s%s", inet_ntoa (al->e[0].fwd_addr), "\n"); - zlog_debug( " External Route Tag: %u%s%s", - ntohl (al->e[0].route_tag), "\n", "\n"); + zlog_debug( " External Route Tag: %"ROUTE_TAG_PRI"%s%s", + (route_tag_t)ntohl (al->e[0].route_tag), "\n", "\n"); return 0; } @@ -5286,8 +5286,8 @@ show_as_nssa_lsa_detail (struct vty *vty, struct ospf_lsa *lsa) vty_out (vty, " NSSA: Forward Address: %s%s", inet_ntoa (al->e[0].fwd_addr), VTY_NEWLINE); - vty_out (vty, " External Route Tag: %u%s%s", - ntohl (al->e[0].route_tag), VTY_NEWLINE, VTY_NEWLINE); + vty_out (vty, " External Route Tag: %"ROUTE_TAG_PRI"%s%s", + (route_tag_t)ntohl (al->e[0].route_tag), VTY_NEWLINE, VTY_NEWLINE); } return 0; @@ -9058,11 +9058,11 @@ show_ip_ospf_route_external (struct vty *vty, struct route_table *rt) switch (er->path_type) { case OSPF_PATH_TYPE1_EXTERNAL: - vty_out (vty, "N E1 %-18s [%d] tag: %u%s", buf1, + vty_out (vty, "N E1 %-18s [%d] tag: %"ROUTE_TAG_PRI"%s", buf1, er->cost, er->u.ext.tag, VTY_NEWLINE); break; case OSPF_PATH_TYPE2_EXTERNAL: - vty_out (vty, "N E2 %-18s [%d/%d] tag: %u%s", buf1, er->cost, + vty_out (vty, "N E2 %-18s [%d/%d] tag: %"ROUTE_TAG_PRI"%s", buf1, er->cost, er->u.ext.type2_cost, er->u.ext.tag, VTY_NEWLINE); break; } @@ -9969,32 +9969,22 @@ ospf_vty_show_init (void) { /* "show ip ospf" commands. */ install_element (VIEW_NODE, &show_ip_ospf_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_cmd); /* "show ip ospf database" commands. */ install_element (VIEW_NODE, &show_ip_ospf_database_type_adv_router_cmd); install_element (VIEW_NODE, &show_ip_ospf_database_cmd); install_element (VIEW_NODE, &show_ip_ospf_database_max_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_database_type_adv_router_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_database_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_database_max_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_database_type_adv_router_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_database_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_database_max_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_database_type_adv_router_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_database_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_database_max_cmd); /* "show ip ospf interface" commands. */ install_element (VIEW_NODE, &show_ip_ospf_interface_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_interface_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_interface_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_interface_cmd); /* "show ip ospf neighbor" commands. 
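Every place that used to print a tag with %d or %u now goes through the ROUTE_TAG_PRI format macro, so the conversion specifier tracks whatever width route_tag_t has. The macro itself comes from the library side of this patch and is not visible in this hunk; the fragment below assumes it expands to something like PRIu32 for a 32-bit route_tag_t.

    route_tag_t tag = 65537;   /* a value the old 16-bit u_short tag could not hold */

    vty_out (vty, " External Route Tag: %"ROUTE_TAG_PRI"%s", tag, VTY_NEWLINE);
    zlog_debug ("External Route Tag %"ROUTE_TAG_PRI, tag);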
*/ install_element (VIEW_NODE, &show_ip_ospf_neighbor_int_detail_cmd); @@ -10004,13 +9994,6 @@ ospf_vty_show_init (void) install_element (VIEW_NODE, &show_ip_ospf_neighbor_detail_cmd); install_element (VIEW_NODE, &show_ip_ospf_neighbor_cmd); install_element (VIEW_NODE, &show_ip_ospf_neighbor_all_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_neighbor_int_detail_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_neighbor_int_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_neighbor_id_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_neighbor_detail_all_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_neighbor_detail_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_neighbor_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_neighbor_all_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_neighbor_int_detail_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_neighbor_int_cmd); @@ -10019,24 +10002,13 @@ ospf_vty_show_init (void) install_element (VIEW_NODE, &show_ip_ospf_instance_neighbor_detail_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_neighbor_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_neighbor_all_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_neighbor_int_detail_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_neighbor_int_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_neighbor_id_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_neighbor_detail_all_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_neighbor_detail_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_neighbor_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_neighbor_all_cmd); /* "show ip ospf route" commands. */ install_element (VIEW_NODE, &show_ip_ospf_route_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_route_cmd); install_element (VIEW_NODE, &show_ip_ospf_border_routers_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_border_routers_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_route_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_route_cmd); install_element (VIEW_NODE, &show_ip_ospf_instance_border_routers_cmd); - install_element (ENABLE_NODE, &show_ip_ospf_instance_border_routers_cmd); } diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index c0b94a3360..b0ff5d0e3c 100644 --- a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -355,7 +355,7 @@ ospf_zebra_add (struct prefix_ipv4 *p, struct ospf_route *or) { u_char message; u_char distance; - u_char flags; + u_int32_t flags; int psize; struct stream *s; struct ospf_path *path; @@ -379,10 +379,10 @@ ospf_zebra_add (struct prefix_ipv4 *p, struct ospf_route *or) if (distance) SET_FLAG (message, ZAPI_MESSAGE_DISTANCE); - /* Check if path type is ASE and use only 16bit tags */ + /* Check if path type is ASE */ if (((or->path_type == OSPF_PATH_TYPE1_EXTERNAL) || (or->path_type == OSPF_PATH_TYPE2_EXTERNAL)) && - (or->u.ext.tag > 0) && (or->u.ext.tag < UINT16_MAX)) + (or->u.ext.tag > 0) && (or->u.ext.tag <= ROUTE_TAG_MAX)) SET_FLAG (message, ZAPI_MESSAGE_TAG); /* Make packet. 
*/ @@ -393,7 +393,7 @@ ospf_zebra_add (struct prefix_ipv4 *p, struct ospf_route *or) zclient_create_header (s, ZEBRA_IPV4_ROUTE_ADD, VRF_DEFAULT); stream_putc (s, ZEBRA_ROUTE_OSPF); stream_putw (s, ospf->instance); - stream_putc (s, flags); + stream_putl (s, flags); stream_putc (s, message); stream_putw (s, SAFI_UNICAST); @@ -479,7 +479,7 @@ ospf_zebra_add (struct prefix_ipv4 *p, struct ospf_route *or) } if (CHECK_FLAG (message, ZAPI_MESSAGE_TAG)) - stream_putw (s, (u_short)or->u.ext.tag); + stream_putl (s, or->u.ext.tag); stream_putw_at (s, 0, stream_get_endp (s)); @@ -492,7 +492,7 @@ ospf_zebra_delete (struct prefix_ipv4 *p, struct ospf_route *or) { u_char message; u_char distance; - u_char flags; + u_int32_t flags; int psize; struct stream *s; struct ospf_path *path; @@ -516,7 +516,7 @@ ospf_zebra_delete (struct prefix_ipv4 *p, struct ospf_route *or) zclient_create_header (s, ZEBRA_IPV4_ROUTE_DELETE, VRF_DEFAULT); stream_putc (s, ZEBRA_ROUTE_OSPF); stream_putw (s, ospf->instance); - stream_putc (s, flags); + stream_putl (s, flags); stream_putc (s, message); stream_putw (s, SAFI_UNICAST); @@ -1064,13 +1064,13 @@ ospf_zebra_read_ipv4 (int command, struct zclient *zclient, /* Type, flags, message. */ api.type = stream_getc (s); api.instance = stream_getw (s); - api.flags = stream_getc (s); + api.flags = stream_getl (s); api.message = stream_getc (s); /* IPv4 prefix. */ memset (&p, 0, sizeof (struct prefix_ipv4)); p.family = AF_INET; - p.prefixlen = stream_getc (s); + p.prefixlen = MIN(IPV4_MAX_PREFIXLEN, stream_getc (s)); stream_get (&p.prefix, s, PSIZE (p.prefixlen)); if (IPV4_NET127(ntohl(p.prefix.s_addr))) @@ -1093,7 +1093,7 @@ ospf_zebra_read_ipv4 (int command, struct zclient *zclient, if (CHECK_FLAG (api.message, ZAPI_MESSAGE_METRIC)) api.metric = stream_getl (s); if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG)) - api.tag = stream_getw (s); + api.tag = stream_getl (s); else api.tag = 0; @@ -1601,8 +1601,6 @@ ospf_zebra_init (struct thread_master *master, u_short instance) zclient->interface_address_delete = ospf_interface_address_delete; zclient->interface_link_params = ospf_interface_link_params; - zclient->ipv4_route_add = ospf_zebra_read_ipv4; - zclient->ipv4_route_delete = ospf_zebra_read_ipv4; zclient->redistribute_route_ipv4_add = ospf_zebra_read_ipv4; zclient->redistribute_route_ipv4_del = ospf_zebra_read_ipv4; diff --git a/ospfd/ospfd.h b/ospfd/ospfd.h index bc4acf993b..93b5ab766a 100644 --- a/ospfd/ospfd.h +++ b/ospfd/ospfd.h @@ -269,7 +269,7 @@ struct ospf struct list *redist[ZEBRA_ROUTE_MAX + 1]; /* Redistribute tag info. */ - u_short dtag[ZEBRA_ROUTE_MAX + 1]; //Pending: cant configure as of now + route_tag_t dtag[ZEBRA_ROUTE_MAX + 1]; //Pending: cant configure as of now int default_metric; /* Default metric for redistribute. 
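Several hunks in this patch make the same wire-format change: the ZAPI flags field grows from one byte to 32 bits, the optional route tag grows from 16 to 32 bits, and received prefix lengths are clamped before the prefix bytes are copied. Collected in one place, and only as an illustration of the pattern the ospfd, ospf6d, ripd and pimd readers now share, the receive side looks like this:

    api.flags = stream_getl (s);                    /* was stream_getc() */

    /* Clamping first keeps PSIZE() from asking stream_get() to overrun
       p.prefix if a peer advertises an impossible prefix length. */
    p.prefixlen = MIN (IPV4_MAX_PREFIXLEN, stream_getc (s));
    stream_get (&p.prefix, s, PSIZE (p.prefixlen));

    if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG))
      api.tag = stream_getl (s);                    /* was stream_getw() */
    else
      api.tag = 0;

The send side mirrors this with stream_putl(), and a tag is only flagged when it is non-zero and no larger than ROUTE_TAG_MAX, as in ospf_zebra_add() above.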
*/ diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 8cc5cf95db..318bd1ba1c 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -4903,45 +4903,14 @@ void pim_cmd_init() install_element (VIEW_NODE, &show_ip_ssmpingd_cmd); install_element (VIEW_NODE, &show_debugging_pim_cmd); + install_element (ENABLE_NODE, &show_ip_pim_address_cmd); + install_element (ENABLE_NODE, &clear_ip_interfaces_cmd); install_element (ENABLE_NODE, &clear_ip_igmp_interfaces_cmd); install_element (ENABLE_NODE, &clear_ip_mroute_cmd); install_element (ENABLE_NODE, &clear_ip_pim_interfaces_cmd); install_element (ENABLE_NODE, &clear_ip_pim_oil_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_interface_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_join_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_parameters_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_groups_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_groups_retransmissions_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_sources_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_sources_retransmissions_cmd); - install_element (ENABLE_NODE, &show_ip_igmp_querier_cmd); - install_element (ENABLE_NODE, &show_ip_pim_address_cmd); - install_element (ENABLE_NODE, &show_ip_pim_assert_cmd); - install_element (ENABLE_NODE, &show_ip_pim_assert_internal_cmd); - install_element (ENABLE_NODE, &show_ip_pim_assert_metric_cmd); - install_element (ENABLE_NODE, &show_ip_pim_assert_winner_metric_cmd); - install_element (ENABLE_NODE, &show_ip_pim_dr_cmd); - install_element (ENABLE_NODE, &show_ip_pim_hello_cmd); - install_element (ENABLE_NODE, &show_ip_pim_interface_cmd); - install_element (ENABLE_NODE, &show_ip_pim_join_cmd); - install_element (ENABLE_NODE, &show_ip_pim_jp_override_interval_cmd); - install_element (ENABLE_NODE, &show_ip_pim_lan_prune_delay_cmd); - install_element (ENABLE_NODE, &show_ip_pim_local_membership_cmd); - install_element (ENABLE_NODE, &show_ip_pim_neighbor_cmd); - install_element (ENABLE_NODE, &show_ip_pim_rpf_cmd); - install_element (ENABLE_NODE, &show_ip_pim_secondary_cmd); - install_element (ENABLE_NODE, &show_ip_pim_upstream_cmd); - install_element (ENABLE_NODE, &show_ip_pim_upstream_join_desired_cmd); - install_element (ENABLE_NODE, &show_ip_pim_upstream_rpf_cmd); - install_element (ENABLE_NODE, &show_ip_multicast_cmd); - install_element (ENABLE_NODE, &show_ip_mroute_cmd); - install_element (ENABLE_NODE, &show_ip_mroute_count_cmd); - install_element (ENABLE_NODE, &show_ip_rib_cmd); - install_element (ENABLE_NODE, &show_ip_ssmpingd_cmd); - install_element (ENABLE_NODE, &show_debugging_pim_cmd); - install_element (ENABLE_NODE, &test_igmp_receive_report_cmd); install_element (ENABLE_NODE, &test_pim_receive_assert_cmd); install_element (ENABLE_NODE, &test_pim_receive_dump_cmd); diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c index d357e5cc83..1822de2299 100644 --- a/pimd/pim_zebra.c +++ b/pimd/pim_zebra.c @@ -552,7 +552,7 @@ static int redist_read_ipv4_route(int command, struct zclient *zclient, /* Type, flags, message. */ api.type = stream_getc(s); api.instance = stream_getw (s); - api.flags = stream_getc(s); + api.flags = stream_getl(s); api.message = stream_getc(s); /* IPv4 prefix length. 
*/ @@ -610,13 +610,12 @@ static int redist_read_ipv4_route(int command, struct zclient *zclient, 0; if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG)) - api.tag = stream_getw (s); + api.tag = stream_getl (s); else api.tag = 0; switch (command) { case ZEBRA_REDISTRIBUTE_IPV4_ADD: - case ZEBRA_IPV4_ROUTE_ADD: if (PIM_DEBUG_ZEBRA) { char buf[2][INET_ADDRSTRLEN]; zlog_debug("%s: add %s %s/%d " @@ -634,7 +633,6 @@ static int redist_read_ipv4_route(int command, struct zclient *zclient, } break; case ZEBRA_REDISTRIBUTE_IPV4_DEL: - case ZEBRA_IPV4_ROUTE_DELETE: if (PIM_DEBUG_ZEBRA) { char buf[2][INET_ADDRSTRLEN]; zlog_debug("%s: delete %s %s/%d " @@ -690,8 +688,6 @@ void pim_zebra_init(char *zebra_sock_path) qpim_zclient_update->interface_down = pim_zebra_if_state_down; qpim_zclient_update->interface_address_add = pim_zebra_if_address_add; qpim_zclient_update->interface_address_delete = pim_zebra_if_address_del; - qpim_zclient_update->ipv4_route_add = redist_read_ipv4_route; - qpim_zclient_update->ipv4_route_delete = redist_read_ipv4_route; qpim_zclient_update->redistribute_route_ipv4_add = redist_read_ipv4_route; qpim_zclient_update->redistribute_route_ipv4_del = redist_read_ipv4_route; diff --git a/qpb/.gitignore b/qpb/.gitignore new file mode 100644 index 0000000000..b133c52a42 --- /dev/null +++ b/qpb/.gitignore @@ -0,0 +1,15 @@ +Makefile +Makefile.in +*.o +tags +TAGS +.deps +.nfs* +*.lo +*.la +*.a +*.libs +.arch-inventory +.arch-ids +*~ +*.loT diff --git a/qpb/Makefile.am b/qpb/Makefile.am new file mode 100644 index 0000000000..0fbda61f3c --- /dev/null +++ b/qpb/Makefile.am @@ -0,0 +1,30 @@ +include ../common.am + +AM_CPPFLAGS = -I.. -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib $(Q_PROTOBUF_C_CLIENT_INCLUDES) + +PROTOBUF_INCLUDES=-I$(top_srcdir) +PROTOBUF_PACKAGE = qpb + +lib_LTLIBRARIES = libquagga_pb.la +libquagga_pb_la_LDFLAGS = -version-info 0:0:0 + +if HAVE_PROTOBUF +protobuf_srcs = \ + qpb_allocator.c + +protobuf_srcs_nodist = \ + qpb.pb-c.c +endif + +libquagga_pb_la_SOURCES = \ + linear_allocator.h \ + qpb.h \ + qpb.c \ + qpb_allocator.h \ + $(protobuf_srcs) + +nodist_libquagga_pb_la_SOURCES = $(protobuf_srcs_nodist) + +CLEANFILES = $(Q_CLEANFILES) +BUILT_SOURCES = $(Q_PROTOBUF_SRCS) +EXTRA_DIST = qpb.proto diff --git a/qpb/README.txt b/qpb/README.txt new file mode 100644 index 0000000000..99ccd05511 --- /dev/null +++ b/qpb/README.txt @@ -0,0 +1 @@ +Protobuf definitions and code that is applicable to all of quagga. diff --git a/qpb/linear_allocator.h b/qpb/linear_allocator.h new file mode 100644 index 0000000000..e3ebbc64f3 --- /dev/null +++ b/qpb/linear_allocator.h @@ -0,0 +1,207 @@ +/* + * linear_allocator.h + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ + +/* + * Header file for the linear allocator. + * + * An allocator that allocates memory by walking down towards the end + * of a buffer. No attempt is made to reuse blocks that are freed + * subsequently. The assumption is that the buffer is big enough to + * cover allocations for a given purpose. + */ +#include +#include +#include +#include + +/* + * Alignment for block allocated by the allocator. Must be a power of 2. + */ +#define LINEAR_ALLOCATOR_ALIGNMENT 8 + +#define LINEAR_ALLOCATOR_ALIGN(value) \ + (((value) + LINEAR_ALLOCATOR_ALIGNMENT - 1) & ~(LINEAR_ALLOCATOR_ALIGNMENT - 1)); + +/* + * linear_allocator_align_ptr + */ +static inline char * +linear_allocator_align_ptr (char *ptr) +{ + return (char *) LINEAR_ALLOCATOR_ALIGN ((intptr_t) ptr); +} + +typedef struct linear_allocator_t_ +{ + char *buf; + + /* + * Current location in the buffer. + */ + char *cur; + + /* + * End of buffer. + */ + char *end; + + /* + * Version number of the allocator, this is bumped up when the allocator + * is reset and helps identifies bad frees. + */ + uint32_t version; + + /* + * The number of blocks that are currently allocated. + */ + int num_allocated; +} linear_allocator_t; + +/* + * linear_allocator_block_t + * + * Header structure at the begining of each block. + */ +typedef struct linear_allocator_block_t_ +{ + uint32_t flags; + + /* + * The version of the allocator when this block was allocated. + */ + uint32_t version; + char data[0]; +} linear_allocator_block_t; + +#define LINEAR_ALLOCATOR_BLOCK_IN_USE 0x01 + +#define LINEAR_ALLOCATOR_HDR_SIZE (sizeof(linear_allocator_block_t)) + +/* + * linear_allocator_block_size + * + * The total amount of space a block will take in the buffer, + * including the size of the header. + */ +static inline size_t +linear_allocator_block_size (size_t user_size) +{ + return LINEAR_ALLOCATOR_ALIGN (LINEAR_ALLOCATOR_HDR_SIZE + user_size); +} + +/* + * linear_allocator_ptr_to_block + */ +static inline linear_allocator_block_t * +linear_allocator_ptr_to_block (void *ptr) +{ + void *block_ptr; + block_ptr = ((char *) ptr) - offsetof (linear_allocator_block_t, data); + return block_ptr; +} + +/* + * linear_allocator_init + */ +static inline void +linear_allocator_init (linear_allocator_t * allocator, char *buf, + size_t buf_len) +{ + memset (allocator, 0, sizeof (*allocator)); + + assert (linear_allocator_align_ptr (buf) == buf); + allocator->buf = buf; + allocator->cur = buf; + allocator->end = buf + buf_len; +} + +/* + * linear_allocator_reset + * + * Prepare an allocator for reuse. + * + * *** NOTE ** This implicitly frees all the blocks in the allocator. 
+ */ +static inline void +linear_allocator_reset (linear_allocator_t *allocator) +{ + allocator->num_allocated = 0; + allocator->version++; + allocator->cur = allocator->buf; +} + +/* + * linear_allocator_alloc + */ +static inline void * +linear_allocator_alloc (linear_allocator_t *allocator, size_t user_size) +{ + size_t block_size; + linear_allocator_block_t *block; + + block_size = linear_allocator_block_size (user_size); + + if (allocator->cur + block_size > allocator->end) + { + return NULL; + } + + block = (linear_allocator_block_t *) allocator->cur; + allocator->cur += block_size; + + block->flags = LINEAR_ALLOCATOR_BLOCK_IN_USE; + block->version = allocator->version; + allocator->num_allocated++; + return block->data; +} + +/* + * linear_allocator_free + */ +static inline void +linear_allocator_free (linear_allocator_t *allocator, void *ptr) +{ + linear_allocator_block_t *block; + + if (((char *) ptr) < allocator->buf || ((char *) ptr) >= allocator->end) + { + assert (0); + return; + } + + block = linear_allocator_ptr_to_block (ptr); + if (block->version != allocator->version) + { + assert (0); + return; + } + + block->flags = block->flags & ~LINEAR_ALLOCATOR_BLOCK_IN_USE; + + if (--allocator->num_allocated < 0) + { + assert (0); + } +} diff --git a/qpb/qpb.c b/qpb/qpb.c new file mode 100644 index 0000000000..1b2b47fce5 --- /dev/null +++ b/qpb/qpb.c @@ -0,0 +1,29 @@ +/* + * qpb.c + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +/* + * Main file for the qpb library. + */ + diff --git a/qpb/qpb.h b/qpb/qpb.h new file mode 100644 index 0000000000..ad5bdc8b8e --- /dev/null +++ b/qpb/qpb.h @@ -0,0 +1,373 @@ +/* + * qpb.h + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +/* + * Main public header file for the quagga protobuf library. 
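A minimal usage sketch for the allocator defined above; the buffer size and function name are illustrative, and the backing buffer must satisfy the alignment that linear_allocator_init() asserts.

    #include "linear_allocator.h"

    static char buf[4096] __attribute__ ((aligned (LINEAR_ALLOCATOR_ALIGNMENT)));

    static void
    linear_allocator_example (void)
    {
      linear_allocator_t alloc;
      void *a, *b;

      linear_allocator_init (&alloc, buf, sizeof (buf));

      a = linear_allocator_alloc (&alloc, 100);   /* NULL once the buffer is full */
      b = linear_allocator_alloc (&alloc, 200);

      linear_allocator_free (&alloc, b);          /* bookkeeping only, no reuse */
      linear_allocator_free (&alloc, a);

      /* Makes the whole buffer available again and bumps the version, so any
         pointer still held from before the reset trips the version check. */
      linear_allocator_reset (&alloc);
    }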
+ */ + +#ifndef _QPB_H +#define _QPB_H + +#include "prefix.h" + +#include "qpb/qpb.pb-c.h" + +#include "qpb/qpb_allocator.h" + +/* + * qpb__address_family__set + */ +#define qpb_address_family_set qpb__address_family__set +static inline int +qpb__address_family__set (Qpb__AddressFamily *pb_family, u_char family) +{ + switch (family) { + case AF_INET: + *pb_family = QPB__ADDRESS_FAMILY__IPV4; + return 1; + + case AF_INET6: + *pb_family = QPB__ADDRESS_FAMILY__IPV6; + return 1; + + default: + *pb_family = QPB__ADDRESS_FAMILY__UNKNOWN_AF; + } + + return 0; +} + +/* + * qpb__address_family__get + */ +#define qpb_address_family_get qpb__address_family__get +static inline int +qpb__address_family__get (Qpb__AddressFamily pb_family, u_char *family) +{ + + switch (pb_family) { + case QPB__ADDRESS_FAMILY__IPV4: + *family = AF_INET; + return 1; + + case QPB__ADDRESS_FAMILY__IPV6: + *family = AF_INET6; + return 1; + + case QPB__ADDRESS_FAMILY__UNKNOWN_AF: + return 0; + default: /* protobuf "magic value" _QPB__ADDRESS_FAMILY_IS_INT_SIZE */ + return 0; + } + + return 0; +} + +/* + * qpb__l3_prefix__create + */ +#define qpb_l3_prefix_create qpb__l3_prefix__create +static inline Qpb__L3Prefix * +qpb__l3_prefix__create (qpb_allocator_t *allocator, struct prefix *p) +{ + Qpb__L3Prefix *prefix; + + prefix = QPB_ALLOC(allocator, typeof(*prefix)); + if (!prefix) { + return NULL; + } + qpb__l3_prefix__init(prefix); + prefix->length = p->prefixlen; + prefix->bytes.len = (p->prefixlen + 7)/8; + prefix->bytes.data = qpb_alloc(allocator, prefix->bytes.len); + if (!prefix->bytes.data) { + return NULL; + } + + memcpy(prefix->bytes.data, &p->u.prefix, prefix->bytes.len); + + return prefix; +} + +/* + * qpb__l3_prefix__get + */ +#define qpb_l3_prefix_get qpb__l3_prefix__get +static inline int +qpb__l3_prefix__get (const Qpb__L3Prefix *pb_prefix, u_char family, + struct prefix *prefix) +{ + + switch (family) + { + + case AF_INET: + memset(prefix, 0, sizeof(struct prefix_ipv4)); + break; + + case AF_INET6: + memset(prefix, 0, sizeof(struct prefix_ipv6)); + break; + + default: + memset(prefix, 0, sizeof(*prefix)); + } + + prefix->prefixlen = pb_prefix->length; + prefix->family = family; + memcpy(&prefix->u.prefix, pb_prefix->bytes.data, pb_prefix->bytes.len); + return 1; +} + +/* + * qpb__protocol__set + * + * Translate a quagga route type to a protobuf protocol. 
+ */ +#define qpb_protocol_set qpb__protocol__set +static inline int +qpb__protocol__set (Qpb__Protocol *pb_proto, int route_type) +{ + switch (route_type) { + case ZEBRA_ROUTE_KERNEL: + *pb_proto = QPB__PROTOCOL__KERNEL; + break; + + case ZEBRA_ROUTE_CONNECT: + *pb_proto = QPB__PROTOCOL__CONNECTED; + break; + + case ZEBRA_ROUTE_STATIC: + *pb_proto = QPB__PROTOCOL__STATIC; + break; + + case ZEBRA_ROUTE_RIP: + *pb_proto = QPB__PROTOCOL__RIP; + break; + + case ZEBRA_ROUTE_RIPNG: + *pb_proto = QPB__PROTOCOL__RIPNG; + break; + + case ZEBRA_ROUTE_OSPF: + case ZEBRA_ROUTE_OSPF6: + *pb_proto = QPB__PROTOCOL__OSPF; + break; + + case ZEBRA_ROUTE_ISIS: + *pb_proto = QPB__PROTOCOL__ISIS; + break; + + case ZEBRA_ROUTE_BGP: + *pb_proto = QPB__PROTOCOL__BGP; + break; + + case ZEBRA_ROUTE_HSLS: + case ZEBRA_ROUTE_OLSR: + case ZEBRA_ROUTE_MAX: + case ZEBRA_ROUTE_SYSTEM: + default: + *pb_proto = QPB__PROTOCOL__OTHER; + } + + return 1; +} + +/* + * qpb__ipv4_address__create + */ +static inline Qpb__Ipv4Address * +qpb__ipv4_address__create (qpb_allocator_t *allocator, + struct in_addr *addr) +{ + Qpb__Ipv4Address *v4; + + v4 = QPB_ALLOC(allocator, typeof(*v4)); + if (!v4) { + return NULL; + } + qpb__ipv4_address__init(v4); + + v4->value = ntohl(addr->s_addr); + return v4; +} + +/* + * qpb__ipv4_address__get + */ +static inline int +qpb__ipv4_address__get (const Qpb__Ipv4Address *v4, struct in_addr *addr) +{ + addr->s_addr = htonl(v4->value); + return 1; +} + +/* + * qpb__ipv6_address__create + */ +static inline Qpb__Ipv6Address * +qpb__ipv6_address__create (qpb_allocator_t *allocator, struct in6_addr *addr) +{ + Qpb__Ipv6Address *v6; + + v6 = QPB_ALLOC(allocator, typeof(*v6)); + if (!v6) + return NULL; + + qpb__ipv6_address__init(v6); + v6->bytes.len = 16; + v6->bytes.data = qpb_alloc(allocator, 16); + if (!v6->bytes.data) + return NULL; + + memcpy(v6->bytes.data, addr->s6_addr, v6->bytes.len); + return v6; +} + +/* + * qpb__ipv6_address__get + * + * Read out information from a protobuf ipv6 address structure. + */ +static inline int +qpb__ipv6_address__get (const Qpb__Ipv6Address *v6, struct in6_addr *addr) +{ + if (v6->bytes.len != 16) + return 0; + + memcpy(addr->s6_addr, v6->bytes.data, v6->bytes.len); + return 1; +} + +/* + * qpb__l3_address__create + */ +#define qpb_l3_address_create qpb__l3_address__create +static inline Qpb__L3Address * +qpb__l3_address__create (qpb_allocator_t *allocator, union g_addr *addr, + u_char family) +{ + Qpb__L3Address *l3_addr; + + l3_addr = QPB_ALLOC(allocator, typeof(*l3_addr)); + if (!l3_addr) + return NULL; + + qpb__l3_address__init(l3_addr); + + switch (family) { + + case AF_INET: + l3_addr->v4 = qpb__ipv4_address__create (allocator, &addr->ipv4); + if (!l3_addr->v4) + return NULL; + + break; + + case AF_INET6: + l3_addr->v6 = qpb__ipv6_address__create (allocator, &addr->ipv6); + if (!l3_addr->v6) + return NULL; + + break; + } + return l3_addr; +} + +/* + * qpb__l3_address__get + * + * Read out a gateway address from a protobuf l3 address. 
+ */ +#define qpb_l3_address_get qpb__l3_address__get +static inline int +qpb__l3_address__get (const Qpb__L3Address *l3_addr, + u_char *family, union g_addr *addr) +{ + if (l3_addr->v4) + { + qpb__ipv4_address__get (l3_addr->v4, &addr->ipv4); + *family = AF_INET; + return 1; + } + + if (l3_addr->v6) + { + qpb__ipv6_address__get(l3_addr->v6, &addr->ipv6); + *family = AF_INET6; + return 1; + } + + return 0; +} + +/* + * qpb__if_identifier__create + */ +#define qpb_if_identifier_create qpb__if_identifier__create +static inline Qpb__IfIdentifier * +qpb__if_identifier__create (qpb_allocator_t *allocator, uint if_index) +{ + Qpb__IfIdentifier *if_id; + + if_id = QPB_ALLOC(allocator, typeof(*if_id)); + if (!if_id) { + return NULL; + } + qpb__if_identifier__init(if_id); + if_id->has_index = 1; + if_id->index = if_index; + return if_id; +} + +/* + * qpb__if_identifier__get + * + * Get interface name and/or if_index from an if identifier. + */ +#define qpb_if_identifier_get qpb__if_identifier__get +static inline int +qpb__if_identifier__get (Qpb__IfIdentifier *if_id, uint *if_index, + char **name) +{ + char *str; + uint ix; + + if (!if_index) + if_index = &ix; + + if (!name) + name = &str; + + if (if_id->has_index) + *if_index = if_id->index; + else + *if_index = 0; + + *name = if_id->name; + return 1; +} + +#endif diff --git a/qpb/qpb.proto b/qpb/qpb.proto new file mode 100644 index 0000000000..8323d3ed73 --- /dev/null +++ b/qpb/qpb.proto @@ -0,0 +1,90 @@ +/* + * qpb.proto + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Protobuf definitions pertaining to the Quagga Protobuf component. + */ +package qpb; + +enum AddressFamily { + UNKNOWN_AF = 0; + IPV4 = 1; // IP version 4 + IPV6 = 2; // IP version 6 +}; + +enum SubAddressFamily { + UNKNOWN_SAF = 0; + UNICAST = 1; + MULTICAST = 2; +}; + +// +// An IP version 4 address, such as 10.1.1.1. +// +message Ipv4Address { + required fixed32 value = 1 ; +}; + +message Ipv6Address { + + // 16 bytes. + required bytes bytes = 1; +}; + +// +// An IP version 4 or IP version 6 address. +// +message L3Address { + optional Ipv4Address v4 = 1; + optional Ipv6Address v6 = 2; +}; + +// +// An IP prefix, such as 10.1/16. +// We use the message below to represent both IPv4 and IPv6 prefixes. +message L3Prefix { + required uint32 length = 1; + required bytes bytes = 2; +}; + +// +// Something that identifies an interface on a machine. It can either +// be a name (for instance, 'eth0') or a number currently. 
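For the opposite direction, the __get helpers above translate already-unpacked protobuf fields back into lib types. A short sketch, where the Qpb__* arguments stand in for fields of some enclosing message that is not part of this excerpt:

    static void
    qpb_decode_example (Qpb__AddressFamily pb_family,
                        const Qpb__L3Prefix *pb_prefix,
                        Qpb__IfIdentifier *pb_ifid)
    {
      u_char family;
      struct prefix p;
      uint ifindex;
      char *ifname;

      if (!qpb__address_family__get (pb_family, &family))
        return;                                 /* unknown address family */

      if (qpb__l3_prefix__get (pb_prefix, family, &p))
        qpb__if_identifier__get (pb_ifid, &ifindex, &ifname);
    }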
+// +message IfIdentifier { + optional uint32 index = 1; + optional string name = 2; +}; + +enum Protocol { + UNKNOWN_PROTO = 0; + LOCAL = 1; + CONNECTED = 2; + KERNEL = 3; + STATIC = 4; + RIP = 5; + RIPNG = 6; + OSPF = 7; + ISIS = 8; + BGP = 9; + OTHER = 10; +} \ No newline at end of file diff --git a/qpb/qpb_allocator.c b/qpb/qpb_allocator.c new file mode 100644 index 0000000000..4b4830a476 --- /dev/null +++ b/qpb/qpb_allocator.c @@ -0,0 +1,67 @@ +/* + * qpb_allocator.c + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include "linear_allocator.h" + +#include "qpb_allocator.h" + +/* + * _qpb_alloc + */ +static void * +_qpb_alloc (void *allocator_data, size_t size) +{ + return linear_allocator_alloc (allocator_data, size); +} + +/* + * _qpb_free + */ +static void +_qpb_free (void *allocator_data, void *ptr) +{ + linear_allocator_free (allocator_data, ptr); +} + +static ProtobufCAllocator allocator_template = { + _qpb_alloc, + _qpb_free, + NULL, + 8192, + NULL +}; + +/* + * qpb_allocator_init_linear + * + * Initialize qpb_allocator_t with the given linear allocator. + */ +void +qpb_allocator_init_linear (qpb_allocator_t *allocator, + linear_allocator_t *linear_allocator) +{ + *allocator = allocator_template; + allocator->allocator_data = linear_allocator; +} diff --git a/qpb/qpb_allocator.h b/qpb/qpb_allocator.h new file mode 100644 index 0000000000..83ddf56cbc --- /dev/null +++ b/qpb/qpb_allocator.h @@ -0,0 +1,113 @@ +/* + * qpb_allocator.h + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +/* + * Header file for quagga protobuf memory management code. + */ + +#ifndef _QPB_ALLOCATOR_H_ +#define _QPB_ALLOCATOR_H_ + +#include + +struct linear_allocator_t_; + +/* + * Alias for ProtobufCAllocator that is easier on the fingers. 
+ */ +typedef ProtobufCAllocator qpb_allocator_t; + +/* + * qpb_alloc + */ +static inline void * +qpb_alloc (qpb_allocator_t *allocator, size_t size) +{ + return allocator->alloc (allocator->allocator_data, size); +} + +/* + * qpb_alloc_ptr_array + * + * Allocate space for the specified number of pointers. + */ +static inline void * +qpb_alloc_ptr_array (qpb_allocator_t *allocator, size_t num_ptrs) +{ + return qpb_alloc (allocator, num_ptrs * sizeof (void *)); +} + +/* + * qpb_free + */ +static inline void +qpb_free (qpb_allocator_t *allocator, void *ptr) +{ + allocator->free (allocator->allocator_data, ptr); +} + +/* + * QPB_ALLOC + * + * Convenience macro to reduce the probability of allocating memory of + * incorrect size. It returns enough memory to store the given type, + * and evaluates to an appropriately typed pointer. + */ +#define QPB_ALLOC(allocator, type) \ + (type *) qpb_alloc(allocator, sizeof(type)) + + +/* + * Externs. + */ +extern void qpb_allocator_init_linear (qpb_allocator_t *, + struct linear_allocator_t_ *); + +/* + * The following macros are for the common case where a qpb allocator + * is being used alongside a linear allocator that allocates memory + * off of the stack. + */ +#define QPB_DECLARE_STACK_ALLOCATOR(allocator, size) \ + qpb_allocator_t allocator; \ + linear_allocator_t lin_ ## allocator; \ + char lin_ ## allocator ## _buf[size] + +#define QPB_INIT_STACK_ALLOCATOR(allocator) \ + do \ + { \ + linear_allocator_init(&(lin_ ## allocator), \ + lin_ ## allocator ## _buf, \ + sizeof(lin_ ## allocator ## _buf)); \ + qpb_allocator_init_linear(&allocator, &(lin_ ## allocator)); \ + } while (0) + +#define QPB_RESET_STACK_ALLOCATOR(allocator) \ + do \ + { \ + linear_allocator_reset (&(lin_ ## allocator)); \ + } while (0) + +#endif /* _QPB_ALLOCATOR_H_ */ diff --git a/redhat/Makefile.am b/redhat/Makefile.am index 20bca27cb2..96870689dc 100644 --- a/redhat/Makefile.am +++ b/redhat/Makefile.am @@ -1,6 +1,6 @@ EXTRA_DIST = bgpd.init isisd.init \ - ospf6d.init ospfd.init \ + ospf6d.init ospfd.init ldpd.init \ quagga.logrotate quagga.pam quagga.spec \ quagga.sysconfig ripd.init ripngd.init \ watchquagga.init pimd.init zebra.init \ diff --git a/redhat/README.rpm_build.md b/redhat/README.rpm_build.md index 3e8fa05306..05a0bdc591 100644 --- a/redhat/README.rpm_build.md +++ b/redhat/README.rpm_build.md @@ -52,6 +52,8 @@ Building your own Quagga RPM %{!?with_rtadv: %global with_rtadv 1 } %{!?with_isisd: %global with_isisd 1 } %{!?with_pimd: %global with_pimd 1 } + %{!?with_mpls: %global with_mpls 0 } + %{!?with_ldpd: %global with_ldpd 0 } %{!?with_shared: %global with_shared 1 } %{!?with_multipath: %global with_multipath 64 } %{!?quagga_user: %global quagga_user quagga } diff --git a/redhat/ldpd.init b/redhat/ldpd.init new file mode 100644 index 0000000000..b9b9538cb8 --- /dev/null +++ b/redhat/ldpd.init @@ -0,0 +1,72 @@ +#!/bin/bash +# chkconfig: - 16 84 +# config: /etc/quagga/ldpd.conf + +### BEGIN INIT INFO +# Provides: ldpd +# Short-Description: LDP engine +# Description: LDP engine for use with Zebra +### END INIT INFO + +# source function library +. /etc/rc.d/init.d/functions + +# Get network config +. /etc/sysconfig/network + +# quagga command line options +. /etc/sysconfig/quagga + +RETVAL=0 +PROG="ldpd" +cmd=ldpd +LOCK_FILE=/var/lock/subsys/ldpd +CONF_FILE=/etc/quagga/ldpd.conf + +case "$1" in + start) + # Check that networking is up. + [ "${NETWORKING}" = "no" ] && exit 1 + + # The process must be configured first. 
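The daemon code that drives these convenience macros together with the qpb.h create helpers is not part of this excerpt, so the following is only a sketch with an illustrative buffer size:

    #include "qpb/qpb.h"

    static void
    qpb_encode_example (struct prefix *p, union g_addr *gate)
    {
      QPB_DECLARE_STACK_ALLOCATOR (allocator, 4096);
      Qpb__L3Prefix *pb_prefix;
      Qpb__L3Address *pb_gate;

      QPB_INIT_STACK_ALLOCATOR (allocator);

      pb_prefix = qpb__l3_prefix__create (&allocator, p);
      pb_gate = qpb__l3_address__create (&allocator, gate, p->family);
      if (!pb_prefix || !pb_gate)
        return;                         /* stack buffer exhausted */

      /* ... place both into an enclosing message and pack it ... */

      QPB_RESET_STACK_ALLOCATOR (allocator);
    }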
+ [ -f $CONF_FILE ] || exit 6 + if [ `id -u` -ne 0 ]; then + echo $"Insufficient privilege" 1>&2 + exit 4 + fi + + echo -n $"Starting $PROG: " + daemon $cmd -d $LDPD_OPTS -f $CONF_FILE + RETVAL=$? + [ $RETVAL -eq 0 ] && touch $LOCK_FILE + echo + ;; + stop) + echo -n $"Shutting down $PROG: " + killproc $cmd + RETVAL=$? + [ $RETVAL -eq 0 ] && rm -f $LOCK_FILE + echo + ;; + restart|reload|force-reload) + $0 stop + $0 start + RETVAL=$? + ;; + condrestart|try-restart) + if [ -f $LOCK_FILE ]; then + $0 stop + $0 start + fi + RETVAL=$? + ;; + status) + status $cmd + RETVAL=$? + ;; + *) + echo $"Usage: $0 {start|stop|restart|reload|force-reload|condrestart|try-restart|status}" + exit 2 +esac + +exit $RETVAL diff --git a/redhat/quagga.logrotate b/redhat/quagga.logrotate index afbd40c02f..1f1baead0b 100644 --- a/redhat/quagga.logrotate +++ b/redhat/quagga.logrotate @@ -53,3 +53,11 @@ /bin/kill -USR1 `cat /var/run/quagga/ripngd.pid 2> /dev/null` 2> /dev/null || true endscript } + +/var/log/quagga/ldpd.log { + notifempty + missingok + postrotate + /bin/kill -USR1 `cat /var/run/quagga/ldpd.pid 2> /dev/null` 2> /dev/null || true + endscript +} diff --git a/redhat/quagga.spec.in b/redhat/quagga.spec.in index 9ff65e9a22..4c35a8bf68 100644 --- a/redhat/quagga.spec.in +++ b/redhat/quagga.spec.in @@ -16,12 +16,15 @@ %{!?with_ospfapi: %global with_ospfapi 1 } %{!?with_irdp: %global with_irdp 1 } %{!?with_rtadv: %global with_rtadv 1 } +%{!?with_mpls: %global with_mpls 0 } +%{!?with_ldpd: %global with_ldpd 0 } %{!?with_shared: %global with_shared 1 } %{!?with_multipath: %global with_multipath 256 } %{!?quagga_user: %global quagga_user quagga } %{!?vty_group: %global vty_group quaggavty } %{!?with_fpm: %global with_fpm 0 } %{!?with_watchquagga: %global with_watchquagga 1 } +%{!?with_bgp_vnc: %global with_bgp_vnc 0 } # path defines %define _sysconfdir /etc/quagga @@ -71,13 +74,19 @@ %define daemon_list zebra ripd ospfd bgpd isisd pimd ripngd ospfd6d +%if %{with_ldpd} +%define daemon_ldpd ldpd +%else +%define daemon_ldpd "" +%endif + %if %{with_watchquagga} %define daemon_watchquagga watchquagga %else %define daemon_watchquagga "" %endif -%define all_daemons %{daemon_list} %{daemon_watchquagga} +%define all_daemons %{daemon_list} %{daemon_ldpd} %{daemon_watchquagga} # allow build dir to be kept %{!?keep_build: %global keep_build 0 } @@ -198,6 +207,16 @@ developing OSPF-API and quagga applications. %else --enable-rtadv=no \ %endif +%if %{with_mpls} + --enable-mpls=yes \ +%else + --disable-mpls \ +%endif +%if %{with_ldpd} + --enable-ldpd \ +%else + --disable-ldpd \ +%endif %if %{with_pam} --with-libpam \ %endif @@ -217,6 +236,11 @@ developing OSPF-API and quagga applications. 
--enable-watchquagga \ %else --disable-watchquagga \ +%endif +%if %{with_bgp_vnc} + --enable-bgp-vnc \ +%else + --disable-bgp-vnc \ %endif --enable-gcc-rdynamic \ --enable-isisd=yes \ @@ -315,6 +339,9 @@ zebra_spec_add_service ospfapi 2607/tcp "OSPF-API" %endif zebra_spec_add_service isisd 2608/tcp "ISISd vty" zebra_spec_add_service pimd 2611/tcp "PIMd vty" +%if %{with_ldpd} +zebra_spec_add_service ldpd 2612/tcp "LDPd vty" +%endif %if "%{initsystem}" == "systemd" for daemon in %all_daemons ; do @@ -493,6 +520,9 @@ rm -rf %{buildroot} %{_sbindir}/ospf6d %{_sbindir}/pimd %{_sbindir}/isisd +%if %{with_ldpd} +%{_sbindir}/ldpd +%endif %if %{with_shared} %attr(755,root,root) %{_libdir}/lib*.so %attr(755,root,root) %{_libdir}/lib*.so.* @@ -513,6 +543,9 @@ rm -rf %{buildroot} %config /etc/rc.d/init.d/ospf6d %config /etc/rc.d/init.d/isisd %config /etc/rc.d/init.d/pimd + %if %{with_ldpd} + %config /etc/rc.d/init.d/ldpd + %endif %endif %config(noreplace) /etc/default/quagga %config(noreplace) /etc/pam.d/quagga diff --git a/redhat/quagga.sysconfig b/redhat/quagga.sysconfig index 2092cba2fa..0cc6acfbae 100644 --- a/redhat/quagga.sysconfig +++ b/redhat/quagga.sysconfig @@ -9,6 +9,7 @@ RIPD_OPTS="-A 127.0.0.1" RIPNGD_OPTS="-A ::1" ZEBRA_OPTS="-A 127.0.0.1" PIMD_OPTS="-A 127.0.0.1" +LDPD_OPTS="-A 127.0.0.1" # Watchquagga configuration for LSB initscripts # diff --git a/ripd/rip_interface.c b/ripd/rip_interface.c index 6df219628c..8c5092d787 100644 --- a/ripd/rip_interface.c +++ b/ripd/rip_interface.c @@ -112,6 +112,8 @@ ipv4_multicast_leave (int sock, return ret; } +static void rip_interface_reset (struct rip_interface *); + /* Allocate new RIP's interface configuration. */ static struct rip_interface * rip_interface_new (void) @@ -120,17 +122,7 @@ rip_interface_new (void) ri = XCALLOC (MTYPE_RIP_INTERFACE, sizeof (struct rip_interface)); - /* Default authentication type is simple password for Cisco - compatibility. */ - ri->auth_type = RIP_NO_AUTH; - ri->md5_auth_len = RIP_AUTH_MD5_COMPAT_SIZE; - - /* Set default split-horizon behavior. If the interface is Frame - Relay or SMDS is enabled, the default value for split-horizon is - off. But currently Zebra does detect Frame Relay or SMDS - interface. So all interface is set to split horizon. */ - ri->split_horizon_default = RIP_SPLIT_HORIZON; - ri->split_horizon = ri->split_horizon_default; + rip_interface_reset (ri); return ri; } @@ -503,17 +495,9 @@ rip_interface_delete (int command, struct zclient *zclient, return 0; } -void -rip_interface_clean (void) -{ - struct listnode *node; - struct interface *ifp; - struct rip_interface *ri; - - for (ALL_LIST_ELEMENTS_RO (vrf_iflist (VRF_DEFAULT), node, ifp)) +static void +rip_interface_clean (struct rip_interface *ri) { - ri = ifp->info; - ri->enable_network = 0; ri->enable_interface = 0; ri->running = 0; @@ -524,28 +508,35 @@ rip_interface_clean (void) ri->t_wakeup = NULL; } } -} void -rip_interface_reset (void) +rip_interfaces_clean (void) { struct listnode *node; struct interface *ifp; - struct rip_interface *ri; for (ALL_LIST_ELEMENTS_RO (vrf_iflist (VRF_DEFAULT), node, ifp)) - { - ri = ifp->info; + rip_interface_clean (ifp->info); +} - ri->enable_network = 0; - ri->enable_interface = 0; - ri->running = 0; +static void +rip_interface_reset (struct rip_interface *ri) + { + /* Default authentication type is simple password for Cisco + compatibility. */ + ri->auth_type = RIP_NO_AUTH; + ri->md5_auth_len = RIP_AUTH_MD5_COMPAT_SIZE; + + /* Set default split-horizon behavior. 
If the interface is Frame + Relay or SMDS is enabled, the default value for split-horizon is + off. But currently Zebra does detect Frame Relay or SMDS + interface. So all interface is set to split horizon. */ + ri->split_horizon_default = RIP_SPLIT_HORIZON; + ri->split_horizon = ri->split_horizon_default; ri->ri_send = RI_RIP_UNSPEC; ri->ri_receive = RI_RIP_UNSPEC; - ri->auth_type = RIP_NO_AUTH; - if (ri->auth_str) { free (ri->auth_str); @@ -557,27 +548,29 @@ rip_interface_reset (void) ri->key_chain = NULL; } - ri->split_horizon = RIP_NO_SPLIT_HORIZON; - ri->split_horizon_default = RIP_NO_SPLIT_HORIZON; - ri->list[RIP_FILTER_IN] = NULL; ri->list[RIP_FILTER_OUT] = NULL; ri->prefix[RIP_FILTER_IN] = NULL; ri->prefix[RIP_FILTER_OUT] = NULL; - if (ri->t_wakeup) - { - thread_cancel (ri->t_wakeup); - ri->t_wakeup = NULL; - } - ri->recv_badpackets = 0; ri->recv_badroutes = 0; ri->sent_updates = 0; ri->passive = 0; + + rip_interface_clean (ri); } + +void +rip_interfaces_reset (void) +{ + struct listnode *node; + struct interface *ifp; + + for (ALL_LIST_ELEMENTS_RO (vrf_iflist (VRF_DEFAULT), node, ifp)) + rip_interface_reset (ifp->info); } int @@ -647,7 +640,7 @@ rip_apply_address_add (struct connected *ifc) if ((rip_enable_if_lookup(ifc->ifp->name) >= 0) || (rip_enable_network_lookup2(ifc) >= 0)) rip_redistribute_add(ZEBRA_ROUTE_CONNECT, RIP_ROUTE_INTERFACE, - &address, ifc->ifp->ifindex, NULL, 0, 0); + &address, ifc->ifp->ifindex, NULL, 0, 0, 0); } @@ -958,7 +951,7 @@ rip_connect_set (struct interface *ifp, int set) (rip_enable_network_lookup2(connected) >= 0)) rip_redistribute_add (ZEBRA_ROUTE_CONNECT, RIP_ROUTE_INTERFACE, &address, connected->ifp->ifindex, - NULL, 0, 0); + NULL, 0, 0, 0); } else { rip_redistribute_delete (ZEBRA_ROUTE_CONNECT, RIP_ROUTE_INTERFACE, @@ -966,7 +959,7 @@ rip_connect_set (struct interface *ifp, int set) if (rip_redistribute_check (ZEBRA_ROUTE_CONNECT)) rip_redistribute_add (ZEBRA_ROUTE_CONNECT, RIP_ROUTE_REDISTRIBUTE, &address, connected->ifp->ifindex, - NULL, 0, 0); + NULL, 0, 0, 0); } } } diff --git a/ripd/rip_routemap.c b/ripd/rip_routemap.c index 3cdfd8adfc..60ce66265e 100644 --- a/ripd/rip_routemap.c +++ b/ripd/rip_routemap.c @@ -366,8 +366,9 @@ static route_map_result_t route_match_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct rip_info *rinfo; + route_tag_t rinfo_tag; if (type == RMAP_RIP) { @@ -375,7 +376,8 @@ route_match_tag (void *rule, struct prefix *prefix, rinfo = object; /* The information stored by rinfo is host ordered. */ - if (rinfo->tag == *tag) + rinfo_tag = rinfo->tag; + if (rinfo_tag == *tag) return RMAP_MATCH; else return RMAP_NOMATCH; @@ -383,45 +385,13 @@ route_match_tag (void *rule, struct prefix *prefix, return RMAP_NOMATCH; } -/* Route map `match tag' match statement. `arg' is TAG value */ -static void * -route_match_tag_compile (const char *arg) -{ - u_short *tag; - u_short tmp; - - /* tag value shoud be integer. */ - if (! all_digit (arg)) - return NULL; - - tmp = atoi(arg); - if (tmp < 1) - return NULL; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - - if (!tag) - return tag; - - *tag = tmp; - - return tag; -} - -/* Free route map's compiled `match tag' value. */ -static void -route_match_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - /* Route map commands for tag matching. 
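In the rip_interface.c hunks above, rip_interface_new() no longer sets defaults inline; a forward-declared rip_interface_reset() now owns every default, and both the allocation path and the later clear/reset paths call it, so the two can no longer drift apart. A minimal sketch of that allocate-then-reset pattern, using a made-up struct rather than the real rip_interface:

#include <stdio.h>
#include <stdlib.h>

struct if_cfg
{
  int auth_type;
  int split_horizon;
  int running;
};

static void if_cfg_reset (struct if_cfg *);   /* forward declaration, as in the patch */

static struct if_cfg *
if_cfg_new (void)
{
  struct if_cfg *cfg = calloc (1, sizeof (*cfg));

  if (cfg)
    if_cfg_reset (cfg);         /* defaults live in exactly one place */
  return cfg;
}

static void
if_cfg_reset (struct if_cfg *cfg)
{
  cfg->auth_type = 0;           /* e.g. "no authentication" */
  cfg->split_horizon = 1;       /* e.g. "split horizon on" */
  cfg->running = 0;
}

int
main (void)
{
  struct if_cfg *cfg = if_cfg_new ();

  if (!cfg)
    return 1;
  cfg->running = 1;
  if_cfg_reset (cfg);           /* a later "reset" reuses the same code path */
  printf ("%d %d\n", cfg->split_horizon, cfg->running);   /* prints "1 0" */
  free (cfg);
  return 0;
}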
*/ -struct route_map_rule_cmd route_match_tag_cmd = +static struct route_map_rule_cmd route_match_tag_cmd = { "tag", route_match_tag, - route_match_tag_compile, - route_match_tag_free + route_map_rule_tag_compile, + route_map_rule_tag_free, }; /* `set metric METRIC' */ @@ -590,7 +560,7 @@ static route_map_result_t route_set_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct rip_info *rinfo; if(type == RMAP_RIP) @@ -606,33 +576,13 @@ route_set_tag (void *rule, struct prefix *prefix, return RMAP_OKAY; } -/* Route map `tag' compile function. Given string is converted - to u_short. */ -static void * -route_set_tag_compile (const char *arg) -{ - u_short *tag; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - *tag = atoi (arg); - - return tag; -} - -/* Free route map's compiled `ip nexthop' value. */ -static void -route_set_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - /* Route map commands for tag set. */ static struct route_map_rule_cmd route_set_tag_cmd = { "tag", route_set_tag, - route_set_tag_compile, - route_set_tag_free + route_map_rule_tag_compile, + route_map_rule_tag_free }; #define MATCH_STR "Match values from routing table\n" diff --git a/ripd/rip_zebra.c b/ripd/rip_zebra.c index 756a77cff9..5aea4f5222 100644 --- a/ripd/rip_zebra.c +++ b/ripd/rip_zebra.c @@ -91,6 +91,12 @@ rip_zebra_ipv4_send (struct route_node *rp, u_char cmd) api.distance = rinfo->distance; } + if (rinfo->tag) + { + SET_FLAG (api.message, ZAPI_MESSAGE_TAG); + api.tag = rinfo->tag; + } + zapi_ipv4_route (cmd, zclient, (struct prefix_ipv4 *)&rp->p, &api); @@ -147,13 +153,13 @@ rip_zebra_read_ipv4 (int command, struct zclient *zclient, zebra_size_t length, /* Type, flags, message. */ api.type = stream_getc (s); api.instance = stream_getw (s); - api.flags = stream_getc (s); + api.flags = stream_getl (s); api.message = stream_getc (s); /* IPv4 prefix. */ memset (&p, 0, sizeof (struct prefix_ipv4)); p.family = AF_INET; - p.prefixlen = stream_getc (s); + p.prefixlen = MIN(IPV4_MAX_PREFIXLEN, stream_getc (s)); stream_get (&p.prefix, s, PSIZE (p.prefixlen)); /* Nexthop, ifindex, distance, metric. */ @@ -176,10 +182,15 @@ rip_zebra_read_ipv4 (int command, struct zclient *zclient, zebra_size_t length, else api.metric = 0; + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG)) + api.tag = stream_getl (s); + else + api.tag = 0; + /* Then fetch IPv4 prefixes. 
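Two defensive habits show up in the rip_zebra_read_ipv4() hunk above: the prefix length taken from the zebra stream is clamped with MIN(IPV4_MAX_PREFIXLEN, ...) before it sizes the following copy, and the new tag field is read only when the sender set ZAPI_MESSAGE_TAG in the message bitmap (otherwise it defaults to 0). The self-contained fragment below reproduces that decode shape over a plain byte buffer instead of Quagga's stream API; the layout and flag value are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSG_HAS_TAG   0x01          /* illustrative flag bit, not ZAPI's value */
#define MAX_PREFIXLEN 32

struct decoded
{
  uint8_t  prefixlen;
  uint8_t  prefix[4];
  uint32_t tag;
};

/* Decode: [message flags:1][prefixlen:1][prefix bytes][tag:4, optional]. */
static int
decode (const uint8_t *buf, size_t len, struct decoded *out)
{
  size_t off = 0;
  uint8_t message, plen;

  memset (out, 0, sizeof (*out));
  if (len < 2)
    return -1;
  message = buf[off++];
  plen = buf[off++];

  /* Never trust a length field taken from the wire. */
  out->prefixlen = plen > MAX_PREFIXLEN ? MAX_PREFIXLEN : plen;

  size_t nbytes = (out->prefixlen + 7) / 8;
  if (len < off + nbytes)
    return -1;
  memcpy (out->prefix, buf + off, nbytes);
  off += nbytes;

  /* Optional field: present only when the sender set the flag. */
  if (message & MSG_HAS_TAG)
    {
      if (len < off + 4)
        return -1;
      out->tag = (uint32_t) buf[off] << 24 | (uint32_t) buf[off + 1] << 16
                 | (uint32_t) buf[off + 2] << 8 | buf[off + 3];
    }

  return 0;
}

int
main (void)
{
  const uint8_t wire[] = { MSG_HAS_TAG, 24, 10, 1, 2, 0, 0, 0, 42 };
  struct decoded d;

  if (decode (wire, sizeof (wire), &d) == 0)
    printf ("prefixlen %u tag %u\n", d.prefixlen, d.tag);   /* prefixlen 24 tag 42 */
  return 0;
}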
*/ if (command == ZEBRA_REDISTRIBUTE_IPV4_ADD) rip_redistribute_add (api.type, RIP_ROUTE_REDISTRIBUTE, &p, ifindex, - &nexthop, api.metric, api.distance); + &nexthop, api.metric, api.distance, api.tag); else if (command == ZEBRA_REDISTRIBUTE_IPV4_DEL) rip_redistribute_delete (api.type, RIP_ROUTE_REDISTRIBUTE, &p, ifindex); @@ -248,6 +259,7 @@ static struct { {ZEBRA_ROUTE_STATIC, 1, "static"}, {ZEBRA_ROUTE_OSPF, 1, "ospf"}, {ZEBRA_ROUTE_BGP, 2, "bgp"}, + {ZEBRA_ROUTE_VNC, 1, "vnc"}, {0, 0, NULL} }; @@ -586,7 +598,7 @@ DEFUN (rip_default_information_originate, rip->default_information = 1; rip_redistribute_add (ZEBRA_ROUTE_RIP, RIP_ROUTE_DEFAULT, &p, 0, - NULL, 0, 0); + NULL, 0, 0, 0); } return CMD_SUCCESS; @@ -696,8 +708,6 @@ rip_zclient_init (struct thread_master *master) zclient->interface_delete = rip_interface_delete; zclient->interface_address_add = rip_interface_address_add; zclient->interface_address_delete = rip_interface_address_delete; - zclient->ipv4_route_add = rip_zebra_read_ipv4; - zclient->ipv4_route_delete = rip_zebra_read_ipv4; zclient->interface_up = rip_interface_up; zclient->interface_down = rip_interface_down; zclient->redistribute_route_ipv4_add = rip_zebra_read_ipv4; diff --git a/ripd/ripd.c b/ripd/ripd.c index c5d928ba6d..7b04368496 100644 --- a/ripd/ripd.c +++ b/ripd/ripd.c @@ -316,102 +316,35 @@ rip_timeout_update (struct rip_info *rinfo) } static int -rip_incoming_filter (struct prefix_ipv4 *p, struct rip_interface *ri) +rip_filter (int rip_distribute, struct prefix_ipv4 *p, struct rip_interface *ri) { struct distribute *dist; struct access_list *alist; struct prefix_list *plist; + int distribute = rip_distribute == RIP_FILTER_OUT ? + DISTRIBUTE_V4_OUT : DISTRIBUTE_V4_IN; + const char *inout = rip_distribute == RIP_FILTER_OUT ? "out" : "in"; /* Input distribute-list filtering. */ - if (ri->list[RIP_FILTER_IN]) + if (ri->list[rip_distribute]) { - if (access_list_apply (ri->list[RIP_FILTER_IN], + if (access_list_apply (ri->list[rip_distribute], (struct prefix *) p) == FILTER_DENY) { if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by distribute in", - inet_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - if (ri->prefix[RIP_FILTER_IN]) - { - if (prefix_list_apply (ri->prefix[RIP_FILTER_IN], - (struct prefix *) p) == PREFIX_DENY) - { - if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by prefix-list in", - inet_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - - /* All interface filter check. 
*/ - dist = distribute_lookup (NULL); - if (dist) - { - if (dist->list[DISTRIBUTE_IN]) - { - alist = access_list_lookup (AFI_IP, dist->list[DISTRIBUTE_IN]); - - if (alist) - { - if (access_list_apply (alist, - (struct prefix *) p) == FILTER_DENY) - { - if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by distribute in", - inet_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by distribute %s", + inet_ntoa (p->prefix), p->prefixlen, inout); return -1; } } - } - if (dist->prefix[DISTRIBUTE_IN]) - { - plist = prefix_list_lookup (AFI_IP, dist->prefix[DISTRIBUTE_IN]); - - if (plist) - { - if (prefix_list_apply (plist, - (struct prefix *) p) == PREFIX_DENY) - { - if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by prefix-list in", - inet_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - } - } - return 0; -} - -static int -rip_outgoing_filter (struct prefix_ipv4 *p, struct rip_interface *ri) + if (ri->prefix[rip_distribute]) { - struct distribute *dist; - struct access_list *alist; - struct prefix_list *plist; - - if (ri->list[RIP_FILTER_OUT]) - { - if (access_list_apply (ri->list[RIP_FILTER_OUT], - (struct prefix *) p) == FILTER_DENY) - { - if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d is filtered by distribute out", - inet_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - if (ri->prefix[RIP_FILTER_OUT]) - { - if (prefix_list_apply (ri->prefix[RIP_FILTER_OUT], + if (prefix_list_apply (ri->prefix[rip_distribute], (struct prefix *) p) == PREFIX_DENY) { if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d is filtered by prefix-list out", - inet_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by prefix-list %s", + inet_ntoa (p->prefix), p->prefixlen, inout); return -1; } } @@ -420,25 +353,24 @@ rip_outgoing_filter (struct prefix_ipv4 *p, struct rip_interface *ri) dist = distribute_lookup (NULL); if (dist) { - if (dist->list[DISTRIBUTE_OUT]) + if (dist->list[distribute]) { - alist = access_list_lookup (AFI_IP, dist->list[DISTRIBUTE_OUT]); + alist = access_list_lookup (AFI_IP, dist->list[distribute]); if (alist) { - if (access_list_apply (alist, - (struct prefix *) p) == FILTER_DENY) + if (access_list_apply (alist, (struct prefix *) p) == FILTER_DENY) { if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by distribute out", - inet_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by distribute %s", + inet_ntoa (p->prefix), p->prefixlen, inout); return -1; } } } - if (dist->prefix[DISTRIBUTE_OUT]) + if (dist->prefix[distribute]) { - plist = prefix_list_lookup (AFI_IP, dist->prefix[DISTRIBUTE_OUT]); + plist = prefix_list_lookup (AFI_IP, dist->prefix[distribute]); if (plist) { @@ -446,8 +378,8 @@ rip_outgoing_filter (struct prefix_ipv4 *p, struct rip_interface *ri) (struct prefix *) p) == PREFIX_DENY) { if (IS_RIP_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by prefix-list out", - inet_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by prefix-list %s", + inet_ntoa (p->prefix), p->prefixlen, inout); return -1; } } @@ -511,7 +443,7 @@ rip_rte_process (struct rte *rte, struct sockaddr_in *from, /* Apply input filters. 
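The surrounding ripd.c hunks collapse rip_incoming_filter() and rip_outgoing_filter() — two nearly identical bodies — into a single rip_filter() that takes the direction as its first argument and derives both the distribute-list index and the "in"/"out" word used in the debug messages from it. A stripped-down sketch of that shape, with stand-in types instead of Quagga's access- and prefix-lists:

#include <stdio.h>

enum direction { FILTER_IN, FILTER_OUT };

struct iface
{
  /* one allow/deny verdict per direction; a stand-in for the real
     per-interface access-list and prefix-list pointers */
  int deny[2];
};

/* Returns -1 when the prefix is filtered, 0 when it may be processed. */
static int
filter (enum direction dir, const char *prefix, const struct iface *ifp)
{
  const char *inout = dir == FILTER_OUT ? "out" : "in";

  if (ifp->deny[dir])
    {
      printf ("%s filtered by distribute %s\n", prefix, inout);
      return -1;
    }
  return 0;
}

int
main (void)
{
  struct iface eth0 = { .deny = { [FILTER_IN] = 0, [FILTER_OUT] = 1 } };

  /* The same function now serves both call sites. */
  if (filter (FILTER_IN, "10.0.0.0/8", &eth0) == 0)
    puts ("10.0.0.0/8 accepted inbound");
  filter (FILTER_OUT, "10.0.0.0/8", &eth0);
  return 0;
}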
*/ ri = ifp->info; - ret = rip_incoming_filter (&p, ri); + ret = rip_filter (RIP_FILTER_IN, &p, ri); if (ret < 0) return; @@ -828,17 +760,18 @@ rip_packet_dump (struct rip_packet *packet, int size, const char *sndrcv) } } else - zlog_debug (" %s/%d -> %s family %d tag %d metric %ld", + zlog_debug (" %s/%d -> %s family %d tag %"ROUTE_TAG_PRI" metric %ld", inet_ntop (AF_INET, &rte->prefix, pbuf, BUFSIZ), netmask, inet_ntop (AF_INET, &rte->nexthop, nbuf, BUFSIZ), ntohs (rte->family), - ntohs (rte->tag), (u_long) ntohl (rte->metric)); + (route_tag_t)ntohs (rte->tag), + (u_long) ntohl (rte->metric)); } else { - zlog_debug (" %s family %d tag %d metric %ld", + zlog_debug (" %s family %d tag %"ROUTE_TAG_PRI" metric %ld", inet_ntop (AF_INET, &rte->prefix, pbuf, BUFSIZ), - ntohs (rte->family), ntohs (rte->tag), + ntohs (rte->family), (route_tag_t)ntohs (rte->tag), (u_long)ntohl (rte->metric)); } } @@ -1580,7 +1513,8 @@ rip_send_packet (u_char * buf, int size, struct sockaddr_in *to, void rip_redistribute_add (int type, int sub_type, struct prefix_ipv4 *p, ifindex_t ifindex, struct in_addr *nexthop, - unsigned int metric, unsigned char distance) + unsigned int metric, unsigned char distance, + route_tag_t tag) { int ret; struct route_node *rp = NULL; @@ -1601,6 +1535,8 @@ rip_redistribute_add (int type, int sub_type, struct prefix_ipv4 *p, newinfo.metric = 1; newinfo.external_metric = metric; newinfo.distance = distance; + if (tag <= UINT16_MAX) /* RIP only supports 16 bit tags */ + newinfo.tag = tag; newinfo.rp = rp; if (nexthop) newinfo.nexthop = *nexthop; @@ -2301,7 +2237,7 @@ rip_output_process (struct connected *ifc, struct sockaddr_in *to, p = (struct prefix_ipv4 *) &rp->p; /* Apply output filters. */ - ret = rip_outgoing_filter (p, ri); + ret = rip_filter (RIP_FILTER_OUT, p, ri); if (ret < 0) continue; @@ -3009,7 +2945,7 @@ DEFUN (rip_route, node->info = (void *)1; - rip_redistribute_add (ZEBRA_ROUTE_RIP, RIP_ROUTE_STATIC, &p, 0, NULL, 0, 0); + rip_redistribute_add (ZEBRA_ROUTE_RIP, RIP_ROUTE_STATIC, &p, 0, NULL, 0, 0, 0); return CMD_SUCCESS; } @@ -3620,13 +3556,13 @@ DEFUN (show_ip_rip, (rinfo->sub_type == RIP_ROUTE_RTE)) { vty_out (vty, "%-15s ", inet_ntoa (rinfo->from)); - vty_out (vty, "%3d ", rinfo->tag); + vty_out (vty, "%3"ROUTE_TAG_PRI" ", (route_tag_t)rinfo->tag); rip_vty_out_uptime (vty, rinfo); } else if (rinfo->metric == RIP_METRIC_INFINITY) { vty_out (vty, "self "); - vty_out (vty, "%3d ", rinfo->tag); + vty_out (vty, "%3"ROUTE_TAG_PRI" ", (route_tag_t)rinfo->tag); rip_vty_out_uptime (vty, rinfo); } else @@ -3642,7 +3578,7 @@ DEFUN (show_ip_rip, } else vty_out (vty, "self "); - vty_out (vty, "%3d", rinfo->tag); + vty_out (vty, "%3"ROUTE_TAG_PRI, (route_tag_t)rinfo->tag); } vty_out (vty, "%s", VTY_NEWLINE); @@ -3873,9 +3809,9 @@ rip_distribute_update (struct distribute *dist) ri = ifp->info; - if (dist->list[DISTRIBUTE_IN]) + if (dist->list[DISTRIBUTE_V4_IN]) { - alist = access_list_lookup (AFI_IP, dist->list[DISTRIBUTE_IN]); + alist = access_list_lookup (AFI_IP, dist->list[DISTRIBUTE_V4_IN]); if (alist) ri->list[RIP_FILTER_IN] = alist; else @@ -3884,9 +3820,9 @@ rip_distribute_update (struct distribute *dist) else ri->list[RIP_FILTER_IN] = NULL; - if (dist->list[DISTRIBUTE_OUT]) + if (dist->list[DISTRIBUTE_V4_OUT]) { - alist = access_list_lookup (AFI_IP, dist->list[DISTRIBUTE_OUT]); + alist = access_list_lookup (AFI_IP, dist->list[DISTRIBUTE_V4_OUT]); if (alist) ri->list[RIP_FILTER_OUT] = alist; else @@ -3895,9 +3831,9 @@ rip_distribute_update (struct distribute *dist) else 
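rip_redistribute_add() above now takes a route_tag_t, but it only stores the value when it fits: RIP carries a 16-bit tag on the wire, so anything above UINT16_MAX is left at zero rather than silently truncated (the same guard reappears later for RIPng). A small illustration of that guard, using plain stdint types rather than the real rip_info:

#include <stdint.h>
#include <stdio.h>

struct route_entry
{
  uint16_t tag;     /* what a RIP RTE can actually carry */
};

static void
route_entry_set_tag (struct route_entry *re, uint32_t tag)
{
  re->tag = 0;
  if (tag <= UINT16_MAX)      /* keep oversized tags out instead of truncating */
    re->tag = (uint16_t) tag;
}

int
main (void)
{
  struct route_entry re;

  route_entry_set_tag (&re, 42);        /* fits: stored as 42 */
  printf ("%u\n", re.tag);
  route_entry_set_tag (&re, 70000);     /* too wide for RIP: stays 0 */
  printf ("%u\n", re.tag);
  return 0;
}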
ri->list[RIP_FILTER_OUT] = NULL; - if (dist->prefix[DISTRIBUTE_IN]) + if (dist->prefix[DISTRIBUTE_V4_IN]) { - plist = prefix_list_lookup (AFI_IP, dist->prefix[DISTRIBUTE_IN]); + plist = prefix_list_lookup (AFI_IP, dist->prefix[DISTRIBUTE_V4_IN]); if (plist) ri->prefix[RIP_FILTER_IN] = plist; else @@ -3906,9 +3842,9 @@ rip_distribute_update (struct distribute *dist) else ri->prefix[RIP_FILTER_IN] = NULL; - if (dist->prefix[DISTRIBUTE_OUT]) + if (dist->prefix[DISTRIBUTE_V4_OUT]) { - plist = prefix_list_lookup (AFI_IP, dist->prefix[DISTRIBUTE_OUT]); + plist = prefix_list_lookup (AFI_IP, dist->prefix[DISTRIBUTE_V4_OUT]); if (plist) ri->prefix[RIP_FILTER_OUT] = plist; else @@ -4031,7 +3967,7 @@ rip_clean (void) rip_clean_network (); rip_passive_nondefault_clean (); rip_offset_clean (); - rip_interface_clean (); + rip_interfaces_clean (); rip_distance_reset (); rip_redistribute_clean (); } @@ -4055,7 +3991,7 @@ rip_reset (void) distribute_list_reset (); - rip_interface_reset (); + rip_interfaces_reset (); rip_distance_reset (); rip_zclient_reset (); @@ -4149,8 +4085,6 @@ rip_init (void) /* Install rip commands. */ install_element (VIEW_NODE, &show_ip_rip_cmd); install_element (VIEW_NODE, &show_ip_rip_status_cmd); - install_element (ENABLE_NODE, &show_ip_rip_cmd); - install_element (ENABLE_NODE, &show_ip_rip_status_cmd); install_element (CONFIG_NODE, &router_rip_cmd); install_element (CONFIG_NODE, &no_router_rip_cmd); diff --git a/ripd/ripd.h b/ripd/ripd.h index e6b18e3f04..1cc46ff310 100644 --- a/ripd/ripd.h +++ b/ripd/ripd.h @@ -225,7 +225,7 @@ struct rip_info struct in_addr nexthop_out; u_char metric_set; u_int32_t metric_out; - u_short tag_out; + u_int16_t tag_out; ifindex_t ifindex_out; struct route_node *rp; @@ -383,8 +383,8 @@ extern void rip_init (void); extern void rip_reset (void); extern void rip_clean (void); extern void rip_clean_network (void); -extern void rip_interface_clean (void); -extern void rip_interface_reset (void); +extern void rip_interfaces_clean (void); +extern void rip_interfaces_reset (void); extern void rip_passive_nondefault_clean (void); extern void rip_if_init (void); extern void rip_if_down_all (void); @@ -402,7 +402,8 @@ extern int rip_neighbor_lookup (struct sockaddr_in *); extern int rip_redistribute_check (int); extern void rip_redistribute_add (int, int, struct prefix_ipv4 *, ifindex_t, - struct in_addr *, unsigned int, unsigned char); + struct in_addr *, unsigned int, unsigned char, + route_tag_t); extern void rip_redistribute_delete (int, int, struct prefix_ipv4 *, ifindex_t); extern void rip_redistribute_withdraw (int); extern void rip_zebra_ipv4_add (struct route_node *); diff --git a/ripngd/ripng_debug.c b/ripngd/ripng_debug.c index de5367261d..16c8b3400d 100644 --- a/ripngd/ripng_debug.c +++ b/ripngd/ripng_debug.c @@ -247,7 +247,6 @@ ripng_debug_init () install_element (VIEW_NODE, &show_debugging_ripng_cmd); - install_element (ENABLE_NODE, &show_debugging_ripng_cmd); install_element (ENABLE_NODE, &debug_ripng_events_cmd); install_element (ENABLE_NODE, &debug_ripng_packet_cmd); install_element (ENABLE_NODE, &debug_ripng_packet_direct_cmd); diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c index 4081968122..7bee7625d1 100644 --- a/ripngd/ripng_interface.c +++ b/ripngd/ripng_interface.c @@ -383,7 +383,7 @@ ripng_apply_address_add (struct connected *ifc) { if ((ripng_enable_if_lookup(ifc->ifp->name) >= 0) || (ripng_enable_network_lookup2(ifc) >= 0)) ripng_redistribute_add(ZEBRA_ROUTE_CONNECT, RIPNG_ROUTE_INTERFACE, - &address, 
ifc->ifp->ifindex, NULL); + &address, ifc->ifp->ifindex, NULL, 0); } @@ -704,13 +704,13 @@ ripng_connect_set (struct interface *ifp, int set) if ((ripng_enable_if_lookup(connected->ifp->name) >= 0) || (ripng_enable_network_lookup2(connected) >= 0)) ripng_redistribute_add (ZEBRA_ROUTE_CONNECT, RIPNG_ROUTE_INTERFACE, - &address, connected->ifp->ifindex, NULL); + &address, connected->ifp->ifindex, NULL, 0); } else { ripng_redistribute_delete (ZEBRA_ROUTE_CONNECT, RIPNG_ROUTE_INTERFACE, &address, connected->ifp->ifindex); if (ripng_redistribute_check (ZEBRA_ROUTE_CONNECT)) ripng_redistribute_add (ZEBRA_ROUTE_CONNECT, RIPNG_ROUTE_REDISTRIBUTE, - &address, connected->ifp->ifindex, NULL); + &address, connected->ifp->ifindex, NULL, 0); } } } diff --git a/ripngd/ripng_route.h b/ripngd/ripng_route.h index fe65c88363..9ff90aa8d0 100644 --- a/ripngd/ripng_route.h +++ b/ripngd/ripng_route.h @@ -35,13 +35,13 @@ struct ripng_aggregate u_char metric; /* Tag field of RIPng packet.*/ - u_short tag; + u_int16_t tag; /* Route-map futures - this variables can be changed. */ struct in6_addr nexthop_out; u_char metric_set; u_char metric_out; - u_short tag_out; + u_int16_t tag_out; }; extern void ripng_aggregate_increment (struct route_node *rp, diff --git a/ripngd/ripng_routemap.c b/ripngd/ripng_routemap.c index 9e032e97f2..61900408ff 100644 --- a/ripngd/ripng_routemap.c +++ b/ripngd/ripng_routemap.c @@ -152,8 +152,9 @@ static route_map_result_t route_match_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct ripng_info *rinfo; + route_tag_t rinfo_tag; if (type == RMAP_RIPNG) { @@ -161,7 +162,8 @@ route_match_tag (void *rule, struct prefix *prefix, rinfo = object; /* The information stored by rinfo is host ordered. */ - if (rinfo->tag == *tag) + rinfo_tag = rinfo->tag; + if (rinfo_tag == *tag) return RMAP_MATCH; else return RMAP_NOMATCH; @@ -169,32 +171,12 @@ route_match_tag (void *rule, struct prefix *prefix, return RMAP_NOMATCH; } -/* Route map `match tag' match statement. `arg' is TAG value */ -static void * -route_match_tag_compile (const char *arg) -{ - u_short *tag; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - *tag = atoi (arg); - - return tag; -} - -/* Free route map's compiled `match tag' value. */ -static void -route_match_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - -/* Route map commands for tag matching. */ static struct route_map_rule_cmd route_match_tag_cmd = { "tag", route_match_tag, - route_match_tag_compile, - route_match_tag_free + route_map_rule_tag_compile, + route_map_rule_tag_free, }; /* `set metric METRIC' */ @@ -364,7 +346,7 @@ static route_map_result_t route_set_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct ripng_info *rinfo; if(type == RMAP_RIPNG) @@ -380,33 +362,13 @@ route_set_tag (void *rule, struct prefix *prefix, return RMAP_OKAY; } -/* Route map `tag' compile function. Given string is converted - to u_short. */ -static void * -route_set_tag_compile (const char *arg) -{ - u_short *tag; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - *tag = atoi (arg); - - return tag; -} - -/* Free route map's compiled `ip nexthop' value. */ -static void -route_set_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - /* Route map commands for tag set. 
*/ static struct route_map_rule_cmd route_set_tag_cmd = { "tag", route_set_tag, - route_set_tag_compile, - route_set_tag_free + route_map_rule_tag_compile, + route_map_rule_tag_free }; #define MATCH_STR "Match values from routing table\n" diff --git a/ripngd/ripng_zebra.c b/ripngd/ripng_zebra.c index 1190b1873a..09472683d7 100644 --- a/ripngd/ripng_zebra.c +++ b/ripngd/ripng_zebra.c @@ -92,6 +92,12 @@ ripng_zebra_ipv6_send (struct route_node *rp, u_char cmd) SET_FLAG (api.message, ZAPI_MESSAGE_METRIC); api.metric = rinfo->metric; + if (rinfo->tag) + { + SET_FLAG (api.message, ZAPI_MESSAGE_TAG); + api.tag = rinfo->tag; + } + zapi_ipv6_route (cmd, zclient, (struct prefix_ipv6 *)&rp->p, &api); @@ -143,13 +149,13 @@ ripng_zebra_read_ipv6 (int command, struct zclient *zclient, /* Type, flags, message. */ api.type = stream_getc (s); api.instance = stream_getw (s); - api.flags = stream_getc (s); + api.flags = stream_getl (s); api.message = stream_getc (s); /* IPv6 prefix. */ memset (&p, 0, sizeof (struct prefix_ipv6)); p.family = AF_INET6; - p.prefixlen = stream_getc (s); + p.prefixlen = MIN(IPV6_MAX_PREFIXLEN, stream_getc (s)); stream_get (&p.prefix, s, PSIZE (p.prefixlen)); /* Nexthop, ifindex, distance, metric. */ @@ -172,8 +178,13 @@ ripng_zebra_read_ipv6 (int command, struct zclient *zclient, else api.metric = 0; + if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG)) + api.tag = stream_getl (s); + else + api.tag = 0; + if (command == ZEBRA_REDISTRIBUTE_IPV6_ADD) - ripng_redistribute_add (api.type, RIPNG_ROUTE_REDISTRIBUTE, &p, ifindex, &nexthop); + ripng_redistribute_add (api.type, RIPNG_ROUTE_REDISTRIBUTE, &p, ifindex, &nexthop, api.tag); else ripng_redistribute_delete (api.type, RIPNG_ROUTE_REDISTRIBUTE, &p, ifindex); @@ -255,6 +266,7 @@ static struct { {ZEBRA_ROUTE_STATIC, 1, "static"}, {ZEBRA_ROUTE_OSPF6, 1, "ospf6"}, {ZEBRA_ROUTE_BGP, 2, "bgp"}, + {ZEBRA_ROUTE_VNC, 1, "vnc"}, {0, 0, NULL} }; @@ -519,12 +531,6 @@ zebra_init (struct thread_master *master) zclient->interface_delete = ripng_interface_delete; zclient->interface_address_add = ripng_interface_address_add; zclient->interface_address_delete = ripng_interface_address_delete; - zclient->ipv4_route_add = NULL; - zclient->ipv4_route_delete = NULL; - zclient->redistribute_route_ipv4_add = NULL; - zclient->redistribute_route_ipv4_del = NULL; - zclient->ipv6_route_add = ripng_zebra_read_ipv6; - zclient->ipv6_route_delete = ripng_zebra_read_ipv6; zclient->redistribute_route_ipv6_add = ripng_zebra_read_ipv6; zclient->redistribute_route_ipv6_del = ripng_zebra_read_ipv6; diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c index c896922bc0..e7db971208 100644 --- a/ripngd/ripngd.c +++ b/ripngd/ripngd.c @@ -321,9 +321,9 @@ ripng_packet_dump (struct ripng_packet *packet, int size, const char *sndrcv) if (rte->metric == RIPNG_METRIC_NEXTHOP) zlog_debug (" nexthop %s/%d", inet6_ntoa (rte->addr), rte->prefixlen); else - zlog_debug (" %s/%d metric %d tag %d", + zlog_debug (" %s/%d metric %d tag %"ROUTE_TAG_PRI, inet6_ntoa (rte->addr), rte->prefixlen, - rte->metric, ntohs (rte->tag)); + rte->metric, (route_tag_t)ntohs (rte->tag)); } } @@ -337,15 +337,15 @@ ripng_nexthop_rte (struct rte *rte, /* Logging before checking RTE. 
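Throughout the logging hunks in both daemons, tag values are now printed with a ROUTE_TAG_PRI format macro and cast to route_tag_t, so the format strings keep matching if the tag type ever changes width again. The typedef and macro themselves live in the library side of the patch and are not visible in this excerpt; assuming they follow the usual <inttypes.h> pattern, the idea is roughly:

#include <inttypes.h>
#include <stdio.h>

/* Assumed shape of the library definitions (not copied from the patch). */
typedef uint32_t route_tag_t;
#define ROUTE_TAG_PRI PRIu32

int
main (void)
{
  uint16_t wire_tag = 0xbeef;                  /* 16-bit tag as read off the wire */
  route_tag_t tag = (route_tag_t) wire_tag;    /* widened for internal use */

  printf ("tag %" ROUTE_TAG_PRI "\n", tag);    /* format tracks the typedef */
  return 0;
}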
*/ if (IS_RIPNG_DEBUG_RECV) - zlog_debug ("RIPng nexthop RTE address %s tag %d prefixlen %d", - inet6_ntoa (rte->addr), ntohs (rte->tag), rte->prefixlen); + zlog_debug ("RIPng nexthop RTE address %s tag %"ROUTE_TAG_PRI" prefixlen %d", + inet6_ntoa (rte->addr), (route_tag_t)ntohs (rte->tag), rte->prefixlen); /* RFC2080 2.1.1 Next Hop: The route tag and prefix length in the next hop RTE must be set to zero on sending and ignored on receiption. */ if (ntohs (rte->tag) != 0) - zlog_warn ("RIPng nexthop RTE with non zero tag value %d from %s", - ntohs (rte->tag), inet6_ntoa (from->sin6_addr)); + zlog_warn ("RIPng nexthop RTE with non zero tag value %"ROUTE_TAG_PRI" from %s", + (route_tag_t)ntohs (rte->tag), inet6_ntoa (from->sin6_addr)); if (rte->prefixlen != 0) zlog_warn ("RIPng nexthop RTE with non zero prefixlen value %d from %s", @@ -610,102 +610,36 @@ ripng_timeout_update (struct ripng_info *rinfo) } static int -ripng_incoming_filter (struct prefix_ipv6 *p, struct ripng_interface *ri) +ripng_filter (int ripng_distribute, struct prefix_ipv6 *p, + struct ripng_interface *ri) { struct distribute *dist; struct access_list *alist; struct prefix_list *plist; + int distribute = ripng_distribute == RIPNG_FILTER_OUT ? + DISTRIBUTE_V6_OUT : DISTRIBUTE_V6_IN; + const char *inout = ripng_distribute == RIPNG_FILTER_OUT ? "out" : "in"; /* Input distribute-list filtering. */ - if (ri->list[RIPNG_FILTER_IN]) + if (ri->list[ripng_distribute]) { - if (access_list_apply (ri->list[RIPNG_FILTER_IN], + if (access_list_apply (ri->list[ripng_distribute], (struct prefix *) p) == FILTER_DENY) { if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by distribute in", - inet6_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - if (ri->prefix[RIPNG_FILTER_IN]) - { - if (prefix_list_apply (ri->prefix[RIPNG_FILTER_IN], - (struct prefix *) p) == PREFIX_DENY) - { - if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by prefix-list in", - inet6_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - - /* All interface filter check. 
*/ - dist = distribute_lookup (NULL); - if (dist) - { - if (dist->list[DISTRIBUTE_IN]) - { - alist = access_list_lookup (AFI_IP6, dist->list[DISTRIBUTE_IN]); - - if (alist) - { - if (access_list_apply (alist, - (struct prefix *) p) == FILTER_DENY) - { - if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by distribute in", - inet6_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by distribute %s", + inet6_ntoa (p->prefix), p->prefixlen, inout); return -1; } } - } - if (dist->prefix[DISTRIBUTE_IN]) - { - plist = prefix_list_lookup (AFI_IP6, dist->prefix[DISTRIBUTE_IN]); - - if (plist) - { - if (prefix_list_apply (plist, - (struct prefix *) p) == PREFIX_DENY) - { - if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by prefix-list in", - inet6_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - } - } - return 0; -} - -static int -ripng_outgoing_filter (struct prefix_ipv6 *p, struct ripng_interface *ri) + if (ri->prefix[ripng_distribute]) { - struct distribute *dist; - struct access_list *alist; - struct prefix_list *plist; - - if (ri->list[RIPNG_FILTER_OUT]) - { - if (access_list_apply (ri->list[RIPNG_FILTER_OUT], - (struct prefix *) p) == FILTER_DENY) - { - if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d is filtered by distribute out", - inet6_ntoa (p->prefix), p->prefixlen); - return -1; - } - } - if (ri->prefix[RIPNG_FILTER_OUT]) - { - if (prefix_list_apply (ri->prefix[RIPNG_FILTER_OUT], + if (prefix_list_apply (ri->prefix[ripng_distribute], (struct prefix *) p) == PREFIX_DENY) { if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d is filtered by prefix-list out", - inet6_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by prefix-list %s", + inet6_ntoa (p->prefix), p->prefixlen, inout); return -1; } } @@ -714,9 +648,9 @@ ripng_outgoing_filter (struct prefix_ipv6 *p, struct ripng_interface *ri) dist = distribute_lookup (NULL); if (dist) { - if (dist->list[DISTRIBUTE_OUT]) + if (dist->list[distribute]) { - alist = access_list_lookup (AFI_IP6, dist->list[DISTRIBUTE_OUT]); + alist = access_list_lookup (AFI_IP6, dist->list[distribute]); if (alist) { @@ -724,15 +658,15 @@ ripng_outgoing_filter (struct prefix_ipv6 *p, struct ripng_interface *ri) (struct prefix *) p) == FILTER_DENY) { if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by distribute out", - inet6_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by distribute %s", + inet6_ntoa (p->prefix), p->prefixlen, inout); return -1; } } } - if (dist->prefix[DISTRIBUTE_OUT]) + if (dist->prefix[distribute]) { - plist = prefix_list_lookup (AFI_IP6, dist->prefix[DISTRIBUTE_OUT]); + plist = prefix_list_lookup (AFI_IP6, dist->prefix[distribute]); if (plist) { @@ -740,8 +674,8 @@ ripng_outgoing_filter (struct prefix_ipv6 *p, struct ripng_interface *ri) (struct prefix *) p) == PREFIX_DENY) { if (IS_RIPNG_DEBUG_PACKET) - zlog_debug ("%s/%d filtered by prefix-list out", - inet6_ntoa (p->prefix), p->prefixlen); + zlog_debug ("%s/%d filtered by prefix-list %s", + inet6_ntoa (p->prefix), p->prefixlen, inout); return -1; } } @@ -781,7 +715,7 @@ ripng_route_process (struct rte *rte, struct sockaddr_in6 *from, /* Apply input filters. */ ri = ifp->info; - ret = ripng_incoming_filter (&p, ri); + ret = ripng_filter (RIPNG_FILTER_IN, &p, ri); if (ret < 0) return; @@ -973,7 +907,8 @@ ripng_route_process (struct rte *rte, struct sockaddr_in6 *from, /* Add redistributed route to RIPng table. 
*/ void ripng_redistribute_add (int type, int sub_type, struct prefix_ipv6 *p, - ifindex_t ifindex, struct in6_addr *nexthop) + ifindex_t ifindex, struct in6_addr *nexthop, + route_tag_t tag) { struct route_node *rp; struct ripng_info *rinfo = NULL, newinfo; @@ -992,6 +927,8 @@ ripng_redistribute_add (int type, int sub_type, struct prefix_ipv6 *p, newinfo.sub_type = sub_type; newinfo.ifindex = ifindex; newinfo.metric = 1; + if (tag <= UINT16_MAX) /* RIPng only supports 16 bit tags */ + newinfo.tag = tag; newinfo.rp = rp; if (nexthop && IN6_IS_ADDR_LINKLOCAL(nexthop)) newinfo.nexthop = *nexthop; @@ -1676,7 +1613,7 @@ ripng_output_process (struct interface *ifp, struct sockaddr_in6 *to, rinfo->nexthop_out = rinfo->nexthop; /* Apply output filters. */ - ret = ripng_outgoing_filter (p, ri); + ret = ripng_filter (RIPNG_FILTER_OUT, p, ri); if (ret < 0) continue; @@ -1805,7 +1742,7 @@ ripng_output_process (struct interface *ifp, struct sockaddr_in6 *to, memset(&aggregate->nexthop_out, 0, sizeof(aggregate->nexthop_out)); /* Apply output filters.*/ - ret = ripng_outgoing_filter (p, ri); + ret = ripng_filter (RIPNG_FILTER_OUT, p, ri); if (ret < 0) continue; @@ -2083,8 +2020,8 @@ DEFUN (show_ipv6_ripng, vty_out (vty, "%*s", 18, " "); vty_out (vty, "%*s", 28, " "); - vty_out (vty, "self %2d %3d%s", aggregate->metric, - aggregate->tag, + vty_out (vty, "self %2d %3"ROUTE_TAG_PRI"%s", aggregate->metric, + (route_tag_t)aggregate->tag, VTY_NEWLINE); } @@ -2128,8 +2065,8 @@ DEFUN (show_ipv6_ripng, if (len > 0) vty_out (vty, "%*s", len, " "); - vty_out (vty, " %2d %3d ", - rinfo->metric, rinfo->tag); + vty_out (vty, " %2d %3"ROUTE_TAG_PRI" ", + rinfo->metric, (route_tag_t)rinfo->tag); /* time */ if ((rinfo->type == ZEBRA_ROUTE_RIPNG) && @@ -2283,7 +2220,7 @@ DEFUN (ripng_route, } rp->info = (void *)1; - ripng_redistribute_add (ZEBRA_ROUTE_RIPNG, RIPNG_ROUTE_STATIC, &p, 0, NULL); + ripng_redistribute_add (ZEBRA_ROUTE_RIPNG, RIPNG_ROUTE_STATIC, &p, 0, NULL, 0); return CMD_SUCCESS; } @@ -2615,7 +2552,7 @@ DEFUN (ripng_default_information_originate, ripng->default_information = 1; str2prefix_ipv6 ("::/0", &p); - ripng_redistribute_add (ZEBRA_ROUTE_RIPNG, RIPNG_ROUTE_DEFAULT, &p, 0, NULL); + ripng_redistribute_add (ZEBRA_ROUTE_RIPNG, RIPNG_ROUTE_DEFAULT, &p, 0, NULL, 0); } return CMD_SUCCESS; @@ -2821,9 +2758,9 @@ ripng_distribute_update (struct distribute *dist) ri = ifp->info; - if (dist->list[DISTRIBUTE_IN]) + if (dist->list[DISTRIBUTE_V6_IN]) { - alist = access_list_lookup (AFI_IP6, dist->list[DISTRIBUTE_IN]); + alist = access_list_lookup (AFI_IP6, dist->list[DISTRIBUTE_V6_IN]); if (alist) ri->list[RIPNG_FILTER_IN] = alist; else @@ -2832,9 +2769,9 @@ ripng_distribute_update (struct distribute *dist) else ri->list[RIPNG_FILTER_IN] = NULL; - if (dist->list[DISTRIBUTE_OUT]) + if (dist->list[DISTRIBUTE_V6_OUT]) { - alist = access_list_lookup (AFI_IP6, dist->list[DISTRIBUTE_OUT]); + alist = access_list_lookup (AFI_IP6, dist->list[DISTRIBUTE_V6_OUT]); if (alist) ri->list[RIPNG_FILTER_OUT] = alist; else @@ -2843,9 +2780,9 @@ ripng_distribute_update (struct distribute *dist) else ri->list[RIPNG_FILTER_OUT] = NULL; - if (dist->prefix[DISTRIBUTE_IN]) + if (dist->prefix[DISTRIBUTE_V6_IN]) { - plist = prefix_list_lookup (AFI_IP6, dist->prefix[DISTRIBUTE_IN]); + plist = prefix_list_lookup (AFI_IP6, dist->prefix[DISTRIBUTE_V6_IN]); if (plist) ri->prefix[RIPNG_FILTER_IN] = plist; else @@ -2854,9 +2791,9 @@ ripng_distribute_update (struct distribute *dist) else ri->prefix[RIPNG_FILTER_IN] = NULL; - if 
(dist->prefix[DISTRIBUTE_OUT]) + if (dist->prefix[DISTRIBUTE_V6_OUT]) { - plist = prefix_list_lookup (AFI_IP6, dist->prefix[DISTRIBUTE_OUT]); + plist = prefix_list_lookup (AFI_IP6, dist->prefix[DISTRIBUTE_V6_OUT]); if (plist) ri->prefix[RIPNG_FILTER_OUT] = plist; else @@ -3091,9 +3028,6 @@ ripng_init () install_element (VIEW_NODE, &show_ipv6_ripng_cmd); install_element (VIEW_NODE, &show_ipv6_ripng_status_cmd); - install_element (ENABLE_NODE, &show_ipv6_ripng_cmd); - install_element (ENABLE_NODE, &show_ipv6_ripng_status_cmd); - install_element (CONFIG_NODE, &router_ripng_cmd); install_element (CONFIG_NODE, &no_router_ripng_cmd); diff --git a/ripngd/ripngd.h b/ripngd/ripngd.h index 031ca963d9..70cba3c680 100644 --- a/ripngd/ripngd.h +++ b/ripngd/ripngd.h @@ -148,7 +148,7 @@ struct ripng struct rte { struct in6_addr addr; /* RIPng destination prefix */ - u_short tag; /* RIPng tag */ + u_int16_t tag; /* RIPng tag */ u_char prefixlen; /* Length of the RIPng prefix */ u_char metric; /* Metric of the RIPng route */ /* The nexthop is stored by the structure @@ -202,7 +202,7 @@ struct ripng_info struct in6_addr nexthop_out; u_char metric_set; u_char metric_out; - u_short tag_out; + u_int16_t tag_out; struct route_node *rp; }; @@ -382,7 +382,7 @@ extern void ripng_info_free (struct ripng_info *rinfo); extern void ripng_event (enum ripng_event, int); extern int ripng_request (struct interface *ifp); extern void ripng_redistribute_add (int, int, struct prefix_ipv6 *, - ifindex_t, struct in6_addr *); + ifindex_t, struct in6_addr *, route_tag_t); extern void ripng_redistribute_delete (int, int, struct prefix_ipv6 *, ifindex_t); extern void ripng_redistribute_withdraw (int type); diff --git a/tests/Makefile.am b/tests/Makefile.am index 16c9e4c3db..76280f7189 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -25,6 +25,12 @@ else TESTS_BGPD = endif +if ENABLE_BGP_VNC +BGP_VNC_RFP_LIB=@top_builddir@/$(LIBRFP)/librfp.a +else +BGP_VNC_RFP_LIB = +endif + check_PROGRAMS = testsig testsegv testbuffer testmemory heavy heavywq heavythread \ testprivs teststream testchecksum tabletest testnexthopiter \ testcommands test-timer-correctness test-timer-performance \ @@ -77,12 +83,12 @@ teststream_LDADD = ../lib/libzebra.la @LIBCAP@ heavy_LDADD = ../lib/libzebra.la @LIBCAP@ -lm heavywq_LDADD = ../lib/libzebra.la @LIBCAP@ -lm heavythread_LDADD = ../lib/libzebra.la @LIBCAP@ -lm -aspathtest_LDADD = ../bgpd/libbgp.a ../lib/libzebra.la @LIBCAP@ -lm -testbgpcap_LDADD = ../bgpd/libbgp.a ../lib/libzebra.la @LIBCAP@ -lm -ecommtest_LDADD = ../bgpd/libbgp.a ../lib/libzebra.la @LIBCAP@ -lm -testbgpmpattr_LDADD = ../bgpd/libbgp.a ../lib/libzebra.la @LIBCAP@ -lm +aspathtest_LDADD = ../bgpd/libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libzebra.la @LIBCAP@ -lm +testbgpcap_LDADD = ../bgpd/libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libzebra.la @LIBCAP@ -lm +ecommtest_LDADD = ../bgpd/libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libzebra.la @LIBCAP@ -lm +testbgpmpattr_LDADD = ../bgpd/libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libzebra.la @LIBCAP@ -lm testchecksum_LDADD = ../lib/libzebra.la @LIBCAP@ -testbgpmpath_LDADD = ../bgpd/libbgp.a ../lib/libzebra.la @LIBCAP@ -lm +testbgpmpath_LDADD = ../bgpd/libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libzebra.la @LIBCAP@ -lm tabletest_LDADD = ../lib/libzebra.la @LIBCAP@ -lm testnexthopiter_LDADD = ../lib/libzebra.la @LIBCAP@ testcommands_LDADD = ../lib/libzebra.la @LIBCAP@ diff --git a/tests/bgp_capability_test.c b/tests/bgp_capability_test.c index 73f46b59a0..7fa4be5611 100644 --- a/tests/bgp_capability_test.c +++ 
b/tests/bgp_capability_test.c @@ -21,6 +21,7 @@ #include +#include "qobj.h" #include "vty.h" #include "stream.h" #include "privs.h" @@ -643,6 +644,7 @@ main (void) term_bgp_debug_packet = -1UL; term_bgp_debug_as4 = -1UL; + qobj_init (); master = thread_master_create (); bgp_master_init (); vrf_init (); diff --git a/tests/bgp_mp_attr_test.c b/tests/bgp_mp_attr_test.c index 928d69752a..cde23d79e4 100644 --- a/tests/bgp_mp_attr_test.c +++ b/tests/bgp_mp_attr_test.c @@ -21,6 +21,7 @@ #include +#include "qobj.h" #include "vty.h" #include "stream.h" #include "privs.h" @@ -532,6 +533,7 @@ main (void) term_bgp_debug_packet = -1UL; term_bgp_debug_as4 = -1UL; + qobj_init (); master = thread_master_create (); bgp_master_init (); vrf_init (); diff --git a/tests/bgp_mpath_test.c b/tests/bgp_mpath_test.c index 66a718cbe2..dbcb00a2ef 100644 --- a/tests/bgp_mpath_test.c +++ b/tests/bgp_mpath_test.c @@ -23,6 +23,7 @@ #include +#include "qobj.h" #include "vty.h" #include "stream.h" #include "privs.h" @@ -374,6 +375,7 @@ int all_tests_count = (sizeof(all_tests)/sizeof(testcase_t *)); static int global_test_init (void) { + qobj_init (); master = thread_master_create (); zclient = zclient_new(master); bgp_master_init (); diff --git a/tools/quagga-reload.py b/tools/quagga-reload.py index 900ed55c43..ed36b940a9 100755 --- a/tools/quagga-reload.py +++ b/tools/quagga-reload.py @@ -26,6 +26,10 @@ from pprint import pformat log = logging.getLogger(__name__) +class VtyshMarkException(Exception): + pass + + class Context(object): """ @@ -88,9 +92,7 @@ class Config(object): try: file_output = subprocess.check_output(['/usr/bin/vtysh', '-m', '-f', filename]) except subprocess.CalledProcessError as e: - log.error('vtysh marking of config file %s failed with error %s:', filename, str(e)) - print "vtysh marking of file %s failed with error: %s" % (filename, str(e)) - sys.exit(1) + raise VtyshMarkException(str(e)) for line in file_output.split('\n'): line = line.strip() @@ -115,9 +117,7 @@ class Config(object): "/usr/bin/vtysh -c 'show run' | /usr/bin/tail -n +4 | /usr/bin/vtysh -m -f -", shell=True) except subprocess.CalledProcessError as e: - log.error('vtysh marking of running config failed with error %s:', str(e)) - print "vtysh marking of running config failed with error %s:" % (str(e)) - sys.exit(1) + raise VtyshMarkException(str(e)) for line in config_text.split('\n'): line = line.strip() diff --git a/tools/xml2cli.pl b/tools/xml2cli.pl new file mode 100755 index 0000000000..43789131c3 --- /dev/null +++ b/tools/xml2cli.pl @@ -0,0 +1,436 @@ +#!/usr/bin/perl +## +## Parse a XML file containing a tree-like representation of Quagga CLI +## commands and generate a file with: +## +## - a DEFUN function for each command; +## - an initialization function. +## +## +## Copyright (C) 2012 Renato Westphal +## This file is part of GNU Zebra. +## +## GNU Zebra is free software; you can redistribute it and/or modify it +## under the terms of the GNU General Public License as published by the +## Free Software Foundation; either version 2, or (at your option) any +## later version. +## +## GNU Zebra is distributed in the hope that it will be useful, but +## WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +## General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with GNU Zebra; see the file COPYING. 
If not, write to the Free +## Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA +## 02111-1307, USA. +## + +use strict; +use warnings; +use Getopt::Std; +use vars qw($opt_d); +use File::Basename qw(fileparse); +use XML::LibXML; + +%::input_strs = ( + "ifname" => "IFNAME", + "word" => "WORD", + "line" => ".LINE", + "ipv4" => "A.B.C.D", + "ipv4m" => "A.B.C.D/M", + "ipv6" => "X:X::X:X", + "ipv6m" => "X:X::X:X/M", + "mtu" => "<1500-9180>", + # BGP specific + "rd" => "ASN:nn_or_IP-address:nn", + "asn" => "<1-4294967295>", + "community" => "AA:NN", + "clist" => "<1-500>", + # LDP specific + "disc_time" => "<1-65535>", + "session_time" => "<15-65535>", + "pwid" => "<1-4294967295>", + "hops" => "<1-254>" + ); + +# parse options node and store the corresponding information +# into a global hash of hashes +sub parse_options { + my $xml_node = $_[0]; + my @cmdstr; + + my $options_name = $xml_node->findvalue('./@name'); + if (not $options_name) { + die('error: "options" node without "name" attribute'); + } + + # initialize hash + $::options{$options_name}{'cmdstr'} = ""; + $::options{$options_name}{'help'} = ""; + + my @children = $xml_node->getChildnodes(); + foreach my $child(@children) { + # skip comments, random text, etc + if ($child->getType() != XML_ELEMENT_NODE) { + next; + } + + # check for error/special conditions + if ($child->getName() ne "option") { + die('error: invalid node type: "' . $child->getName() . '"'); + } + + my $name = $child->findvalue('./@name'); + my $input = $child->findvalue('./@input'); + my $help = $child->findvalue('./@help'); + if ($input) { + $name = $::input_strs{$input}; + } + + push (@cmdstr, $name); + $::options{$options_name}{'help'} .= "\n \"" . $help . "\\n\""; + } + $::options{$options_name}{'cmdstr'} = "(" . join('|', @cmdstr) . ")"; +} + +# given a subtree, replace all the corresponding include nodes by +# this subtree +sub subtree_replace_includes { + my $subtree = $_[0]; + + my $subtree_name = $subtree->findvalue('./@name'); + if (not $subtree_name) { + die("subtree without \"name\" attribute"); + } + + my $query = "//include[\@subtree='$subtree_name']"; + foreach my $include_node($::xml->findnodes($query)) { + my @children = $subtree->getChildnodes(); + foreach my $child(reverse @children) { + my $include_node_parent = $include_node->getParentNode(); + $include_node_parent->insertAfter($child->cloneNode(1), + $include_node); + } + $include_node->unbindNode(); + } + $subtree->unbindNode(); +} + +# generate arguments for a given command +sub generate_arguments { + my @nodes = @_; + my $arguments; + my $no_args = 1; + my $argc = 0; + + $arguments .= " struct vty_arg *args[] =\n"; + $arguments .= " {\n"; + for (my $i = 0; $i < @nodes; $i++) { + my %node = %{$nodes[$i]}; + my $arg_value; + + if (not $node{'arg'}) { + next; + } + $no_args = 0; + + # for input and select nodes, the value of the argument is an + # argv[] element. for the other types of nodes, the value of the + # argument is the name of the node + if ($node{'input'} or $node{'type'} eq "select") { + $arg_value = "argv[" . $argc++ . "]"; + } else { + $arg_value = '"' . $node{'name'} . '"'; + } + + if ($node{'input'} and $node{'input'} eq "line") { + # arguments of the type 'line' may have multiple spaces (i.e + # they don't fit into a single argv[] element). to properly + # handle these arguments, we need to provide direct access + # to the argv[] array and the argc variable. + my $argc_str = "argc" . (($argc > 1) ? " - " . ($argc - 1) : ""); + my $argv_str = "argv" . 
(($argc > 1) ? " + " . ($argc - 1) : ""); + $arguments .= " &(struct vty_arg) { " + . ".name = \"" . $node{'arg'} . "\", " + . ".argc = $argc_str, " + . ".argv = $argv_str },\n"; + } else { + # common case - each argument has a name and a single value + $arguments .= " &(struct vty_arg) { " + . ".name = \"" . $node{'arg'} . "\", " + . ".value = " . $arg_value . " },\n"; + } + } + $arguments .= " NULL\n"; + $arguments .= " };\n"; + + # handle special case + if ($no_args) { + return " struct vty_arg *args[] = { NULL };\n"; + } + + return $arguments; +} + +# generate C code +sub generate_code { + my @nodes = @_; + my $funcname = ''; + my $cmdstr = ''; + my $cmdname = ''; + my $helpstr = ''; + my $function = ''; + + for (my $i = 0; $i < @nodes; $i++) { + my %node = %{$nodes[$i]}; + if ($node{'input'}) { + $funcname .= $node{'input'} . " "; + $cmdstr .= $::input_strs{$node{'input'}} . " "; + $helpstr .= "\n \"" . $node{'help'} . "\\n\""; + } elsif ($node{'type'} eq "select") { + my $options_name = $node{'options'}; + $funcname .= $options_name . " "; + $cmdstr .= $::options{$options_name}{'cmdstr'} . " "; + $helpstr .= $::options{$options_name}{'help'}; + } else { + $funcname .= $node{'name'} . " "; + $cmdstr .= $node{'name'} . " "; + $helpstr .= "\n \"" . $node{'help'} . "\\n\""; + } + + # update the command string + if ($node{'function'} ne "inherited") { + $function = $node{'function'}; + } + } + + # rtrim + $funcname =~ s/\s+$//; + $cmdstr =~ s/\s+$//; + # lowercase + $funcname = lc($funcname); + # replace " " by "_" + $funcname =~ tr/ /_/; + # replace "-" by "_" + $funcname =~ tr/-/_/; + # add prefix + $funcname = $::cmdprefix . '_' . $funcname; + + # generate DEFUN + $cmdname = $funcname . "_cmd"; + + # don't generate same command more than once + if ($::commands{$cmdname}) { + return $cmdname; + } + $::commands{$cmdname} = "1"; + + print STDOUT "DEFUN (" . $funcname . ",\n" + . " " . $cmdname . ",\n" + . " \"" . $cmdstr . "\"," + . $helpstr . ")\n" + . "{\n" + . generate_arguments(@nodes) + . " return " . $function . " (vty, args);\n" + . "}\n\n"; + + return $cmdname; +} + +# parse tree node (recursive function) +sub parse_tree { + # get args + my $xml_node = $_[0]; + my @nodes = @{$_[1]}; + my $tree_name = $_[2]; + + # hash containing all the node attributes + my %node; + $node{'type'} = $xml_node->getName(); + + # check for error/special conditions + if ($node{'type'} eq "tree") { + goto end; + } + if ($node{'type'} eq "include") { + die('error: can not include "' + . $xml_node->findvalue('./@subtree') . '"'); + } + if (not $node{'type'} ~~ [qw(option select)]) { + die('error: invalid node type: "' . $node{'type'} . 
'"'); + } + if ($node{'type'} eq "select") { + my $options_name = $xml_node->findvalue('./@options'); + if (not $options_name) { + die('error: "select" node without "name" attribute'); + } + if (not $::options{$options_name}) { + die('error: can not find options'); + } + $node{'options'} = $options_name; + } + + # get node attributes + $node{'name'} = $xml_node->findvalue('./@name'); + $node{'input'} = $xml_node->findvalue('./@input'); + $node{'arg'} = $xml_node->findvalue('./@arg'); + $node{'help'} = $xml_node->findvalue('./@help'); + $node{'function'} = $xml_node->findvalue('./@function'); + $node{'ifdef'} = $xml_node->findvalue('./@ifdef'); + + # push node to stack + push (@nodes, \%node); + + # generate C code + if ($node{'function'}) { + my $cmdname = generate_code(@nodes); + push (@{$::trees{$tree_name}}, [0, $cmdname, 0]); + } + + if ($node{'ifdef'}) { + push (@{$::trees{$tree_name}}, [$node{'ifdef'}, 0, 0]); + } + +end: + # recursively process child nodes + my @children = $xml_node->getChildnodes(); + foreach my $child(@children) { + # skip comments, random text, etc + if ($child->getType() != XML_ELEMENT_NODE) { + next; + } + parse_tree($child, \@nodes, $tree_name); + } + + if ($node{'ifdef'}) { + push (@{$::trees{$tree_name}}, [0, 0, $node{'ifdef'}]); + } +} + +sub parse_node { + # get args + my $xml_node = $_[0]; + + my $node_name = $xml_node->findvalue('./@name'); + if (not $node_name) { + die('missing the "name" attribute'); + } + + my $install = $xml_node->findvalue('./@install'); + my $config_write = $xml_node->findvalue('./@config_write'); + if ($install and $install eq "1") { + print " install_node (&" .lc( $node_name) . "_node, " . $config_write . ");\n"; + } + + my $install_default = $xml_node->findvalue('./@install_default'); + if ($install_default and $install_default eq "1") { + print " install_default (" . $node_name . "_NODE);\n"; + } + + my @children = $xml_node->getChildnodes(); + foreach my $child(@children) { + # skip comments, random text, etc + if ($child->getType() != XML_ELEMENT_NODE) { + next; + } + + if ($child->getName() ne "include") { + die('error: invalid node type: "' . $child->getName() . '"'); + } + my $tree_name = $child->findvalue('./@tree'); + if (not $tree_name) { + die('missing the "tree" attribute'); + } + + foreach my $entry (@{$::trees{$tree_name}}) { + my ($ifdef, $cmdname, $endif) = @{$entry}; + + if ($ifdef) { + print ("#ifdef " . $ifdef . "\n"); + } + + if ($cmdname) { + print " install_element (" . $node_name . "_NODE, &" . $cmdname . ");\n"; + } + + if ($endif) { + print ("#endif /* " . $endif . " */\n"); + } + } + } +} + +# parse command-line arguments +if (not getopts('d')) { + die("Usage: xml2cli.pl [-d] FILE\n"); +} +my $file = shift; + +# initialize the XML parser +my $parser = new XML::LibXML; +$parser->keep_blanks(0); + +# parse XML file +$::xml = $parser->parse_file($file); +my $xmlroot = $::xml->getDocumentElement(); +if ($xmlroot->getName() ne "file") { + die('XML root element name must be "file"'); +} + +# read file attributes +my $init_function = $xmlroot->findvalue('./@init'); +if (not $init_function) { + die('missing the "init" attribute in the "file" node'); +} +$::cmdprefix = $xmlroot->findvalue('./@cmdprefix'); +if (not $::cmdprefix) { + die('missing the "cmdprefix" attribute in the "file" node'); +} +my $header = $xmlroot->findvalue('./@header'); +if (not $header) { + die('missing the "header" attribute in the "file" node'); +} + +# generate source header +print STDOUT "/* Auto-generated from " . fileparse($file) . ". 
*/\n" + . "/* Do not edit! */\n\n" + . "#include \n\n" + . "#include \"command.h\"\n" + . "#include \"vty.h\"\n" + . "#include \"$header\"\n\n"; + +# Parse options +foreach my $options($::xml->findnodes("/file/options")) { + parse_options($options); +} + +# replace include nodes by the corresponding subtrees +foreach my $subtree(reverse $::xml->findnodes("/file/subtree")) { + subtree_replace_includes($subtree); +} + +# Parse trees +foreach my $tree($::xml->findnodes("/file/tree")) { + my @nodes = (); + my $tree_name = $tree->findvalue('./@name'); + parse_tree($tree, \@nodes, $tree_name); +} + +# install function header +print STDOUT "void\n" + . $init_function . " (void)\n" + . "{\n"; + +# Parse nodes +foreach my $node($::xml->findnodes("/file/node")) { + parse_node($node); +} + +# closing braces for the install function +print STDOUT "}"; + +# print to stderr the expanded XML file if the debug flag (-d) is given +if ($opt_d) { + print STDERR $::xml->toString(1); +} diff --git a/vtysh/Makefile.am b/vtysh/Makefile.am index ed49acca47..58ffdfca26 100644 --- a/vtysh/Makefile.am +++ b/vtysh/Makefile.am @@ -1,6 +1,23 @@ ## Process this file with Automake to create Makefile.in -AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib + +if ENABLE_BGP_VNC +BGP_VNC_RFP_SRCDIR = @top_srcdir@/@LIBRFP@ +BGP_VNC_RFP_INCDIR = -I$(BGP_VNC_RFP_SRCDIR) +BGP_VNC_RFP_SRC = $(BGP_VNC_RFP_SRCDIR)/*.c +BGP_VNC_RFAPI_SRCDIR = @top_srcdir@/bgpd/rfapi +BGP_VNC_RFAPI_INCDIR = -I$(BGP_VNC_RFAPI_SRCDIR) -I$(top_srcdir)/bgpd +BGP_VNC_RFAPI_SRC = $(BGP_VNC_RFAPI_SRCDIR)/*.c +else +BGP_VNC_RFP_INCDIR = +BGP_VNC_RFP_SRCDIR = +BGP_VNC_RFP_SRC = +BGP_VNC_RFAPI_INCDIR = +BGP_VNC_RFAPI_SRCDIR = +BGP_VNC_RFAPI_SRC = +endif +AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib \ + $(BGP_VNC_RFAPI_INCDIR) $(BGP_VNC_RFP_INCDIR) DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" LIBS = @LIBS@ @CURSES@ @LIBPAM@ @@ -42,6 +59,10 @@ if OSPF6D vtysh_scan += $(top_srcdir)/ospf6d/*.c endif +if LDPD +vtysh_scan += $(top_srcdir)/ldpd/ldp_vty_cmds.c +endif + if RIPD vtysh_scan += $(top_srcdir)/ripd/*.c endif @@ -63,7 +84,9 @@ vtysh_cmd_FILES = $(vtysh_scan) \ $(top_srcdir)/zebra/zserv.c $(top_srcdir)/zebra/router-id.c \ $(top_srcdir)/zebra/zebra_routemap.c \ $(top_srcdir)/zebra/zebra_fpm.c \ - $(top_srcdir)/zebra/zebra_ptm.c + $(top_srcdir)/zebra/zebra_ptm.c \ + $(top_srcdir)/zebra/zebra_mpls_vty.c \ + $(BGP_VNC_RFAPI_SRC) $(BGP_VNC_RFP_SRC) vtysh_cmd.c: $(vtysh_cmd_FILES) extract.pl ./extract.pl $(vtysh_cmd_FILES) > vtysh_cmd.c diff --git a/vtysh/extract.pl.in b/vtysh/extract.pl.in index 2b0f50cb2f..0c06aee4f4 100755 --- a/vtysh/extract.pl.in +++ b/vtysh/extract.pl.in @@ -44,6 +44,9 @@ $ignore{'"router ripng"'} = "ignore"; $ignore{'"router ospf"'} = "ignore"; $ignore{'"router ospf (1-65535)"'} = "ignore"; $ignore{'"router ospf6"'} = "ignore"; +$ignore{'"mpls ldp"'} = "ignore"; +$ignore{'"l2vpn WORD type vpls"'} = "ignore"; +$ignore{'"member pseudowire IFNAME"'} = "ignore"; $ignore{'"router bgp"'} = "ignore"; $ignore{'"router bgp " "(1-4294967295)"'} = "ignore"; $ignore{'"router bgp " "(1-4294967295)" " WORD"'} = "ignore"; @@ -63,6 +66,9 @@ $ignore{'"address-family encapv6"'} = "ignore"; $ignore{'"address-family vpnv6"'} = "ignore"; $ignore{'"address-family vpnv6 unicast"'} = "ignore"; $ignore{'"exit-address-family"'} = "ignore"; +$ignore{'"vnc defaults"'} = "ignore"; +$ignore{'"vnc nve-group NAME"'} = "ignore"; +$ignore{'"exit-vnc"'} = "ignore"; $ignore{'"key chain WORD"'} = "ignore"; $ignore{'"key 
(0-2147483647)"'} = "ignore"; $ignore{'"route-map WORD (1-65535)"'} = "ignore"; @@ -79,7 +85,7 @@ my $cli_stomp = 0; foreach (@ARGV) { $file = $_; - open (FH, "@CPP@ -DHAVE_CONFIG_H -DVTYSH_EXTRACT_PL -DHAVE_IPV6 -I@top_builddir@ -I@srcdir@/ -I@srcdir@/.. -I@top_srcdir@/lib -I@top_builddir@/lib -I@top_srcdir@/isisd/topology @CPPFLAGS@ $file |"); + open (FH, "@CPP@ -DHAVE_CONFIG_H -DVTYSH_EXTRACT_PL -DHAVE_IPV6 -I@top_builddir@ -I@srcdir@/ -I@srcdir@/.. -I@top_srcdir@/lib -I@top_builddir@/lib -I@top_srcdir@/isisd/topology -I@top_srcdir@/bgpd -I@top_srcdir@/@LIBRFP@ -I@top_srcdir@/bgpd/rfapi @CPPFLAGS@ $file |"); local $/; undef $/; $line = ; close (FH); @@ -93,7 +99,7 @@ foreach (@ARGV) { # $_ will contain the entire string including the DEFUN, ALIAS, etc. # We need to extract the DEFUN/ALIAS from everything in ()s. # The /s at the end tells the regex to allow . to match newlines. - $_ =~ /^(.*?) \((.*)\)$/s; + $_ =~ /^(.*?)\s*\((.*)\)$/s; my (@defun_array); $defun_or_alias = $1; @@ -158,6 +164,9 @@ foreach (@ARGV) { elsif ($file =~ /lib\/vty\.c$/) { $protocol = "VTYSH_ALL"; } + elsif ($file =~ /librfp\/.*\.c$/ || $file =~ /rfapi\/.*\.c$/) { + $protocol = "VTYSH_BGPD"; + } else { ($protocol) = ($file =~ /^.*\/([a-z0-9]+)\/[a-zA-Z0-9_\-]+\.c$/); $protocol = "VTYSH_" . uc $protocol; @@ -224,7 +233,6 @@ foreach (@ARGV) { # please fix your code before submittal if ($cli_stomp) { warn "There are $cli_stomp command line stomps\n"; - exit $cli_stomp; } # Check finaly alive $cmd; diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c index 70b0d7832e..2d040fd5d3 100644 --- a/vtysh/vtysh.c +++ b/vtysh/vtysh.c @@ -69,13 +69,13 @@ struct vtysh_client vtysh_client[] = { .fd = -1, .name = "ripngd", .flag = VTYSH_RIPNGD, .path = RIPNG_VTYSH_PATH, .next = NULL}, { .fd = -1, .name = "ospfd", .flag = VTYSH_OSPFD, .path = OSPF_VTYSH_PATH, .next = NULL}, { .fd = -1, .name = "ospf6d", .flag = VTYSH_OSPF6D, .path = OSPF6_VTYSH_PATH, .next = NULL}, + { .fd = -1, .name = "ldpd", .flag = VTYSH_LDPD, .path = LDP_VTYSH_PATH, .next = NULL}, { .fd = -1, .name = "bgpd", .flag = VTYSH_BGPD, .path = BGP_VTYSH_PATH, .next = NULL}, { .fd = -1, .name = "isisd", .flag = VTYSH_ISISD, .path = ISIS_VTYSH_PATH, .next = NULL}, { .fd = -1, .name = "pimd", .flag = VTYSH_PIMD, .path = PIM_VTYSH_PATH, .next = NULL}, }; -/* Using integrated config from Quagga.conf. Default is no. 
*/ -int vtysh_writeconfig_integrated = 1; +enum vtysh_write_integrated vtysh_write_integrated = WRITE_INTEGRATED_UNSPECIFIED; extern char config_default[]; @@ -374,6 +374,12 @@ vtysh_execute_func (const char *line, int pager) { vtysh_execute("exit-address-family"); } + else if ((saved_node == BGP_VNC_DEFAULTS_NODE + || saved_node == BGP_VNC_NVE_GROUP_NODE + || saved_node == BGP_VNC_L2_GROUP_NODE) && (tried == 1)) + { + vtysh_execute("exit-vnc"); + } else if ((saved_node == KEYCHAIN_KEY_NODE) && (tried == 1)) { vtysh_execute("exit"); @@ -1007,6 +1013,24 @@ static struct cmd_node bgp_ipv6m_node = "%s(config-router-af)# " }; +static struct cmd_node bgp_vnc_defaults_node = +{ + BGP_VNC_DEFAULTS_NODE, + "%s(config-router-vnc-defaults)# " +}; + +static struct cmd_node bgp_vnc_nve_group_node = +{ + BGP_VNC_NVE_GROUP_NODE, + "%s(config-router-vnc-nve-group)# " +}; + +static struct cmd_node bgp_vnc_l2_group_node = +{ + BGP_VNC_L2_GROUP_NODE, + "%s(config-router-vnc-l2-group)# " +}; + static struct cmd_node ospf_node = { OSPF_NODE, @@ -1025,6 +1049,48 @@ static struct cmd_node ospf6_node = "%s(config-ospf6)# " }; +static struct cmd_node ldp_node = +{ + LDP_NODE, + "%s(config-ldp)# " +}; + +static struct cmd_node ldp_ipv4_node = +{ + LDP_IPV4_NODE, + "%s(config-ldp-af)# " +}; + +static struct cmd_node ldp_ipv6_node = +{ + LDP_IPV6_NODE, + "%s(config-ldp-af)# " +}; + +static struct cmd_node ldp_ipv4_iface_node = +{ + LDP_IPV4_IFACE_NODE, + "%s(config-ldp-af-if)# " +}; + +static struct cmd_node ldp_ipv6_iface_node = +{ + LDP_IPV6_IFACE_NODE, + "%s(config-ldp-af-if)# " +}; + +static struct cmd_node ldp_l2vpn_node = +{ + LDP_L2VPN_NODE, + "%s(config-l2vpn)# " +}; + +static struct cmd_node ldp_pseudowire_node = +{ + LDP_PSEUDOWIRE_NODE, + "%s(config-l2vpn-pw)# " +}; + static struct cmd_node keychain_node = { KEYCHAIN_NODE, @@ -1224,6 +1290,41 @@ DEFUNSH (VTYSH_BGPD, return CMD_SUCCESS; } +DEFUNSH (VTYSH_BGPD, + vnc_defaults, + vnc_defaults_cmd, + "vnc defaults", + "VNC/RFP related configuration\n" + "Configure default NVE group\n") +{ + vty->node = BGP_VNC_DEFAULTS_NODE; + return CMD_SUCCESS; +} + +DEFUNSH (VTYSH_BGPD, + vnc_nve_group, + vnc_nve_group_cmd, + "vnc nve-group NAME", + "VNC/RFP related configuration\n" + "Configure a NVE group\n" + "Group name\n") +{ + vty->node = BGP_VNC_NVE_GROUP_NODE; + return CMD_SUCCESS; +} + +DEFUNSH (VTYSH_BGPD, + vnc_l2_group, + vnc_l2_group_cmd, + "vnc l2-group NAME", + "VNC/RFP related configuration\n" + "Configure a L2 group\n" + "Group name\n") +{ + vty->node = BGP_VNC_L2_GROUP_NODE; + return CMD_SUCCESS; +} + DEFUNSH (VTYSH_RIPD, key_chain, key_chain_cmd, @@ -1292,6 +1393,86 @@ DEFUNSH (VTYSH_OSPF6D, return CMD_SUCCESS; } +DEFUNSH (VTYSH_LDPD, + ldp_mpls_ldp, + ldp_mpls_ldp_cmd, + "mpls ldp", + "Global MPLS configuration subcommands\n" + "Label Distribution Protocol\n") +{ + vty->node = LDP_NODE; + return CMD_SUCCESS; +} + +DEFUNSH (VTYSH_LDPD, + ldp_address_family_ipv4, + ldp_address_family_ipv4_cmd, + "address-family ipv4", + "Configure Address Family and its parameters\n" + "IPv4\n") +{ + vty->node = LDP_IPV4_NODE; + return CMD_SUCCESS; +} + +DEFUNSH (VTYSH_LDPD, + ldp_address_family_ipv6, + ldp_address_family_ipv6_cmd, + "address-family ipv6", + "Configure Address Family and its parameters\n" + "IPv6\n") +{ + vty->node = LDP_IPV6_NODE; + return CMD_SUCCESS; +} + +DEFUNSH (VTYSH_LDPD, + ldp_interface_ifname, + ldp_interface_ifname_cmd, + "interface IFNAME", + "Enable LDP on an interface and enter interface submode\n" + "Interface's name\n") +{ + switch 
(vty->node) + { + case LDP_IPV4_NODE: + vty->node = LDP_IPV4_IFACE_NODE; + break; + case LDP_IPV6_NODE: + vty->node = LDP_IPV6_IFACE_NODE; + break; + default: + break; + } + + return CMD_SUCCESS; +} + +DEFUNSH (VTYSH_LDPD, + ldp_l2vpn_word_type_vpls, + ldp_l2vpn_word_type_vpls_cmd, + "l2vpn WORD type vpls", + "Configure l2vpn commands\n" + "L2VPN name\n" + "L2VPN type\n" + "Virtual Private LAN Service\n") +{ + vty->node = LDP_L2VPN_NODE; + return CMD_SUCCESS; +} + +DEFUNSH (VTYSH_LDPD, + ldp_member_pseudowire_ifname, + ldp_member_pseudowire_ifname_cmd, + "member pseudowire IFNAME", + "L2VPN member configuration\n" + "Pseudowire interface\n" + "Interface's name\n") +{ + vty->node = LDP_PSEUDOWIRE_NODE; + return CMD_SUCCESS; +} + DEFUNSH (VTYSH_ISISD, router_isis, router_isis_cmd, @@ -1382,6 +1563,8 @@ vtysh_exit (struct vty *vty) case RIPNG_NODE: case OSPF_NODE: case OSPF6_NODE: + case LDP_NODE: + case LDP_L2VPN_NODE: case ISIS_NODE: case MASC_NODE: case RMAP_NODE: @@ -1399,8 +1582,24 @@ vtysh_exit (struct vty *vty) case BGP_IPV4M_NODE: case BGP_IPV6_NODE: case BGP_IPV6M_NODE: + case BGP_VNC_DEFAULTS_NODE: + case BGP_VNC_NVE_GROUP_NODE: + case BGP_VNC_L2_GROUP_NODE: vty->node = BGP_NODE; break; + case LDP_IPV4_NODE: + case LDP_IPV6_NODE: + vty->node = LDP_NODE; + break; + case LDP_IPV4_IFACE_NODE: + vty->node = LDP_IPV4_NODE; + break; + case LDP_IPV6_IFACE_NODE: + vty->node = LDP_IPV6_NODE; + break; + case LDP_PSEUDOWIRE_NODE: + vty->node = LDP_L2VPN_NODE; + break; case KEYCHAIN_KEY_NODE: vty->node = KEYCHAIN_NODE; break; @@ -1449,6 +1648,19 @@ DEFUNSH (VTYSH_BGPD, return CMD_SUCCESS; } +DEFUNSH (VTYSH_BGPD, + exit_vnc_config, + exit_vnc_config_cmd, + "exit-vnc", + "Exit from VNC configuration mode\n") +{ + if (vty->node == BGP_VNC_DEFAULTS_NODE + || vty->node == BGP_VNC_NVE_GROUP_NODE + || vty->node == BGP_VNC_L2_GROUP_NODE) + vty->node = BGP_NODE; + return CMD_SUCCESS; +} + DEFUNSH (VTYSH_ZEBRA, vtysh_exit_zebra, vtysh_exit_zebra_cmd, @@ -1575,6 +1787,20 @@ DEFUNSH (VTYSH_OSPF6D, return vtysh_exit_ospf6d (self, vty, argc, argv); } +DEFUNSH (VTYSH_LDPD, + vtysh_exit_ldpd, + vtysh_exit_ldpd_cmd, + "exit", + "Exit current mode and down to previous mode\n") +{ + return vtysh_exit (vty); +} + +ALIAS (vtysh_exit_ldpd, + vtysh_quit_ldpd_cmd, + "quit", + "Exit current mode and down to previous mode\n") + DEFUNSH (VTYSH_ISISD, vtysh_exit_isisd, vtysh_exit_isisd_cmd, @@ -1624,7 +1850,7 @@ DEFUNSH (VTYSH_INTERFACE, } /* TODO Implement "no interface command in isisd. */ -DEFSH (VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D, +DEFSH (VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD, vtysh_no_interface_cmd, "no interface IFNAME", NO_STR @@ -1708,7 +1934,7 @@ DEFUNSH (VTYSH_VRF, /* TODO Implement interface description commands in ripngd, ospf6d * and isisd. 
*/ -DEFSH (VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_OSPFD, +DEFSH (VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_OSPFD|VTYSH_LDPD, interface_desc_cmd, "description LINE...", "Interface specific description\n" @@ -2158,7 +2384,7 @@ DEFUNSH (VTYSH_ALL, DEFUN (vtysh_write_terminal, vtysh_write_terminal_cmd, - "write terminal [<zebra|ripd|ripngd|ospfd|ospf6d|bgpd|isisd|pimd>]", + "write terminal [<zebra|ripd|ripngd|ospfd|ospf6d|ldpd|bgpd|isisd|pimd>]", "Write running configuration to memory, network, or terminal\n" "Write to terminal\n" "For the zebra daemon\n" @@ -2166,6 +2392,7 @@ DEFUN (vtysh_write_terminal, "For the ripng daemon\n" "For the ospf daemon\n" "For the ospfv6 daemon\n" + "For the ldpd daemon" "For the bgp daemon\n" "For the isis daemon\n" "For the pim daemon\n") @@ -2217,7 +2444,7 @@ DEFUN (vtysh_write_terminal, DEFUN (vtysh_show_running_config, vtysh_show_running_config_cmd, - "show running-config [<zebra|ripd|ripngd|ospfd|ospf6d|bgpd|isisd|pimd>]", + "show running-config [<zebra|ripd|ripngd|ospfd|ospf6d|ldpd|bgpd|isisd|pimd>]", SHOW_STR "Current operating configuration\n" "For the zebra daemon\n" @@ -2225,6 +2452,7 @@ DEFUN (vtysh_show_running_config, "For the ripng daemon\n" "For the ospf daemon\n" "For the ospfv6 daemon\n" + "For the ldp daemon\n" "For the bgp daemon\n" "For the isis daemon\n" "For the pim daemon\n") @@ -2238,7 +2466,7 @@ DEFUN (vtysh_integrated_config, "Set up miscellaneous service\n" "Write configuration into integrated file\n") { - vtysh_writeconfig_integrated = 1; + vtysh_write_integrated = WRITE_INTEGRATED_YES; return CMD_SUCCESS; } @@ -2249,7 +2477,7 @@ DEFUN (no_vtysh_integrated_config, "Set up miscellaneous service\n" "Write configuration into integrated file\n") { - vtysh_writeconfig_integrated = 0; + vtysh_write_integrated = WRITE_INTEGRATED_NO; return CMD_SUCCESS; } @@ -2329,6 +2557,24 @@ write_config_integrated(void) return CMD_SUCCESS; } +static bool vtysh_writeconfig_integrated(void) +{ + struct stat s; + + switch (vtysh_write_integrated) + { + case WRITE_INTEGRATED_UNSPECIFIED: + if (stat(integrate_default, &s) && errno == ENOENT) + return false; + return true; + case WRITE_INTEGRATED_NO: + return false; + case WRITE_INTEGRATED_YES: + return true; + } + return true; +} + DEFUN (vtysh_write_memory, vtysh_write_memory_cmd, "write []", @@ -2342,7 +2588,7 @@ DEFUN (vtysh_write_memory, FILE *fp; /* If integrated Quagga.conf explicitely set.
*/ - if (vtysh_writeconfig_integrated) + if (vtysh_writeconfig_integrated()) return write_config_integrated(); else backup_config_file(integrate_default); @@ -2388,7 +2634,6 @@ DEFUN (vtysh_copy_running_config, return vtysh_write_memory (self, vty, argc, argv); } - DEFUN (vtysh_terminal_length, vtysh_terminal_length_cmd, "terminal length (0-512)", @@ -2876,19 +3121,30 @@ vtysh_init_vty (void) /* #ifdef HAVE_IPV6 */ install_node (&bgp_ipv6_node, NULL); install_node (&bgp_ipv6m_node, NULL); +/* #endif */ +/*#if ENABLE_BGP_VNC */ + install_node (&bgp_vnc_defaults_node, NULL); + install_node (&bgp_vnc_nve_group_node, NULL); + install_node (&bgp_vnc_l2_group_node, NULL); /* #endif */ install_node (&ospf_node, NULL); /* #ifdef HAVE_IPV6 */ install_node (&ripng_node, NULL); install_node (&ospf6_node, NULL); /* #endif */ + install_node (&ldp_node, NULL); + install_node (&ldp_ipv4_node, NULL); + install_node (&ldp_ipv6_node, NULL); + install_node (&ldp_ipv4_iface_node, NULL); + install_node (&ldp_ipv6_iface_node, NULL); + install_node (&ldp_l2vpn_node, NULL); + install_node (&ldp_pseudowire_node, NULL); install_node (&keychain_node, NULL); install_node (&keychain_key_node, NULL); install_node (&isis_node, NULL); install_node (&vty_node, NULL); vtysh_install_default (VIEW_NODE); - vtysh_install_default (ENABLE_NODE); vtysh_install_default (CONFIG_NODE); vtysh_install_default (BGP_NODE); vtysh_install_default (RIP_NODE); @@ -2906,9 +3162,21 @@ vtysh_init_vty (void) vtysh_install_default (BGP_IPV4M_NODE); vtysh_install_default (BGP_IPV6_NODE); vtysh_install_default (BGP_IPV6M_NODE); + /* #if ENABLE_BGP_VNC */ + vtysh_install_default (BGP_VNC_DEFAULTS_NODE); + vtysh_install_default (BGP_VNC_NVE_GROUP_NODE); + vtysh_install_default (BGP_VNC_L2_GROUP_NODE); + /* #endif */ vtysh_install_default (OSPF_NODE); vtysh_install_default (RIPNG_NODE); vtysh_install_default (OSPF6_NODE); + vtysh_install_default (LDP_NODE); + vtysh_install_default (LDP_IPV4_NODE); + vtysh_install_default (LDP_IPV6_NODE); + vtysh_install_default (LDP_IPV4_IFACE_NODE); + vtysh_install_default (LDP_IPV6_IFACE_NODE); + vtysh_install_default (LDP_L2VPN_NODE); + vtysh_install_default (LDP_PSEUDOWIRE_NODE); vtysh_install_default (ISIS_NODE); vtysh_install_default (KEYCHAIN_NODE); vtysh_install_default (KEYCHAIN_KEY_NODE); @@ -2922,8 +3190,6 @@ vtysh_init_vty (void) install_element (VIEW_NODE, &vtysh_exit_all_cmd); install_element (CONFIG_NODE, &vtysh_exit_all_cmd); /* install_element (CONFIG_NODE, &vtysh_quit_all_cmd); */ - install_element (ENABLE_NODE, &vtysh_exit_all_cmd); - install_element (ENABLE_NODE, &vtysh_quit_all_cmd); install_element (RIP_NODE, &vtysh_exit_ripd_cmd); install_element (RIP_NODE, &vtysh_quit_ripd_cmd); install_element (RIPNG_NODE, &vtysh_exit_ripngd_cmd); @@ -2932,6 +3198,20 @@ vtysh_init_vty (void) install_element (OSPF_NODE, &vtysh_quit_ospfd_cmd); install_element (OSPF6_NODE, &vtysh_exit_ospf6d_cmd); install_element (OSPF6_NODE, &vtysh_quit_ospf6d_cmd); + install_element (LDP_NODE, &vtysh_exit_ldpd_cmd); + install_element (LDP_NODE, &vtysh_quit_ldpd_cmd); + install_element (LDP_IPV4_NODE, &vtysh_exit_ldpd_cmd); + install_element (LDP_IPV4_NODE, &vtysh_quit_ldpd_cmd); + install_element (LDP_IPV6_NODE, &vtysh_exit_ldpd_cmd); + install_element (LDP_IPV6_NODE, &vtysh_quit_ldpd_cmd); + install_element (LDP_IPV4_IFACE_NODE, &vtysh_exit_ldpd_cmd); + install_element (LDP_IPV4_IFACE_NODE, &vtysh_quit_ldpd_cmd); + install_element (LDP_IPV6_IFACE_NODE, &vtysh_exit_ldpd_cmd); + install_element (LDP_IPV6_IFACE_NODE, 
&vtysh_quit_ldpd_cmd); + install_element (LDP_L2VPN_NODE, &vtysh_exit_ldpd_cmd); + install_element (LDP_L2VPN_NODE, &vtysh_quit_ldpd_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &vtysh_exit_ldpd_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &vtysh_quit_ldpd_cmd); install_element (BGP_NODE, &vtysh_exit_bgpd_cmd); install_element (BGP_NODE, &vtysh_quit_bgpd_cmd); install_element (BGP_VPNV4_NODE, &vtysh_exit_bgpd_cmd); @@ -2950,6 +3230,12 @@ vtysh_init_vty (void) install_element (BGP_IPV6_NODE, &vtysh_quit_bgpd_cmd); install_element (BGP_IPV6M_NODE, &vtysh_exit_bgpd_cmd); install_element (BGP_IPV6M_NODE, &vtysh_quit_bgpd_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vtysh_exit_bgpd_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vtysh_quit_bgpd_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vtysh_exit_bgpd_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vtysh_quit_bgpd_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &vtysh_exit_bgpd_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &vtysh_quit_bgpd_cmd); install_element (ISIS_NODE, &vtysh_exit_isisd_cmd); install_element (ISIS_NODE, &vtysh_quit_isisd_cmd); install_element (KEYCHAIN_NODE, &vtysh_exit_ripd_cmd); @@ -2968,6 +3254,13 @@ vtysh_init_vty (void) install_element (RIPNG_NODE, &vtysh_end_all_cmd); install_element (OSPF_NODE, &vtysh_end_all_cmd); install_element (OSPF6_NODE, &vtysh_end_all_cmd); + install_element (LDP_NODE, &vtysh_end_all_cmd); + install_element (LDP_IPV4_NODE, &vtysh_end_all_cmd); + install_element (LDP_IPV6_NODE, &vtysh_end_all_cmd); + install_element (LDP_IPV4_IFACE_NODE, &vtysh_end_all_cmd); + install_element (LDP_IPV6_IFACE_NODE, &vtysh_end_all_cmd); + install_element (LDP_L2VPN_NODE, &vtysh_end_all_cmd); + install_element (LDP_PSEUDOWIRE_NODE, &vtysh_end_all_cmd); install_element (BGP_NODE, &vtysh_end_all_cmd); install_element (BGP_IPV4_NODE, &vtysh_end_all_cmd); install_element (BGP_IPV4M_NODE, &vtysh_end_all_cmd); @@ -2977,6 +3270,9 @@ vtysh_init_vty (void) install_element (BGP_ENCAPV6_NODE, &vtysh_end_all_cmd); install_element (BGP_IPV6_NODE, &vtysh_end_all_cmd); install_element (BGP_IPV6M_NODE, &vtysh_end_all_cmd); + install_element (BGP_VNC_DEFAULTS_NODE, &vtysh_end_all_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &vtysh_end_all_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &vtysh_end_all_cmd); install_element (ISIS_NODE, &vtysh_end_all_cmd); install_element (KEYCHAIN_NODE, &vtysh_end_all_cmd); install_element (KEYCHAIN_KEY_NODE, &vtysh_end_all_cmd); @@ -3007,6 +3303,13 @@ vtysh_init_vty (void) #ifdef HAVE_IPV6 install_element (CONFIG_NODE, &router_ospf6_cmd); #endif + install_element (CONFIG_NODE, &ldp_mpls_ldp_cmd); + install_element (LDP_NODE, &ldp_address_family_ipv4_cmd); + install_element (LDP_NODE, &ldp_address_family_ipv6_cmd); + install_element (LDP_IPV4_NODE, &ldp_interface_ifname_cmd); + install_element (LDP_IPV6_NODE, &ldp_interface_ifname_cmd); + install_element (CONFIG_NODE, &ldp_l2vpn_word_type_vpls_cmd); + install_element (LDP_L2VPN_NODE, &ldp_member_pseudowire_ifname_cmd); install_element (CONFIG_NODE, &router_isis_cmd); install_element (CONFIG_NODE, &router_bgp_cmd); install_element (BGP_NODE, &address_family_vpnv4_cmd); @@ -3015,6 +3318,8 @@ vtysh_init_vty (void) install_element (BGP_NODE, &address_family_vpnv6_unicast_cmd); install_element (BGP_NODE, &address_family_encap_cmd); install_element (BGP_NODE, &address_family_encapv6_cmd); + install_element (BGP_NODE, &vnc_defaults_cmd); + install_element (BGP_NODE, &vnc_nve_group_cmd); install_element (BGP_NODE, 
&address_family_ipv4_unicast_cmd); install_element (BGP_NODE, &address_family_ipv4_multicast_cmd); #ifdef HAVE_IPV6 @@ -3030,6 +3335,11 @@ vtysh_init_vty (void) install_element (BGP_IPV4M_NODE, &exit_address_family_cmd); install_element (BGP_IPV6_NODE, &exit_address_family_cmd); install_element (BGP_IPV6M_NODE, &exit_address_family_cmd); + + install_element (BGP_VNC_DEFAULTS_NODE, &exit_vnc_config_cmd); + install_element (BGP_VNC_NVE_GROUP_NODE, &exit_vnc_config_cmd); + install_element (BGP_VNC_L2_GROUP_NODE, &exit_vnc_config_cmd); + install_element (CONFIG_NODE, &key_chain_cmd); install_element (CONFIG_NODE, &route_map_cmd); install_element (CONFIG_NODE, &vtysh_line_vty_cmd); @@ -3056,11 +3366,8 @@ vtysh_init_vty (void) install_element (ENABLE_NODE, &vtysh_write_memory_cmd); install_element (VIEW_NODE, &vtysh_terminal_length_cmd); - install_element (ENABLE_NODE, &vtysh_terminal_length_cmd); install_element (VIEW_NODE, &vtysh_terminal_no_length_cmd); - install_element (ENABLE_NODE, &vtysh_terminal_no_length_cmd); install_element (VIEW_NODE, &vtysh_show_daemons_cmd); - install_element (ENABLE_NODE, &vtysh_show_daemons_cmd); install_element (VIEW_NODE, &vtysh_ping_cmd); install_element (VIEW_NODE, &vtysh_ping_ip_cmd); @@ -3074,37 +3381,21 @@ vtysh_init_vty (void) install_element (VIEW_NODE, &vtysh_telnet_cmd); install_element (VIEW_NODE, &vtysh_telnet_port_cmd); install_element (VIEW_NODE, &vtysh_ssh_cmd); -#endif - install_element (ENABLE_NODE, &vtysh_ping_cmd); - install_element (ENABLE_NODE, &vtysh_ping_ip_cmd); - install_element (ENABLE_NODE, &vtysh_traceroute_cmd); - install_element (ENABLE_NODE, &vtysh_traceroute_ip_cmd); -#ifdef HAVE_IPV6 - install_element (ENABLE_NODE, &vtysh_ping6_cmd); - install_element (ENABLE_NODE, &vtysh_traceroute6_cmd); #endif #if defined(HAVE_SHELL_ACCESS) - install_element (ENABLE_NODE, &vtysh_telnet_cmd); - install_element (ENABLE_NODE, &vtysh_telnet_port_cmd); - install_element (ENABLE_NODE, &vtysh_ssh_cmd); install_element (ENABLE_NODE, &vtysh_start_shell_cmd); install_element (ENABLE_NODE, &vtysh_start_bash_cmd); install_element (ENABLE_NODE, &vtysh_start_zsh_cmd); #endif install_element (VIEW_NODE, &vtysh_show_memory_cmd); - install_element (ENABLE_NODE, &vtysh_show_memory_cmd); install_element (VIEW_NODE, &vtysh_show_work_queues_cmd); - install_element (ENABLE_NODE, &vtysh_show_work_queues_cmd); - install_element (ENABLE_NODE, &vtysh_show_work_queues_daemon_cmd); install_element (VIEW_NODE, &vtysh_show_work_queues_daemon_cmd); install_element (VIEW_NODE, &vtysh_show_thread_cmd); - install_element (ENABLE_NODE, &vtysh_show_thread_cmd); /* Logging */ - install_element (ENABLE_NODE, &vtysh_show_logging_cmd); install_element (VIEW_NODE, &vtysh_show_logging_cmd); install_element (CONFIG_NODE, &vtysh_log_stdout_cmd); install_element (CONFIG_NODE, &vtysh_log_stdout_level_cmd); diff --git a/vtysh/vtysh.h b/vtysh/vtysh.h index 3aa2bad81e..3aa7b8dc83 100644 --- a/vtysh/vtysh.h +++ b/vtysh/vtysh.h @@ -33,10 +33,11 @@ DECLARE_MGROUP(MVTYSH) #define VTYSH_BGPD 0x20 #define VTYSH_ISISD 0x40 #define VTYSH_PIMD 0x100 +#define VTYSH_LDPD 0x200 -#define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD +#define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD #define VTYSH_RMAP VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_PIMD -#define VTYSH_INTERFACE 
VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_ISISD|VTYSH_PIMD +#define VTYSH_INTERFACE VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_ISISD|VTYSH_PIMD #define VTYSH_NS VTYSH_ZEBRA #define VTYSH_VRF VTYSH_ZEBRA @@ -44,6 +45,14 @@ DECLARE_MGROUP(MVTYSH) #define VTYSH_DEFAULT_CONFIG "vtysh.conf" #define QUAGGA_DEFAULT_CONFIG "Quagga.conf" +enum vtysh_write_integrated { + WRITE_INTEGRATED_UNSPECIFIED, + WRITE_INTEGRATED_NO, + WRITE_INTEGRATED_YES +}; + +extern enum vtysh_write_integrated vtysh_write_integrated; + void vtysh_init_vty (void); void vtysh_init_cmd (void); extern int vtysh_connect_all (const char *optional_daemon_name); diff --git a/vtysh/vtysh_config.c b/vtysh/vtysh_config.c index 760003eb3b..4b0a390843 100644 --- a/vtysh/vtysh_config.c +++ b/vtysh/vtysh_config.c @@ -33,8 +33,6 @@ DEFINE_MTYPE_STATIC(MVTYSH, VTYSH_CONFIG_LINE, "Vtysh configuration line") vector configvec; -extern int vtysh_writeconfig_integrated; - struct config { /* Configuration node name. */ @@ -173,10 +171,36 @@ vtysh_config_parse_line (const char *line) /* Store line to current configuration. */ if (config) { - if (config->index == RMAP_NODE || + if (strncmp (line, " address-family vpnv4", + strlen (" address-family vpnv4")) == 0) + config = config_get (BGP_VPNV4_NODE, line); + else if (strncmp (line, " address-family vpn6", + strlen (" address-family vpn6")) == 0) + config = config_get (BGP_VPNV6_NODE, line); + else if (strncmp (line, " address-family encapv6", + strlen (" address-family encapv6")) == 0) + config = config_get (BGP_ENCAPV6_NODE, line); + else if (strncmp (line, " address-family encap", + strlen (" address-family encap")) == 0) + config = config_get (BGP_ENCAP_NODE, line); + else if (strncmp (line, " address-family ipv4 multicast", + strlen (" address-family ipv4 multicast")) == 0) + config = config_get (BGP_IPV4M_NODE, line); + else if (strncmp (line, " address-family ipv6", + strlen (" address-family ipv6")) == 0) + config = config_get (BGP_IPV6_NODE, line); + else if (strncmp (line, " vnc defaults", + strlen (" vnc defaults")) == 0) + config = config_get (BGP_VNC_DEFAULTS_NODE, line); + else if (strncmp (line, " vnc nve-group", + strlen (" vnc nve-group")) == 0) + config = config_get (BGP_VNC_NVE_GROUP_NODE, line); + else if (strncmp (line, " vnc l2-group", + strlen (" vnc l2-group")) == 0) + config = config_get (BGP_VNC_L2_GROUP_NODE, line); + else if (config->index == RMAP_NODE || config->index == INTERFACE_NODE || config->index == NS_NODE || - config->index == VRF_NODE || config->index == VTY_NODE) config_add_line_uniq (config->line, line); else @@ -202,6 +226,10 @@ vtysh_config_parse_line (const char *line) config = config_get (OSPF_NODE, line); else if (strncmp (line, "router ospf6", strlen ("router ospf6")) == 0) config = config_get (OSPF6_NODE, line); + else if (strncmp (line, "mpls ldp", strlen ("mpls ldp")) == 0) + config = config_get (LDP_NODE, line); + else if (strncmp (line, "l2vpn", strlen ("l2vpn")) == 0) + config = config_get (LDP_L2VPN_NODE, line); else if (strncmp (line, "router bgp", strlen ("router bgp")) == 0) config = config_get (BGP_NODE, line); else if (strncmp (line, "router isis", strlen ("router isis")) == 0) @@ -256,6 +284,8 @@ vtysh_config_parse_line (const char *line) config = config_get (PROTOCOL_NODE, line); else if (strncmp (line, "ipv6 nht", strlen ("ipv6 nht")) == 0) config = config_get (PROTOCOL_NODE, line); + else if (strncmp (line, "mpls", strlen ("mpls")) == 0) + config = config_get (MPLS_NODE, line); 
else { if (strncmp (line, "log", strlen ("log")) == 0 @@ -300,7 +330,7 @@ vtysh_config_parse (char *line) || (I) == AS_LIST_NODE || (I) == COMMUNITY_LIST_NODE || \ (I) == ACCESS_IPV6_NODE || (I) == PREFIX_IPV6_NODE \ || (I) == SERVICE_NODE || (I) == FORWARDING_NODE || (I) == DEBUG_NODE \ - || (I) == AAA_NODE || (I) == VRF_DEBUG_NODE) + || (I) == AAA_NODE || (I) == VRF_DEBUG_NODE || (I) == MPLS_NODE) /* Display configuration to file pointer. */ void @@ -426,8 +456,10 @@ vtysh_config_write () sprintf (line, "hostname %s", host.name); vtysh_config_parse_line(line); } - if (!vtysh_writeconfig_integrated) + if (vtysh_write_integrated == WRITE_INTEGRATED_NO) vtysh_config_parse_line ("no service integrated-vtysh-config"); + if (vtysh_write_integrated == WRITE_INTEGRATED_YES) + vtysh_config_parse_line ("service integrated-vtysh-config"); user_config_write (); } diff --git a/zebra/Makefile.am b/zebra/Makefile.am index 32c94d3f0e..52766f37ba 100644 --- a/zebra/Makefile.am +++ b/zebra/Makefile.am @@ -1,6 +1,9 @@ +include ../common.am + ## Process this file with automake to produce Makefile.in. -AM_CPPFLAGS = -I.. -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib +AM_CPPFLAGS = -I.. -I$(top_srcdir) -I$(top_srcdir)/lib -I$(top_builddir)/lib \ + -DVTY_DEPRECATE_INDEX DEFS = @DEFS@ -DSYSCONFDIR=\"$(sysconfdir)/\" INSTALL_SDATA=@INSTALL@ -m 600 @@ -12,14 +15,23 @@ rt_method = @RT_METHOD@ rtread_method = @RTREAD_METHOD@ kernel_method = @KERNEL_METHOD@ ioctl_method = @IOCTL_METHOD@ +mpls_method = @MPLS_METHOD@ otherobj = $(ioctl_method) $(ipforward) $(if_method) \ - $(rt_method) $(rtread_method) $(kernel_method) + $(rt_method) $(rtread_method) $(kernel_method) $(mpls_method) if HAVE_NETLINK othersrc = zebra_fpm_netlink.c endif +if HAVE_PROTOBUF +protobuf_srcs = zebra_fpm_protobuf.c +endif + +if DEV_BUILD +dev_srcs = zebra_fpm_dt.c +endif + AM_CFLAGS = $(WERROR) sbin_PROGRAMS = zebra @@ -32,13 +44,15 @@ zebra_SOURCES = \ redistribute.c debug.c rtadv.c zebra_snmp.c zebra_vty.c \ irdp_main.c irdp_interface.c irdp_packet.c router-id.c zebra_fpm.c \ $(othersrc) zebra_ptm.c zebra_rnh.c zebra_ptm_redistribute.c \ - zebra_ns.c zebra_vrf.c zebra_static.c + zebra_ns.c zebra_vrf.c zebra_static.c zebra_mpls.c zebra_mpls_vty.c \ + $(protobuf_srcs) \ + $(dev_srcs) testzebra_SOURCES = test_main.c zebra_rib.c interface.c connected.c debug.c \ zebra_vty.c zebra_ptm.c zebra_routemap.c zebra_ns.c zebra_vrf.c \ kernel_null.c redistribute_null.c ioctl_null.c misc_null.c zebra_rnh_null.c \ zebra_ptm_null.c rtadv_null.c if_null.c zserv_null.c zebra_static.c \ - zebra_memory.c + zebra_memory.c zebra_mpls.c zebra_mpls_vty.c zebra_mpls_null.c noinst_HEADERS = \ zebra_memory.h \ @@ -46,9 +60,9 @@ noinst_HEADERS = \ interface.h ipforward.h irdp.h router-id.h kernel_socket.h \ rt_netlink.h zebra_fpm.h zebra_fpm_private.h zebra_rnh.h \ zebra_ptm_redistribute.h zebra_ptm.h zebra_routemap.h \ - zebra_ns.h zebra_vrf.h ioctl_solaris.h zebra_static.h + zebra_ns.h zebra_vrf.h ioctl_solaris.h zebra_static.h zebra_mpls.h -zebra_LDADD = $(otherobj) ../lib/libzebra.la $(LIBCAP) +zebra_LDADD = $(otherobj) ../lib/libzebra.la $(LIBCAP) $(Q_FPM_PB_CLIENT_LDOPTS) testzebra_LDADD = ../lib/libzebra.la $(LIBCAP) @@ -60,6 +74,7 @@ EXTRA_DIST = if_ioctl.c if_ioctl_solaris.c if_netlink.c \ rt_socket.c rtread_netlink.c rtread_sysctl.c \ rtread_getmsg.c kernel_socket.c kernel_netlink.c \ ioctl.c ioctl_solaris.c \ + zebra_mpls_netlink.c zebra_mpls_openbsd.c \ GNOME-SMI GNOME-PRODUCT-ZEBRA-MIB client : client_main.o ../lib/libzebra.la diff --git 
a/zebra/connected.c b/zebra/connected.c index 97eb79d573..bc54aab01c 100644 --- a/zebra/connected.c +++ b/zebra/connected.c @@ -39,6 +39,8 @@ #include "zebra/interface.h" #include "zebra/connected.h" #include "zebra/rtadv.h" +#include "zebra/zebra_mpls.h" +#include "zebra/debug.h" /* communicate the withdrawal of a connected address */ static void @@ -214,6 +216,15 @@ connected_up_ipv4 (struct interface *ifp, struct connected *ifc) zlog_debug ("%u: IF %s IPv4 address add/up, scheduling RIB processing", ifp->vrf_id, ifp->name); rib_update (ifp->vrf_id, RIB_UPDATE_IF_CHANGE); + + /* Schedule LSP forwarding entries for processing, if appropriate. */ + if (ifp->vrf_id == VRF_DEFAULT) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("%u: IF %s IPv4 address add/up, scheduling MPLS processing", + ifp->vrf_id, ifp->name); + mpls_mark_lsps_for_processing (vrf_info_lookup(ifp->vrf_id)); + } } /* Add connected IPv4 route to the interface. */ @@ -334,6 +345,15 @@ connected_down_ipv4 (struct interface *ifp, struct connected *ifc) ifp->vrf_id, ifp->name); rib_update (ifp->vrf_id, RIB_UPDATE_IF_CHANGE); + + /* Schedule LSP forwarding entries for processing, if appropriate. */ + if (ifp->vrf_id == VRF_DEFAULT) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("%u: IF %s IPv4 address add/up, scheduling MPLS processing", + ifp->vrf_id, ifp->name); + mpls_mark_lsps_for_processing (vrf_info_lookup(ifp->vrf_id)); + } } /* Delete connected IPv4 route to the interface. */ @@ -360,6 +380,15 @@ connected_delete_ipv4 (struct interface *ifp, int flags, struct in_addr *addr, ifp->vrf_id, ifp->name); rib_update (ifp->vrf_id, RIB_UPDATE_IF_CHANGE); + + /* Schedule LSP forwarding entries for processing, if appropriate. */ + if (ifp->vrf_id == VRF_DEFAULT) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("%u: IF %s IPv4 address add/up, scheduling MPLS processing", + ifp->vrf_id, ifp->name); + mpls_mark_lsps_for_processing (vrf_info_lookup(ifp->vrf_id)); + } } void @@ -390,6 +419,15 @@ connected_up_ipv6 (struct interface *ifp, struct connected *ifc) ifp->vrf_id, ifp->name); rib_update (ifp->vrf_id, RIB_UPDATE_IF_CHANGE); + + /* Schedule LSP forwarding entries for processing, if appropriate. */ + if (ifp->vrf_id == VRF_DEFAULT) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("%u: IF %s IPv4 address add/up, scheduling MPLS processing", + ifp->vrf_id, ifp->name); + mpls_mark_lsps_for_processing (vrf_info_lookup(ifp->vrf_id)); + } } /* Add connected IPv6 route to the interface. */ @@ -480,6 +518,15 @@ connected_down_ipv6 (struct interface *ifp, struct connected *ifc) ifp->vrf_id, ifp->name); rib_update (ifp->vrf_id, RIB_UPDATE_IF_CHANGE); + + /* Schedule LSP forwarding entries for processing, if appropriate. */ + if (ifp->vrf_id == VRF_DEFAULT) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("%u: IF %s IPv4 address add/up, scheduling MPLS processing", + ifp->vrf_id, ifp->name); + mpls_mark_lsps_for_processing (vrf_info_lookup(ifp->vrf_id)); + } } void @@ -505,6 +552,15 @@ connected_delete_ipv6 (struct interface *ifp, struct in6_addr *address, ifp->vrf_id, ifp->name); rib_update (ifp->vrf_id, RIB_UPDATE_IF_CHANGE); + + /* Schedule LSP forwarding entries for processing, if appropriate. 
*/ + if (ifp->vrf_id == VRF_DEFAULT) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("%u: IF %s IPv4 address add/up, scheduling MPLS processing", + ifp->vrf_id, ifp->name); + mpls_mark_lsps_for_processing (vrf_info_lookup(ifp->vrf_id)); + } } int diff --git a/zebra/debug.c b/zebra/debug.c index 6acf32b43d..65ae3fd174 100644 --- a/zebra/debug.c +++ b/zebra/debug.c @@ -31,6 +31,7 @@ unsigned long zebra_debug_kernel; unsigned long zebra_debug_rib; unsigned long zebra_debug_fpm; unsigned long zebra_debug_nht; +unsigned long zebra_debug_mpls; DEFUN (show_debugging_zebra, show_debugging_zebra_cmd, @@ -82,6 +83,8 @@ DEFUN (show_debugging_zebra, vty_out (vty, " Zebra FPM debugging is on%s", VTY_NEWLINE); if (IS_ZEBRA_DEBUG_NHT) vty_out (vty, " Zebra next-hop tracking debugging is on%s", VTY_NEWLINE); + if (IS_ZEBRA_DEBUG_MPLS) + vty_out (vty, " Zebra MPLS debugging is on%s", VTY_NEWLINE); return CMD_SUCCESS; } @@ -108,6 +111,17 @@ DEFUN (debug_zebra_nht, return CMD_WARNING; } +DEFUN (debug_zebra_mpls, + debug_zebra_mpls_cmd, + "debug zebra mpls", + DEBUG_STR + "Zebra configuration\n" + "Debug option set for zebra MPLS LSPs\n") +{ + zebra_debug_mpls = ZEBRA_DEBUG_MPLS; + return CMD_WARNING; +} + DEFUN (debug_zebra_packet, debug_zebra_packet_cmd, "debug zebra packet", @@ -248,6 +262,18 @@ DEFUN (no_debug_zebra_nht, return CMD_SUCCESS; } +DEFUN (no_debug_zebra_mpls, + no_debug_zebra_mpls_cmd, + "no debug zebra mpls", + NO_STR + DEBUG_STR + "Zebra configuration\n" + "Debug option set for zebra MPLS LSPs\n") +{ + zebra_debug_mpls = 0; + return CMD_SUCCESS; +} + DEFUN (no_debug_zebra_packet, no_debug_zebra_packet_cmd, "no debug zebra packet", @@ -406,6 +432,11 @@ config_write_debug (struct vty *vty) vty_out (vty, "debug zebra fpm%s", VTY_NEWLINE); write++; } + if (IS_ZEBRA_DEBUG_MPLS) + { + vty_out (vty, "debug zebra mpls%s", VTY_NEWLINE); + write++; + } return write; } @@ -417,14 +448,15 @@ zebra_debug_init (void) zebra_debug_kernel = 0; zebra_debug_rib = 0; zebra_debug_fpm = 0; + zebra_debug_mpls = 0; install_node (&debug_node, config_write_debug); install_element (VIEW_NODE, &show_debugging_zebra_cmd); - install_element (ENABLE_NODE, &show_debugging_zebra_cmd); install_element (ENABLE_NODE, &debug_zebra_events_cmd); install_element (ENABLE_NODE, &debug_zebra_nht_cmd); + install_element (ENABLE_NODE, &debug_zebra_mpls_cmd); install_element (ENABLE_NODE, &debug_zebra_packet_cmd); install_element (ENABLE_NODE, &debug_zebra_packet_direct_cmd); install_element (ENABLE_NODE, &debug_zebra_packet_detail_cmd); @@ -435,6 +467,7 @@ zebra_debug_init (void) install_element (ENABLE_NODE, &debug_zebra_fpm_cmd); install_element (ENABLE_NODE, &no_debug_zebra_events_cmd); install_element (ENABLE_NODE, &no_debug_zebra_nht_cmd); + install_element (ENABLE_NODE, &no_debug_zebra_mpls_cmd); install_element (ENABLE_NODE, &no_debug_zebra_packet_cmd); install_element (ENABLE_NODE, &no_debug_zebra_kernel_cmd); install_element (ENABLE_NODE, &no_debug_zebra_kernel_msgdump_cmd); @@ -444,6 +477,7 @@ zebra_debug_init (void) install_element (CONFIG_NODE, &debug_zebra_events_cmd); install_element (CONFIG_NODE, &debug_zebra_nht_cmd); + install_element (CONFIG_NODE, &debug_zebra_mpls_cmd); install_element (CONFIG_NODE, &debug_zebra_packet_cmd); install_element (CONFIG_NODE, &debug_zebra_packet_direct_cmd); install_element (CONFIG_NODE, &debug_zebra_packet_detail_cmd); @@ -454,6 +488,7 @@ zebra_debug_init (void) install_element (CONFIG_NODE, &debug_zebra_fpm_cmd); install_element (CONFIG_NODE, &no_debug_zebra_events_cmd); install_element 
(CONFIG_NODE, &no_debug_zebra_nht_cmd); + install_element (CONFIG_NODE, &no_debug_zebra_mpls_cmd); install_element (CONFIG_NODE, &no_debug_zebra_packet_cmd); install_element (CONFIG_NODE, &no_debug_zebra_kernel_cmd); install_element (CONFIG_NODE, &no_debug_zebra_kernel_msgdump_cmd); diff --git a/zebra/debug.h b/zebra/debug.h index 4416068bf2..f8ebf3d616 100644 --- a/zebra/debug.h +++ b/zebra/debug.h @@ -41,6 +41,8 @@ #define ZEBRA_DEBUG_FPM 0x01 #define ZEBRA_DEBUG_NHT 0x01 +#define ZEBRA_DEBUG_MPLS 0x01 + /* Debug related macro. */ #define IS_ZEBRA_DEBUG_EVENT (zebra_debug_event & ZEBRA_DEBUG_EVENT) @@ -61,6 +63,7 @@ #define IS_ZEBRA_DEBUG_FPM (zebra_debug_fpm & ZEBRA_DEBUG_FPM) #define IS_ZEBRA_DEBUG_NHT (zebra_debug_nht & ZEBRA_DEBUG_NHT) +#define IS_ZEBRA_DEBUG_MPLS (zebra_debug_mpls & ZEBRA_DEBUG_MPLS) extern unsigned long zebra_debug_event; extern unsigned long zebra_debug_packet; @@ -68,6 +71,7 @@ extern unsigned long zebra_debug_kernel; extern unsigned long zebra_debug_rib; extern unsigned long zebra_debug_fpm; extern unsigned long zebra_debug_nht; +extern unsigned long zebra_debug_mpls; extern void zebra_debug_init (void); diff --git a/zebra/if_ioctl.c b/zebra/if_ioctl.c index 101529c321..5b7b5863e5 100644 --- a/zebra/if_ioctl.c +++ b/zebra/if_ioctl.c @@ -31,6 +31,7 @@ #include "zebra_memory.h" #include "log.h" #include "vrf.h" +#include "vty.h" #include "zebra/interface.h" #include "zebra/rib.h" diff --git a/zebra/if_ioctl_solaris.c b/zebra/if_ioctl_solaris.c index 45a45f3e81..0e727b9dc4 100644 --- a/zebra/if_ioctl_solaris.c +++ b/zebra/if_ioctl_solaris.c @@ -32,6 +32,7 @@ #include "log.h" #include "privs.h" #include "vrf.h" +#include "vty.h" #include "zebra/interface.h" #include "zebra/ioctl_solaris.h" diff --git a/zebra/interface.c b/zebra/interface.c index 62340a460a..bbe19fbaf4 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -1233,13 +1233,12 @@ DEFUN_NOSH (zebra_interface, "Interface's name\n") { int ret; - struct interface *ifp; /* Call lib interface() */ if ((ret = interface_cmd.func (self, vty, argc, argv)) != CMD_SUCCESS) return ret; - ifp = vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); if (ifp->ifindex == IFINDEX_INTERNAL) /* Is this really necessary? 
Shouldn't status be initialized to 0 @@ -1279,14 +1278,13 @@ DEFUN_NOSH (zebra_vrf, "Select a VRF to configure\n" "VRF's name\n") { + // VTY_DECLVAR_CONTEXT (vrf, vrfp); int ret; /* Call lib vrf() */ if ((ret = vrf_cmd.func (self, vty, argc, argv)) != CMD_SUCCESS) return ret; - // vrfp = vty->index; - return ret; } @@ -1500,11 +1498,10 @@ DEFUN (multicast, "multicast", "Set multicast flag to interface\n") { + VTY_DECLVAR_CONTEXT (interface, ifp); int ret; - struct interface *ifp; struct zebra_if *if_data; - ifp = (struct interface *) vty->index; if (CHECK_FLAG (ifp->status, ZEBRA_INTERFACE_ACTIVE)) { ret = if_set_flags (ifp, IFF_MULTICAST); @@ -1527,11 +1524,10 @@ DEFUN (no_multicast, NO_STR "Unset multicast flag to interface\n") { + VTY_DECLVAR_CONTEXT (interface, ifp); int ret; - struct interface *ifp; struct zebra_if *if_data; - ifp = (struct interface *) vty->index; if (CHECK_FLAG (ifp->status, ZEBRA_INTERFACE_ACTIVE)) { ret = if_unset_flags (ifp, IFF_MULTICAST); @@ -1553,10 +1549,9 @@ DEFUN (linkdetect, "link-detect", "Enable link detection on interface\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); int if_was_operative; - ifp = (struct interface *) vty->index; if_was_operative = if_is_no_ptm_operative(ifp); SET_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION); @@ -1576,10 +1571,9 @@ DEFUN (no_linkdetect, NO_STR "Disable link detection on interface\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); int if_was_operative; - ifp = (struct interface *) vty->index; if_was_operative = if_is_no_ptm_operative(ifp); UNSET_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION); @@ -1596,11 +1590,10 @@ DEFUN (shutdown_if, "shutdown", "Shutdown the selected interface\n") { + VTY_DECLVAR_CONTEXT (interface, ifp); int ret; - struct interface *ifp; struct zebra_if *if_data; - ifp = (struct interface *) vty->index; if (ifp->ifindex != IFINDEX_INTERNAL) { ret = if_unset_flags (ifp, IFF_UP); @@ -1623,12 +1616,10 @@ DEFUN (no_shutdown_if, NO_STR "Shutdown the selected interface\n") { + VTY_DECLVAR_CONTEXT (interface, ifp); int ret; - struct interface *ifp; struct zebra_if *if_data; - ifp = (struct interface *) vty->index; - if (ifp->ifindex != IFINDEX_INTERNAL) { ret = if_set_flags (ifp, IFF_UP | IFF_RUNNING); @@ -1658,10 +1649,9 @@ DEFUN (bandwidth_if, "Bandwidth in megabits\n") { int idx_number = 1; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); unsigned int bandwidth; - ifp = (struct interface *) vty->index; bandwidth = strtol(argv[idx_number]->arg, NULL, 10); /* bandwidth range is <1-100000> */ @@ -1687,9 +1677,7 @@ DEFUN (no_bandwidth_if, "Set bandwidth informational parameter\n" "Bandwidth in megabits\n") { - struct interface *ifp; - - ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); ifp->bandwidth = 0; @@ -1757,6 +1745,7 @@ DEFUN (link_params, "link-params", LINK_PARAMS_STR) { + /* vty->qobj_index stays the same @ interface pointer */ vty->node = LINK_PARAMS_NODE; return CMD_SUCCESS; @@ -1768,7 +1757,7 @@ DEFUN (link_params_enable, "enable", "Activate link parameters on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* This command could be issue at startup, when activate MPLS TE */ /* on a new interface or after a ON / OFF / ON toggle */ @@ -1797,7 +1786,7 @@ DEFUN (no_link_params_enable, NO_STR "Disable link parameters on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, 
ifp); zlog_debug ("MPLS-TE: disable TE link parameters on interface %s", ifp->name); @@ -1818,7 +1807,7 @@ DEFUN (link_params_metric, "Metric value in decimal\n") { int idx_number = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); u_int32_t metric; @@ -1834,9 +1823,9 @@ DEFUN (no_link_params_metric, no_link_params_metric_cmd, "no metric", NO_STR - "Disbale Link Metric on this interface\n") + "Disable Link Metric on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* Unset TE Metric */ link_param_cmd_unset(ifp, LP_TE); @@ -1851,7 +1840,7 @@ DEFUN (link_params_maxbw, "Bytes/second (IEEE floating point format)\n") { int idx_bandwidth = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); float bw; @@ -1896,7 +1885,7 @@ DEFUN (link_params_max_rsv_bw, "Bytes/second (IEEE floating point format)\n") { int idx_bandwidth = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); float bw; @@ -1931,7 +1920,7 @@ DEFUN (link_params_unrsv_bw, { int idx_number = 1; int idx_bandwidth = 2; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); int priority; float bw; @@ -1973,7 +1962,7 @@ DEFUN (link_params_admin_grp, "32-bit Hexadecimal value (e.g. 0xa1)\n") { int idx_bitpattern = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); unsigned long value; @@ -1994,9 +1983,9 @@ DEFUN (no_link_params_admin_grp, no_link_params_admin_grp_cmd, "no admin-grp", NO_STR - "Disbale Administrative group membership on this interface\n") + "Disable Administrative group membership on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* Unset Admin Group */ link_param_cmd_unset(ifp, LP_ADM_GRP); @@ -2016,7 +2005,7 @@ DEFUN (link_params_inter_as, int idx_ipv4 = 1; int idx_number = 3; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); struct in_addr addr; u_int32_t as; @@ -2052,8 +2041,7 @@ DEFUN (no_link_params_inter_as, NO_STR "Remove Neighbor IP address and AS number for Inter-AS TE\n") { - - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); /* Reset Remote IP and AS neighbor */ @@ -2088,7 +2076,7 @@ DEFUN (link_params_delay, VTY_GET_ULONG("maximum delay", high, argv[5]->arg); } - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); u_int8_t update = 0; @@ -2154,9 +2142,9 @@ DEFUN (no_link_params_delay, no_link_params_delay_cmd, "no delay", NO_STR - "Disbale Unidirectional Average, Min & Max Link Delay on this interface\n") + "Disable Unidirectional Average, Min & Max Link Delay on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); /* Unset Delays 
*/ @@ -2180,7 +2168,7 @@ DEFUN (link_params_delay_var, "delay variation in micro-second as decimal (0...16777215)\n") { int idx_number = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); u_int32_t value; @@ -2196,9 +2184,9 @@ DEFUN (no_link_params_delay_var, no_link_params_delay_var_cmd, "no delay-variation", NO_STR - "Disbale Unidirectional Delay Variation on this interface\n") + "Disable Unidirectional Delay Variation on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* Unset Delay Variation */ link_param_cmd_unset(ifp, LP_DELAY_VAR); @@ -2213,7 +2201,7 @@ DEFUN (link_params_pkt_loss, "percentage of total traffic by 0.000003% step and less than 50.331642%\n") { int idx_percentage = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); float fval; @@ -2237,9 +2225,9 @@ DEFUN (no_link_params_pkt_loss, no_link_params_pkt_loss_cmd, "no packet-loss", NO_STR - "Disbale Unidirectional Link Packet Loss on this interface\n") + "Disable Unidirectional Link Packet Loss on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* Unset Packet Loss */ link_param_cmd_unset(ifp, LP_PKT_LOSS); @@ -2254,7 +2242,7 @@ DEFUN (link_params_res_bw, "Bytes/second (IEEE floating point format)\n") { int idx_bandwidth = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); float bw; @@ -2284,9 +2272,9 @@ DEFUN (no_link_params_res_bw, no_link_params_res_bw_cmd, "no res-bw", NO_STR - "Disbale Unidirectional Residual Bandwidth on this interface\n") + "Disable Unidirectional Residual Bandwidth on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* Unset Residual Bandwidth */ link_param_cmd_unset(ifp, LP_RES_BW); @@ -2301,7 +2289,7 @@ DEFUN (link_params_ava_bw, "Bytes/second (IEEE floating point format)\n") { int idx_bandwidth = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); float bw; @@ -2331,9 +2319,9 @@ DEFUN (no_link_params_ava_bw, no_link_params_ava_bw_cmd, "no ava-bw", NO_STR - "Disbale Unidirectional Available Bandwidth on this interface\n") + "Disable Unidirectional Available Bandwidth on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* Unset Available Bandwidth */ link_param_cmd_unset(ifp, LP_AVA_BW); @@ -2348,7 +2336,7 @@ DEFUN (link_params_use_bw, "Bytes/second (IEEE floating point format)\n") { int idx_bandwidth = 1; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct if_link_params *iflp = if_link_params_get (ifp); float bw; @@ -2378,9 +2366,9 @@ DEFUN (no_link_params_use_bw, no_link_params_use_bw_cmd, "no use-bw", NO_STR - "Disbale Unidirectional Utilised Bandwidth on this interface\n") + "Disable Unidirectional Utilised Bandwidth on this interface\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); /* Unset Utilised Bandwidth */ link_param_cmd_unset(ifp, LP_USE_BW); @@ -2537,7 +2525,8 @@ DEFUN 
(ip_address, "IP address (e.g. 10.0.0.1/8)\n") { int idx_ipv4_prefixlen = 2; - return ip_address_install (vty, vty->index, argv[idx_ipv4_prefixlen]->arg, NULL, NULL); + VTY_DECLVAR_CONTEXT (interface, ifp); + return ip_address_install (vty, ifp, argv[idx_ipv4_prefixlen]->arg, NULL, NULL); } DEFUN (no_ip_address, @@ -2549,7 +2538,8 @@ DEFUN (no_ip_address, "IP Address (e.g. 10.0.0.1/8)") { int idx_ipv4_prefixlen = 3; - return ip_address_uninstall (vty, vty->index, argv[idx_ipv4_prefixlen]->arg, NULL, NULL); + VTY_DECLVAR_CONTEXT (interface, ifp); + return ip_address_uninstall (vty, ifp, argv[idx_ipv4_prefixlen]->arg, NULL, NULL); } @@ -2565,7 +2555,8 @@ DEFUN (ip_address_label, { int idx_ipv4_prefixlen = 2; int idx_line = 4; - return ip_address_install (vty, vty->index, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_line]->arg); + VTY_DECLVAR_CONTEXT (interface, ifp); + return ip_address_install (vty, ifp, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_line]->arg); } DEFUN (no_ip_address_label, @@ -2580,7 +2571,8 @@ DEFUN (no_ip_address_label, { int idx_ipv4_prefixlen = 3; int idx_line = 5; - return ip_address_uninstall (vty, vty->index, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_line]->arg); + VTY_DECLVAR_CONTEXT (interface, ifp); + return ip_address_uninstall (vty, ifp, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_line]->arg); } #endif /* HAVE_NETLINK */ @@ -2744,7 +2736,8 @@ DEFUN (ipv6_address, "IPv6 address (e.g. 3ffe:506::1/48)\n") { int idx_ipv6_prefixlen = 2; - return ipv6_address_install (vty, vty->index, argv[idx_ipv6_prefixlen]->arg, NULL, NULL, 0); + VTY_DECLVAR_CONTEXT (interface, ifp); + return ipv6_address_install (vty, ifp, argv[idx_ipv6_prefixlen]->arg, NULL, NULL, 0); } DEFUN (no_ipv6_address, @@ -2756,7 +2749,8 @@ DEFUN (no_ipv6_address, "IPv6 address (e.g. 3ffe:506::1/48)\n") { int idx_ipv6_prefixlen = 3; - return ipv6_address_uninstall (vty, vty->index, argv[idx_ipv6_prefixlen]->arg, NULL, NULL, 0); + VTY_DECLVAR_CONTEXT (interface, ifp); + return ipv6_address_uninstall (vty, ifp, argv[idx_ipv6_prefixlen]->arg, NULL, NULL, 0); } #endif /* HAVE_IPV6 */ @@ -2933,10 +2927,7 @@ zebra_if_init (void) install_element (VIEW_NODE, &show_interface_vrf_all_cmd); install_element (VIEW_NODE, &show_interface_name_vrf_cmd); install_element (VIEW_NODE, &show_interface_name_vrf_all_cmd); - install_element (ENABLE_NODE, &show_interface_cmd); - install_element (ENABLE_NODE, &show_interface_vrf_all_cmd); - install_element (ENABLE_NODE, &show_interface_name_vrf_cmd); - install_element (ENABLE_NODE, &show_interface_name_vrf_all_cmd); + install_element (ENABLE_NODE, &show_interface_desc_cmd); install_element (ENABLE_NODE, &show_interface_desc_vrf_all_cmd); install_element (CONFIG_NODE, &zebra_interface_cmd); diff --git a/zebra/ioctl_solaris.c b/zebra/ioctl_solaris.c index 12737cbf06..b5bf1ccb0a 100644 --- a/zebra/ioctl_solaris.c +++ b/zebra/ioctl_solaris.c @@ -28,6 +28,7 @@ #include "ioctl.h" #include "log.h" #include "privs.h" +#include "vty.h" #include "zebra/rib.h" #include "zebra/rt.h" diff --git a/zebra/irdp_interface.c b/zebra/irdp_interface.c index 2f741380f5..3e244f5af3 100644 --- a/zebra/irdp_interface.c +++ b/zebra/irdp_interface.c @@ -18,17 +18,17 @@ * You should have received a copy of the GNU General Public License * along with GNU Zebra; see the file COPYING. If not, write to the Free * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA - * 02111-1307, USA. + * 02111-1307, USA. 
*/ -/* +/* * This work includes work with the following copywrite: * * Copyright (C) 1997, 2000 Kunihiro Ishiguro * */ -/* +/* * Thanks to Jens Låås at Swedish University of Agricultural Sciences * for reviewing and tests. */ @@ -36,7 +36,7 @@ #include -#ifdef HAVE_IRDP +#ifdef HAVE_IRDP #include "if.h" #include "vty.h" @@ -81,7 +81,7 @@ irdp_get_prefix(struct interface *ifp) { struct listnode *node; struct connected *ifc; - + if (ifp->connected) for (ALL_LIST_ELEMENTS_RO (ifp->connected, node, ifc)) return ifc->address; @@ -91,9 +91,9 @@ irdp_get_prefix(struct interface *ifp) /* Join to the add/leave multicast group. */ static int -if_group (struct interface *ifp, - int sock, - u_int32_t group, +if_group (struct interface *ifp, + int sock, + u_int32_t group, int add_leave) { struct ip_mreq m; @@ -116,7 +116,7 @@ if_group (struct interface *ifp, (char *) &m, sizeof (struct ip_mreq)); if (ret < 0) zlog_warn ("IRDP: %s can't setsockopt %s: %s", - add_leave == IP_ADD_MEMBERSHIP? "join group":"leave group", + add_leave == IP_ADD_MEMBERSHIP? "join group":"leave group", inet_2a(group, b1), safe_strerror (errno)); @@ -137,7 +137,7 @@ if_add_group (struct interface *ifp) } if(irdp->flags & IF_DEBUG_MISC ) - zlog_debug("IRDP: Adding group %s for %s", + zlog_debug("IRDP: Adding group %s for %s", inet_2a(htonl(INADDR_ALLRTRS_GROUP), b1), ifp->name); return 0; @@ -156,7 +156,7 @@ if_drop_group (struct interface *ifp) return ret; if(irdp->flags & IF_DEBUG_MISC) - zlog_debug("IRDP: Leaving group %s for %s", + zlog_debug("IRDP: Leaving group %s for %s", inet_2a(htonl(INADDR_ALLRTRS_GROUP), b1), ifp->name); return 0; @@ -206,7 +206,7 @@ irdp_if_start(struct interface *ifp, int multicast, int set_defaults) } irdp->flags |= IF_ACTIVE; - if(!multicast) + if(!multicast) irdp->flags |= IF_BROADCAST; if_add_update(ifp); @@ -219,13 +219,13 @@ irdp_if_start(struct interface *ifp, int multicast, int set_defaults) if( multicast) { if_add_group(ifp); - + if (! (ifp->flags & (IFF_MULTICAST|IFF_ALLMULTI))) { zlog_warn("IRDP: Interface not multicast enabled %s", ifp->name); } } - if(set_defaults) + if(set_defaults) if_set_defaults(ifp); irdp->irdp_sent = 0; @@ -239,9 +239,9 @@ irdp_if_start(struct interface *ifp, int multicast, int set_defaults) seed = ifc->address->u.prefix4.s_addr; break; } - + srandom(seed); - timer = (random () % IRDP_DEFAULT_INTERVAL) + 1; + timer = (random () % IRDP_DEFAULT_INTERVAL) + 1; irdp->AdvPrefList = list_new(); irdp->AdvPrefList->del = (void (*)(void *)) Adv_free; /* Destructor */ @@ -250,18 +250,18 @@ irdp_if_start(struct interface *ifp, int multicast, int set_defaults) /* And this for startup. Speed limit from 1991 :-). But it's OK*/ if(irdp->irdp_sent < MAX_INITIAL_ADVERTISEMENTS && - timer > MAX_INITIAL_ADVERT_INTERVAL ) + timer > MAX_INITIAL_ADVERT_INTERVAL ) timer= MAX_INITIAL_ADVERT_INTERVAL; - + if(irdp->flags & IF_DEBUG_MISC) - zlog_debug("IRDP: Init timer for %s set to %u", - ifp->name, + zlog_debug("IRDP: Init timer for %s set to %u", + ifp->name, timer); - irdp->t_advertise = thread_add_timer(zebrad.master, - irdp_send_thread, - ifp, + irdp->t_advertise = thread_add_timer(zebrad.master, + irdp_send_thread, + ifp, timer); } @@ -270,7 +270,7 @@ irdp_if_stop(struct interface *ifp) { struct zebra_if *zi=ifp->info; struct irdp_interface *irdp=&zi->irdp; - + if (irdp == NULL) { zlog_warn ("Interface %s structure is NULL", ifp->name); return; @@ -281,7 +281,7 @@ irdp_if_stop(struct interface *ifp) return; } - if(! (irdp->flags & IF_BROADCAST)) + if(! 
(irdp->flags & IF_BROADCAST)) if_drop_group(ifp); irdp_advert_off(ifp); @@ -307,9 +307,9 @@ irdp_if_shutdown(struct interface *ifp) irdp->flags |= IF_SHUTDOWN; irdp->flags &= ~IF_ACTIVE; - if(! (irdp->flags & IF_BROADCAST)) + if(! (irdp->flags & IF_BROADCAST)) if_drop_group(ifp); - + /* Tell the hosts we are out of service */ irdp_advert_off(ifp); } @@ -327,7 +327,7 @@ irdp_if_no_shutdown(struct interface *ifp) irdp->flags &= ~IF_SHUTDOWN; - irdp_if_start(ifp, irdp->flags & IF_BROADCAST? FALSE : TRUE, FALSE); + irdp_if_start(ifp, irdp->flags & IF_BROADCAST? FALSE : TRUE, FALSE); } @@ -344,30 +344,30 @@ void irdp_config_write (struct vty *vty, struct interface *ifp) if(irdp->flags & IF_ACTIVE || irdp->flags & IF_SHUTDOWN) { - if( irdp->flags & IF_SHUTDOWN) + if( irdp->flags & IF_SHUTDOWN) vty_out (vty, " ip irdp shutdown %s", VTY_NEWLINE); - if( irdp->flags & IF_BROADCAST) + if( irdp->flags & IF_BROADCAST) vty_out (vty, " ip irdp broadcast%s", VTY_NEWLINE); - else + else vty_out (vty, " ip irdp multicast%s", VTY_NEWLINE); - vty_out (vty, " ip irdp preference %ld%s", + vty_out (vty, " ip irdp preference %ld%s", irdp->Preference, VTY_NEWLINE); for (ALL_LIST_ELEMENTS_RO (irdp->AdvPrefList, node, adv)) vty_out (vty, " ip irdp address %s preference %d%s", inet_2a(adv->ip.s_addr, b1), - adv->pref, + adv->pref, VTY_NEWLINE); - vty_out (vty, " ip irdp holdtime %d%s", + vty_out (vty, " ip irdp holdtime %d%s", irdp->Lifetime, VTY_NEWLINE); - vty_out (vty, " ip irdp minadvertinterval %ld%s", + vty_out (vty, " ip irdp minadvertinterval %ld%s", irdp->MinAdvertInterval, VTY_NEWLINE); - vty_out (vty, " ip irdp maxadvertinterval %ld%s", + vty_out (vty, " ip irdp maxadvertinterval %ld%s", irdp->MaxAdvertInterval, VTY_NEWLINE); } @@ -380,12 +380,7 @@ DEFUN (ip_irdp_multicast, IP_STR "ICMP Router discovery on this interface using multicast\n") { - struct interface *ifp; - - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } + VTY_DECLVAR_CONTEXT (interface, ifp); irdp_if_start(ifp, TRUE, TRUE); return CMD_SUCCESS; @@ -397,12 +392,7 @@ DEFUN (ip_irdp_broadcast, IP_STR "ICMP Router discovery on this interface using broadcast\n") { - struct interface *ifp; - - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } + VTY_DECLVAR_CONTEXT (interface, ifp); irdp_if_start(ifp, FALSE, TRUE); return CMD_SUCCESS; @@ -415,12 +405,7 @@ DEFUN (no_ip_irdp, IP_STR "Disable ICMP Router discovery on this interface\n") { - struct interface *ifp; - - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } + VTY_DECLVAR_CONTEXT (interface, ifp); irdp_if_stop(ifp); return CMD_SUCCESS; @@ -432,12 +417,7 @@ DEFUN (ip_irdp_shutdown, IP_STR "ICMP Router discovery shutdown on this interface\n") { - struct interface *ifp; - - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } + VTY_DECLVAR_CONTEXT (interface, ifp); irdp_if_shutdown(ifp); return CMD_SUCCESS; @@ -450,12 +430,7 @@ DEFUN (no_ip_irdp_shutdown, IP_STR "ICMP Router discovery no shutdown on this interface\n") { - struct interface *ifp; - - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } + VTY_DECLVAR_CONTEXT (interface, ifp); irdp_if_no_shutdown(ifp); return CMD_SUCCESS; @@ -470,13 +445,9 @@ DEFUN (ip_irdp_holdtime, "Holdtime value in seconds. 
Default is 1800 seconds\n") { int idx_number = 3; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; @@ -494,13 +465,9 @@ DEFUN (ip_irdp_minadvertinterval, "Minimum advertisement interval in seconds\n") { int idx_number = 3; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; @@ -511,10 +478,10 @@ DEFUN (ip_irdp_minadvertinterval, return CMD_SUCCESS; } - vty_out (vty, "ICMP warning maxadvertinterval is greater or equal than minadvertinterval%s", + vty_out (vty, "ICMP warning maxadvertinterval is greater or equal than minadvertinterval%s", VTY_NEWLINE); - vty_out (vty, "Please correct!%s", + vty_out (vty, "Please correct!%s", VTY_NEWLINE); return CMD_WARNING; } @@ -528,13 +495,9 @@ DEFUN (ip_irdp_maxadvertinterval, "Maximum advertisement interval in seconds\n") { int idx_number = 3; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; @@ -546,10 +509,10 @@ DEFUN (ip_irdp_maxadvertinterval, return CMD_SUCCESS; } - vty_out (vty, "ICMP warning maxadvertinterval is greater or equal than minadvertinterval%s", + vty_out (vty, "ICMP warning maxadvertinterval is greater or equal than minadvertinterval%s", VTY_NEWLINE); - vty_out (vty, "Please correct!%s", + vty_out (vty, "Please correct!%s", VTY_NEWLINE); return CMD_WARNING; } @@ -568,13 +531,9 @@ DEFUN (ip_irdp_preference, "Preference level\n") { int idx_number = 3; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; @@ -594,20 +553,15 @@ DEFUN (ip_irdp_address_preference, { int idx_ipv4 = 3; int idx_number = 5; + VTY_DECLVAR_CONTEXT (interface, ifp); struct listnode *node; - struct in_addr ip; + struct in_addr ip; int pref; int ret; - struct interface *ifp; struct zebra_if *zi; struct irdp_interface *irdp; struct Adv *adv; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } - zi=ifp->info; irdp=&zi->irdp; @@ -617,7 +571,7 @@ DEFUN (ip_irdp_address_preference, pref = atoi(argv[idx_number]->arg); for (ALL_LIST_ELEMENTS_RO (irdp->AdvPrefList, node, adv)) - if(adv->ip.s_addr == ip.s_addr) + if(adv->ip.s_addr == ip.s_addr) return CMD_SUCCESS; adv = Adv_new(); @@ -640,24 +594,19 @@ DEFUN (no_ip_irdp_address_preference, "Old preference level\n") { int idx_ipv4 = 4; + VTY_DECLVAR_CONTEXT (interface, ifp); struct listnode *node, *nnode; - struct in_addr ip; + struct in_addr ip; int ret; - struct interface *ifp; struct zebra_if *zi; struct irdp_interface *irdp; struct Adv *adv; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } - zi=ifp->info; irdp=&zi->irdp; ret = inet_aton(argv[idx_ipv4]->arg, &ip); - if (!ret) + if (!ret) return CMD_WARNING; for (ALL_LIST_ELEMENTS (irdp->AdvPrefList, node, nnode, adv)) @@ -668,7 +617,7 @@ DEFUN (no_ip_irdp_address_preference, break; } } - + return CMD_SUCCESS; } @@ -678,13 +627,9 @@ DEFUN (ip_irdp_debug_messages, IP_STR "ICMP Router discovery debug Averts. 
and Solicits (short)\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; @@ -700,13 +645,9 @@ DEFUN (ip_irdp_debug_misc, IP_STR "ICMP Router discovery debug Averts. and Solicits (short)\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; @@ -722,13 +663,9 @@ DEFUN (ip_irdp_debug_packet, IP_STR "ICMP Router discovery debug Averts. and Solicits (short)\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; @@ -745,13 +682,9 @@ DEFUN (ip_irdp_debug_disable, IP_STR "ICMP Router discovery debug Averts. and Solicits (short)\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zi; struct irdp_interface *irdp; - ifp = (struct interface *) vty->index; - if(!ifp) { - return CMD_WARNING; - } zi=ifp->info; irdp=&zi->irdp; diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c index 3a232129b6..f3f0a2777e 100644 --- a/zebra/kernel_socket.c +++ b/zebra/kernel_socket.c @@ -21,6 +21,9 @@ #include #include +#ifdef __OpenBSD__ +#include +#endif #include "if.h" #include "prefix.h" @@ -1068,6 +1071,7 @@ rtm_write (int message, union sockunion *dest, union sockunion *mask, union sockunion *gate, + union sockunion *mpls, unsigned int index, int zebra_flags, int metric) @@ -1097,6 +1101,10 @@ rtm_write (int message, msg.rtm.rtm_addrs = RTA_DST; msg.rtm.rtm_addrs |= RTA_GATEWAY; msg.rtm.rtm_flags = RTF_UP; +#ifdef __OpenBSD__ + msg.rtm.rtm_flags |= RTF_MPATH; + msg.rtm.rtm_fmask = RTF_MPLS; +#endif msg.rtm.rtm_index = index; if (metric != 0) @@ -1142,6 +1150,17 @@ rtm_write (int message, else if (message == RTM_ADD) msg.rtm.rtm_flags |= RTF_HOST; +#ifdef __OpenBSD__ + if (mpls) + { + msg.rtm.rtm_addrs |= RTA_SRC; + msg.rtm.rtm_flags |= RTF_MPLS; + + if (mpls->smpls.smpls_label != htonl (MPLS_IMP_NULL_LABEL << MPLS_LABEL_OFFSET)) + msg.rtm.rtm_mpls = MPLS_OP_PUSH; + } +#endif + /* Tagging route with flags */ msg.rtm.rtm_flags |= (RTF_PROTO1); @@ -1166,6 +1185,9 @@ rtm_write (int message, SOCKADDRSET (dest, RTA_DST); SOCKADDRSET (gate, RTA_GATEWAY); SOCKADDRSET (mask, RTA_NETMASK); +#ifdef __OpenBSD__ + SOCKADDRSET (mpls, RTA_SRC); +#endif msg.rtm.rtm_msglen = pnt - (caddr_t) &msg; diff --git a/zebra/kernel_socket.h b/zebra/kernel_socket.h index e9558ad6dd..18d69343a4 100644 --- a/zebra/kernel_socket.h +++ b/zebra/kernel_socket.h @@ -27,7 +27,8 @@ extern void rtm_read (struct rt_msghdr *); extern int ifam_read (struct ifa_msghdr *); extern int ifm_read (struct if_msghdr *); extern int rtm_write (int, union sockunion *, union sockunion *, - union sockunion *, unsigned int, int, int); + union sockunion *, union sockunion *, + unsigned int, int, int); extern const struct message rtm_type_str[]; #endif /* __ZEBRA_KERNEL_SOCKET_H */ diff --git a/zebra/main.c b/zebra/main.c index e67568140a..9247d43507 100644 --- a/zebra/main.c +++ b/zebra/main.c @@ -46,6 +46,7 @@ #include "zebra/zebra_ptm.h" #include "zebra/zebra_ns.h" #include "zebra/redistribute.h" +#include "zebra/zebra_mpls.h" #define ZEBRA_PTM_SUPPORT @@ -82,6 +83,7 @@ struct option longopts[] = { 
"daemon", no_argument, NULL, 'd'}, { "allow_delete", no_argument, NULL, 'a'}, { "keep_kernel", no_argument, NULL, 'k'}, + { "fpm_format", required_argument, NULL, 'F'}, { "config_file", required_argument, NULL, 'f'}, { "pid_file", required_argument, NULL, 'i'}, { "socket", required_argument, NULL, 'z'}, @@ -142,6 +144,7 @@ usage (char *progname, int status) "-d, --daemon Runs in daemon mode\n"\ "-a, --allow_delete Allow other processes to delete Quagga Routes\n" \ "-f, --config_file Set configuration file name\n"\ + "-F, --fpm_format Set fpm format to 'netlink' or 'protobuf'\n"\ "-i, --pid_file Set process identifier file name\n"\ "-z, --socket Set path of zebra socket\n"\ "-k, --keep_kernel Don't delete old routes which installed by "\ @@ -237,6 +240,7 @@ main (int argc, char **argv) char *progname; struct thread thread; char *zserv_path = NULL; + char *fpm_format = NULL; /* Set umask before anything for security */ umask (0027); @@ -256,9 +260,9 @@ main (int argc, char **argv) int opt; #ifdef HAVE_NETLINK - opt = getopt_long (argc, argv, "bdakf:i:z:hA:P:ru:g:vs:C", longopts, 0); + opt = getopt_long (argc, argv, "bdakf:F:i:z:hA:P:ru:g:vs:C", longopts, 0); #else - opt = getopt_long (argc, argv, "bdakf:i:z:hA:P:ru:g:vC", longopts, 0); + opt = getopt_long (argc, argv, "bdakf:F:i:z:hA:P:ru:g:vC", longopts, 0); #endif /* HAVE_NETLINK */ if (opt == EOF) @@ -285,6 +289,9 @@ main (int argc, char **argv) case 'f': config_file = optarg; break; + case 'F': + fpm_format = optarg; + break; case 'A': vty_addr = optarg; break; @@ -339,6 +346,7 @@ main (int argc, char **argv) /* Vty related initialize. */ signal_init (zebrad.master, array_size(zebra_signals), zebra_signals); cmd_init (1); + vty_config_lockless (); vty_init (zebrad.master); memory_init (); @@ -362,6 +370,9 @@ main (int argc, char **argv) zebra_ptm_init(); #endif + zebra_mpls_init (); + zebra_mpls_vty_init (); + /* For debug purpose. */ /* SET_FLAG (zebra_debug_event, ZEBRA_DEBUG_EVENT); */ @@ -373,9 +384,9 @@ main (int argc, char **argv) #endif /* HAVE_SNMP */ #ifdef HAVE_FPM - zfpm_init (zebrad.master, 1, 0); + zfpm_init (zebrad.master, 1, 0, fpm_format); #else - zfpm_init (zebrad.master, 0, 0); + zfpm_init (zebrad.master, 0, 0, fpm_format); #endif /* Process the configuration file. 
Among other configuration diff --git a/zebra/redistribute.c b/zebra/redistribute.c index 4e7538327f..6f91c94f7e 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -46,20 +46,14 @@ /* array holding redistribute info about table redistribution */ /* bit AFI is set if that AFI is redistributing routes from this table */ -static u_char zebra_import_table_used[ZEBRA_KERNEL_TABLE_MAX]; +static int zebra_import_table_used[AFI_MAX][ZEBRA_KERNEL_TABLE_MAX]; static u_int32_t zebra_import_table_distance[AFI_MAX][ZEBRA_KERNEL_TABLE_MAX]; int is_zebra_import_table_enabled(afi_t afi, u_int32_t table_id) { if (is_zebra_valid_kernel_table(table_id)) - { - if (CHECK_FLAG(zebra_import_table_used[table_id], (u_char)afi)) - return 1; - else - return 0; - } - + return zebra_import_table_used[afi][table_id]; return 0; } @@ -672,12 +666,12 @@ zebra_import_table (afi_t afi, u_int32_t table_id, u_int32_t distance, const cha zebra_del_import_table_route_map (afi, table_id); } - SET_FLAG(zebra_import_table_used[table_id], afi); + zebra_import_table_used[afi][table_id] = 1; zebra_import_table_distance[afi][table_id] = distance; } else { - UNSET_FLAG(zebra_import_table_used[table_id], (u_char)afi); + zebra_import_table_used[afi][table_id] = 0; zebra_import_table_distance[afi][table_id] = ZEBRA_TABLE_DISTANCE_DEFAULT; rmap_name = zebra_get_import_table_route_map (afi, table_id); @@ -721,7 +715,7 @@ zebra_import_table_config (struct vty *vty) int i; afi_t afi; int write = 0; - char afi_str[AFI_MAX][6] = {"", "ip", "ipv6"}; + char afi_str[AFI_MAX][10] = {"", "ip", "ipv6", "ethernet"}; const char *rmap_name; for (afi = AFI_IP; afi < AFI_MAX; afi++) diff --git a/zebra/rib.h b/zebra/rib.h index 9867323e6e..c95a9ba0c3 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -23,6 +23,7 @@ #ifndef _ZEBRA_RIB_H #define _ZEBRA_RIB_H +#include "zebra.h" #include "linklist.h" #include "prefix.h" #include "table.h" @@ -30,6 +31,7 @@ #include "nexthop.h" #include "vrf.h" #include "if.h" +#include "mpls.h" #define DISTANCE_INFINITY 255 #define ZEBRA_KERNEL_TABLE_MAX 252 /* support for no more than this rt tables */ @@ -46,6 +48,9 @@ struct rib /* Refrence count. */ unsigned long refcnt; + /* Tag */ + route_tag_t tag; + /* Uptime. */ time_t uptime; @@ -71,14 +76,11 @@ struct rib /* Distance. */ u_char distance; - /* Tag */ - u_short tag; - /* Flags of this route. * This flag's definition is in lib/zebra.h ZEBRA_FLAG_* and is exposed * to clients via Zserv */ - u_char flags; + u_int32_t flags; /* RIB internal status */ u_char status; @@ -86,6 +88,7 @@ struct rib /* to simplify NHT logic when NHs change, instead of doing a NH by NH cmp */ #define RIB_ENTRY_NEXTHOPS_CHANGED 0x2 #define RIB_ENTRY_CHANGED 0x4 +#define RIB_ENTRY_SELECTED_FIB 0x8 /* Nexthop information. */ u_char nexthop_num; @@ -373,6 +376,8 @@ extern struct route_table *rib_table_ipv6; extern int rib_gc_dest (struct route_node *rn); extern struct route_table *rib_tables_iter_next (rib_tables_iter_t *iter); +extern u_char route_distance(int type); + /* * Inline functions. 
*/ diff --git a/zebra/rt.h b/zebra/rt.h index 46e71fa46e..1899ef17da 100644 --- a/zebra/rt.h +++ b/zebra/rt.h @@ -26,6 +26,8 @@ #include "prefix.h" #include "if.h" #include "zebra/rib.h" +#include "zebra/zebra_ns.h" +#include "zebra/zebra_mpls.h" extern int kernel_add_ipv4 (struct prefix *, struct rib *); extern int kernel_update_ipv4 (struct prefix *, struct rib *); @@ -39,4 +41,9 @@ extern int kernel_add_ipv6 (struct prefix *, struct rib *); extern int kernel_update_ipv6 (struct prefix *, struct rib *); extern int kernel_delete_ipv6 (struct prefix *, struct rib *); +extern int kernel_add_lsp (zebra_lsp_t *); +extern int kernel_upd_lsp (zebra_lsp_t *); +extern int kernel_del_lsp (zebra_lsp_t *); +extern int mpls_kernel_init (void); + #endif /* _ZEBRA_RT_H */ diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index a5f62dfa03..afcd5f0235 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -16,7 +16,7 @@ * You should have received a copy of the GNU General Public License * along with GNU Zebra; see the file COPYING. If not, write to the Free * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA - * 02111-1307, USA. + * 02111-1307, USA. */ #include @@ -41,6 +41,7 @@ #include "nexthop.h" #include "vrf.h" #include "vty.h" +#include "mpls.h" #include "zebra/zserv.h" #include "zebra/zebra_ns.h" @@ -51,6 +52,7 @@ #include "zebra/debug.h" #include "zebra/rtadv.h" #include "zebra/zebra_ptm.h" +#include "zebra/zebra_mpls.h" #include "rt_netlink.h" @@ -70,6 +72,53 @@ static const struct message nlmsg_str[] = { {0, NULL} }; +/* TODO - Temporary definitions, need to refine. */ +#ifndef AF_MPLS +#define AF_MPLS 28 +#endif + +#ifndef RTA_VIA +#define RTA_VIA 18 +#endif + +#ifndef RTA_NEWDST +#define RTA_NEWDST 19 +#endif + +#ifndef RTA_ENCAP_TYPE +#define RTA_ENCAP_TYPE 21 +#endif + +#ifndef RTA_ENCAP +#define RTA_ENCAP 22 +#endif + +#ifndef LWTUNNEL_ENCAP_MPLS +#define LWTUNNEL_ENCAP_MPLS 1 +#endif + +#ifndef MPLS_IPTUNNEL_DST +#define MPLS_IPTUNNEL_DST 1 +#endif +/* End of temporary definitions */ + +#ifndef NLMSG_TAIL +#define NLMSG_TAIL(nmsg) \ + ((struct rtattr *) (((u_char *) (nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len))) +#endif + +#ifndef RTA_TAIL +#define RTA_TAIL(rta) \ + ((struct rtattr *) (((u_char *) (rta)) + RTA_ALIGN((rta)->rta_len))) +#endif + +struct gw_family_t +{ + u_int16_t filler; + u_int16_t family; + union g_addr gate; +}; + extern struct zebra_privs_t zserv_privs; extern u_int32_t nl_rcvbufsize; @@ -94,7 +143,7 @@ set_ifindex(struct interface *ifp, ifindex_t ifi_index) ifi_index, oifp->name, ifp->name); if (if_is_up(oifp)) zlog_err("interface rename detected on up interface: index %d " - "was renamed from %s to %s, results are uncertain!", + "was renamed from %s to %s, results are uncertain!", ifi_index, oifp->name, ifp->name); if_delete_update(oifp); } @@ -243,7 +292,7 @@ netlink_request (int family, int type, struct nlsock *nl) req.nlh.nlmsg_seq = ++nl->seq; req.g.rtgen_family = family; - /* linux appears to check capabilities on every message + /* linux appears to check capabilities on every message * have to raise caps for every message sent */ if (zserv_privs.change (ZPRIVS_RAISE)) @@ -440,10 +489,10 @@ netlink_parse_info (int (*filter) (struct sockaddr_nl *, struct nlmsghdr *, /* OK we got netlink message. 
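For readers following the new AF_MPLS handling in this file: the gw_family_t helper defined above exists so that a 16-bit address family sits immediately in front of the gateway address in memory, matching the kernel's struct rtvia layout for the RTA_VIA attribute (2 bytes of family followed by the raw address bytes). A minimal illustrative fragment, with made-up values and a request struct like the ones used elsewhere in this file, not part of the patch itself:

  struct gw_family_t gw_fam;
  struct in_addr via = { .s_addr = htonl (0x0a000001) };  /* 10.0.0.1 -- example only */

  gw_fam.family = AF_INET;
  memcpy (&gw_fam.gate.ipv4, &via, sizeof (via));

  /* The attribute payload starts at the family field: 2 bytes of family
   * plus the address itself, which is what the "bytelen + 2" length used
   * by _netlink_route_nl_add_gateway_info() below expresses. */
  addattr_l (&req.n, sizeof (req), RTA_VIA, &gw_fam.family, sizeof (via) + 2);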
*/ if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug ("netlink_parse_info: %s type %s(%u), seq=%u, pid=%u", + zlog_debug ("netlink_parse_info: %s type %s(%u), len=%d, seq=%u, pid=%u", nl->name, lookup (nlmsg_str, h->nlmsg_type), h->nlmsg_type, - h->nlmsg_seq, h->nlmsg_pid); + h->nlmsg_len, h->nlmsg_seq, h->nlmsg_pid); /* skip unsolicited messages originating from command socket * linux sets the originators port-id for {NEW|DEL}ADDR messages, @@ -707,7 +756,7 @@ netlink_interface (struct sockaddr_nl *snl, struct nlmsghdr *h, /* Looking up interface name. */ memset (tb, 0, sizeof tb); netlink_parse_rtattr (tb, IFLA_MAX, IFLA_RTA (ifi), len); - + #ifdef IFLA_WIRELESS /* check for wireless messages to ignore */ if ((tb[IFLA_WIRELESS] != NULL) && (ifi->ifi_change == 0)) @@ -834,7 +883,7 @@ netlink_interface_addr (struct sockaddr_nl *snl, struct nlmsghdr *h, buf, BUFSIZ), ifa->ifa_prefixlen); if (tb[IFA_LABEL] && strcmp (ifp->name, RTA_DATA (tb[IFA_LABEL]))) zlog_debug (" IFA_LABEL %s", (char *)RTA_DATA (tb[IFA_LABEL])); - + if (tb[IFA_CACHEINFO]) { struct ifa_cacheinfo *ci = RTA_DATA (tb[IFA_CACHEINFO]); @@ -842,13 +891,13 @@ netlink_interface_addr (struct sockaddr_nl *snl, struct nlmsghdr *h, ci->ifa_prefered, ci->ifa_valid); } } - + /* logic copied from iproute2/ip/ipaddress.c:print_addrinfo() */ if (tb[IFA_LOCAL] == NULL) tb[IFA_LOCAL] = tb[IFA_ADDRESS]; if (tb[IFA_ADDRESS] == NULL) tb[IFA_ADDRESS] = tb[IFA_LOCAL]; - + /* local interface address */ addr = (tb[IFA_LOCAL] ? RTA_DATA(tb[IFA_LOCAL]) : NULL); @@ -962,6 +1011,11 @@ netlink_routing_table (struct sockaddr_nl *snl, struct nlmsghdr *h, if (rtm->rtm_src_len != 0) return 0; + /* We don't care about change notifications for the MPLS table. */ + /* TODO: Revisit this. */ + if (rtm->rtm_family == AF_MPLS) + return 0; + /* Table corresponding to route. */ if (tb[RTA_TABLE]) table = *(int *) RTA_DATA (tb[RTA_TABLE]); @@ -1124,7 +1178,7 @@ netlink_route_change (struct sockaddr_nl *snl, struct nlmsghdr *h, struct rtattr *tb[RTA_MAX + 1]; u_char zebra_flags = 0; struct prefix p; - + char anyaddr[16] = { 0 }; int index; @@ -1162,6 +1216,11 @@ netlink_route_change (struct sockaddr_nl *snl, struct nlmsghdr *h, return 0; } + /* We don't care about change notifications for the MPLS table. */ + /* TODO: Revisit this. */ + if (rtm->rtm_family == AF_MPLS) + return 0; + len = h->nlmsg_len - NLMSG_LENGTH (sizeof (struct rtmsg)); if (len < 0) return -1; @@ -1400,7 +1459,7 @@ netlink_link_change (struct sockaddr_nl *snl, struct nlmsghdr *h, return 0; } #endif /* IFLA_WIRELESS */ - + if (tb[IFLA_IFNAME] == NULL) return -1; name = (char *) RTA_DATA (tb[IFLA_IFNAME]); @@ -1638,7 +1697,7 @@ netlink_route_read (struct zebra_ns *zns) return 0; } -/* Utility function comes from iproute2. +/* Utility function comes from iproute2. 
Authors: Alexey Kuznetsov, */ int addattr_l (struct nlmsghdr *n, unsigned int maxlen, int type, void *data, int alen) @@ -1648,58 +1707,78 @@ addattr_l (struct nlmsghdr *n, unsigned int maxlen, int type, void *data, int al len = RTA_LENGTH (alen); - if (NLMSG_ALIGN (n->nlmsg_len) + len > maxlen) + if (NLMSG_ALIGN (n->nlmsg_len) + RTA_ALIGN (len) > maxlen) return -1; rta = (struct rtattr *) (((char *) n) + NLMSG_ALIGN (n->nlmsg_len)); rta->rta_type = type; rta->rta_len = len; memcpy (RTA_DATA (rta), data, alen); - n->nlmsg_len = NLMSG_ALIGN (n->nlmsg_len) + len; + n->nlmsg_len = NLMSG_ALIGN (n->nlmsg_len) + RTA_ALIGN (len); return 0; } int -rta_addattr_l (struct rtattr *rta, int maxlen, int type, void *data, int alen) +rta_addattr_l (struct rtattr *rta, unsigned int maxlen, int type, + void *data, int alen) { - int len; + unsigned int len; struct rtattr *subrta; len = RTA_LENGTH (alen); - if ((int)RTA_ALIGN (rta->rta_len) + len > maxlen) + if (RTA_ALIGN (rta->rta_len) + RTA_ALIGN (len) > maxlen) return -1; subrta = (struct rtattr *) (((char *) rta) + RTA_ALIGN (rta->rta_len)); subrta->rta_type = type; subrta->rta_len = len; memcpy (RTA_DATA (subrta), data, alen); - rta->rta_len = NLMSG_ALIGN (rta->rta_len) + len; + rta->rta_len = NLMSG_ALIGN (rta->rta_len) + RTA_ALIGN (len); return 0; } -/* Utility function comes from iproute2. +/* Utility function comes from iproute2. Authors: Alexey Kuznetsov, */ int addattr32 (struct nlmsghdr *n, unsigned int maxlen, int type, int data) { - int len; - struct rtattr *rta; + return addattr_l(n, maxlen, type, &data, sizeof(u_int32_t)); +} - len = RTA_LENGTH (4); +/* Some more utility functions from iproute2 */ +static struct rtattr * +addattr_nest(struct nlmsghdr *n, int maxlen, int type) +{ + struct rtattr *nest = NLMSG_TAIL(n); - if (NLMSG_ALIGN (n->nlmsg_len) + len > maxlen) - return -1; + addattr_l(n, maxlen, type, NULL, 0); + return nest; +} - rta = (struct rtattr *) (((char *) n) + NLMSG_ALIGN (n->nlmsg_len)); - rta->rta_type = type; - rta->rta_len = len; - memcpy (RTA_DATA (rta), &data, 4); - n->nlmsg_len = NLMSG_ALIGN (n->nlmsg_len) + len; +static int +addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest) +{ + nest->rta_len = (u_char *)NLMSG_TAIL(n) - (u_char *)nest; + return n->nlmsg_len; +} - return 0; +static struct rtattr * +rta_nest(struct rtattr *rta, int maxlen, int type) +{ + struct rtattr *nest = RTA_TAIL(rta); + + rta_addattr_l(rta, maxlen, type, NULL, 0); + return nest; +} + +static int +rta_nest_end(struct rtattr *rta, struct rtattr *nest) +{ + nest->rta_len = (u_char *)RTA_TAIL(rta) - (u_char *)nest; + return rta->rta_len; } static int @@ -1738,10 +1817,10 @@ netlink_talk (struct nlmsghdr *n, struct nlsock *nl, struct zebra_ns *zns) n->nlmsg_flags |= NLM_F_ACK; if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug ("netlink_talk: %s type %s(%u), seq=%u flags 0x%x", + zlog_debug ("netlink_talk: %s type %s(%u), len=%d seq=%u flags 0x%x", nl->name, lookup (nlmsg_str, n->nlmsg_type), n->nlmsg_type, - n->nlmsg_seq, n->nlmsg_flags); + n->nlmsg_len, n->nlmsg_seq, n->nlmsg_flags); /* Send message to netlink interface. */ if (zserv_privs.change (ZPRIVS_RAISE)) @@ -1765,13 +1844,67 @@ netlink_talk (struct nlmsghdr *n, struct nlsock *nl, struct zebra_ns *zns) } - /* - * Get reply from netlink socket. + /* + * Get reply from netlink socket. * The reply should either be an acknowlegement or an error. 
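The addattr_nest()/addattr_nest_end() helpers imported from iproute2 above are what the label-stack code further down uses to wrap MPLS labels in an RTA_ENCAP container for IPv4/IPv6 routes (the LER case). A condensed sketch of that pattern, using the RTA_ENCAP_TYPE/LWTUNNEL_ENCAP_MPLS constants defined earlier in this file and example label values; this is illustrative only, not patch content:

  struct
  {
    struct nlmsghdr n;
    struct rtmsg r;
    char buf[NL_PKT_BUF_SIZE];
  } req;
  u_int16_t encap = LWTUNNEL_ENCAP_MPLS;
  mpls_lse_t lse;
  struct rtattr *nest;

  memset (&req, 0, sizeof (req));
  req.n.nlmsg_len = NLMSG_LENGTH (sizeof (struct rtmsg));

  /* Announce which lightweight-tunnel encapsulation follows ... */
  addattr_l (&req.n, sizeof (req), RTA_ENCAP_TYPE, &encap, sizeof (encap));

  /* ... then open the RTA_ENCAP container, put the label stack inside it,
   * and close it so its rta_len covers the nested payload. */
  nest = addattr_nest (&req.n, sizeof (req), RTA_ENCAP);
  lse = mpls_lse_encode (16, 0, 0, 1);   /* label 16, bottom-of-stack set;
                                            argument order as used elsewhere in this patch */
  addattr_l (&req.n, sizeof (req), MPLS_IPTUNNEL_DST, &lse, sizeof (lse));
  addattr_nest_end (&req.n, nest);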
*/ return netlink_parse_info (netlink_talk_filter, nl, zns, 0); } +static void +_netlink_route_nl_add_gateway_info (u_char route_family, u_char gw_family, + struct nlmsghdr *nlmsg, + size_t req_size, int bytelen, + struct nexthop *nexthop) +{ + if (route_family == AF_MPLS) + { + struct gw_family_t gw_fam; + + gw_fam.family = gw_family; + if (gw_family == AF_INET) + memcpy (&gw_fam.gate.ipv4, &nexthop->gate.ipv4, bytelen); + else + memcpy (&gw_fam.gate.ipv6, &nexthop->gate.ipv6, bytelen); + addattr_l (nlmsg, req_size, RTA_VIA, &gw_fam.family, bytelen+2); + } + else + { + if (gw_family == AF_INET) + addattr_l (nlmsg, req_size, RTA_GATEWAY, &nexthop->gate.ipv4, bytelen); + else + addattr_l (nlmsg, req_size, RTA_GATEWAY, &nexthop->gate.ipv6, bytelen); + } +} + +static void +_netlink_route_rta_add_gateway_info (u_char route_family, u_char gw_family, + struct rtattr *rta, struct rtnexthop *rtnh, + size_t req_size, int bytelen, + struct nexthop *nexthop) +{ + if (route_family == AF_MPLS) + { + struct gw_family_t gw_fam; + + gw_fam.family = gw_family; + if (gw_family == AF_INET) + memcpy (&gw_fam.gate.ipv4, &nexthop->gate.ipv4, bytelen); + else + memcpy (&gw_fam.gate.ipv6, &nexthop->gate.ipv6, bytelen); + rta_addattr_l (rta, req_size, RTA_VIA, &gw_fam.family, bytelen+2); + rtnh->rtnh_len += RTA_LENGTH (bytelen + 2); + } + else + { + if (gw_family == AF_INET) + rta_addattr_l (rta, req_size, RTA_GATEWAY, &nexthop->gate.ipv4, bytelen); + else + rta_addattr_l (rta, req_size, RTA_GATEWAY, &nexthop->gate.ipv6, bytelen); + rtnh->rtnh_len += sizeof (struct rtattr) + bytelen; + } +} + /* This function takes a nexthop as argument and adds * the appropriate netlink attributes to an existing * netlink message. @@ -1793,6 +1926,9 @@ _netlink_route_build_singlepath( size_t req_size, int cmd) { + struct nexthop_label *nh_label; + mpls_lse_t out_lse[MPLS_MAX_LABELS]; + char label_buf[100]; if (rtmsg->rtm_family == AF_INET && (nexthop->type == NEXTHOP_TYPE_IPV6 @@ -1820,14 +1956,67 @@ _netlink_route_build_singlepath( return; } + label_buf[0] = '\0'; + /* outgoing label - either as NEWDST (in the case of LSR) or as ENCAP + * (in the case of LER) + */ + nh_label = nexthop->nh_label; + if (rtmsg->rtm_family == AF_MPLS) + { + assert (nh_label); + assert (nh_label->num_labels == 1); + } + + if (nh_label && nh_label->num_labels) + { + int i, num_labels = 0; + u_int32_t bos; + char label_buf1[20]; + + for (i = 0; i < nh_label->num_labels; i++) + { + if (nh_label->label[i] != MPLS_IMP_NULL_LABEL) + { + bos = ((i == (nh_label->num_labels - 1)) ? 
1 : 0); + out_lse[i] = mpls_lse_encode (nh_label->label[i], 0, 0, bos); + if (!num_labels) + sprintf (label_buf, "label %d", nh_label->label[i]); + else + { + sprintf (label_buf1, "/%d", nh_label->label[i]); + strcat (label_buf, label_buf1); + } + num_labels++; + } + } + if (num_labels) + { + if (rtmsg->rtm_family == AF_MPLS) + addattr_l (nlmsg, req_size, RTA_NEWDST, + &out_lse, num_labels * sizeof(mpls_lse_t)); + else + { + struct rtattr *nest; + u_int16_t encap = LWTUNNEL_ENCAP_MPLS; + + addattr_l(nlmsg, req_size, RTA_ENCAP_TYPE, + &encap, sizeof (u_int16_t)); + nest = addattr_nest(nlmsg, req_size, RTA_ENCAP); + addattr_l (nlmsg, req_size, MPLS_IPTUNNEL_DST, + &out_lse, num_labels * sizeof(mpls_lse_t)); + addattr_nest_end(nlmsg, nest); + } + } + } + if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ONLINK)) rtmsg->rtm_flags |= RTNH_F_ONLINK; if (nexthop->type == NEXTHOP_TYPE_IPV4 || nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX) { - addattr_l (nlmsg, req_size, RTA_GATEWAY, - &nexthop->gate.ipv4, bytelen); + _netlink_route_nl_add_gateway_info (rtmsg->rtm_family, AF_INET, nlmsg, + req_size, bytelen, nexthop); if (cmd == RTM_NEWROUTE) { @@ -1841,16 +2030,16 @@ _netlink_route_build_singlepath( if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("netlink_route_multipath() (%s): " - "nexthop via %s if %u", + "nexthop via %s %s if %u", routedesc, inet_ntoa (nexthop->gate.ipv4), - nexthop->ifindex); + label_buf, nexthop->ifindex); } if (nexthop->type == NEXTHOP_TYPE_IPV6 || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) { - addattr_l (nlmsg, req_size, RTA_GATEWAY, - &nexthop->gate.ipv6, bytelen); + _netlink_route_nl_add_gateway_info (rtmsg->rtm_family, AF_INET6, nlmsg, + req_size, bytelen, nexthop); if (cmd == RTM_NEWROUTE) { @@ -1864,10 +2053,10 @@ _netlink_route_build_singlepath( if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("netlink_route_multipath() (%s): " - "nexthop via %s if %u", + "nexthop via %s %s if %u", routedesc, inet6_ntoa (nexthop->gate.ipv6), - nexthop->ifindex); + label_buf, nexthop->ifindex); } if (nexthop->type == NEXTHOP_TYPE_IFINDEX || nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX) @@ -1935,6 +2124,10 @@ _netlink_route_build_multipath( struct rtmsg *rtmsg, union g_addr **src) { + struct nexthop_label *nh_label; + mpls_lse_t out_lse[MPLS_MAX_LABELS]; + char label_buf[100]; + rtnh->rtnh_len = sizeof (*rtnh); rtnh->rtnh_flags = 0; rtnh->rtnh_hops = 0; @@ -1967,6 +2160,63 @@ _netlink_route_build_multipath( return; } + label_buf[0] = '\0'; + /* outgoing label - either as NEWDST (in the case of LSR) or as ENCAP + * (in the case of LER) + */ + nh_label = nexthop->nh_label; + if (rtmsg->rtm_family == AF_MPLS) + { + assert (nh_label); + assert (nh_label->num_labels == 1); + } + + if (nh_label && nh_label->num_labels) + { + int i, num_labels = 0; + u_int32_t bos; + char label_buf1[20]; + + for (i = 0; i < nh_label->num_labels; i++) + { + if (nh_label->label[i] != MPLS_IMP_NULL_LABEL) + { + bos = ((i == (nh_label->num_labels - 1)) ? 
1 : 0); + out_lse[i] = mpls_lse_encode (nh_label->label[i], 0, 0, bos); + if (!num_labels) + sprintf (label_buf, "label %d", nh_label->label[i]); + else + { + sprintf (label_buf1, "/%d", nh_label->label[i]); + strcat (label_buf, label_buf1); + } + num_labels++; + } + } + if (num_labels) + { + if (rtmsg->rtm_family == AF_MPLS) + { + rta_addattr_l (rta, NL_PKT_BUF_SIZE, RTA_NEWDST, + &out_lse, num_labels * sizeof(mpls_lse_t)); + rtnh->rtnh_len += RTA_LENGTH (num_labels * sizeof(mpls_lse_t)); + } + else + { + struct rtattr *nest; + u_int16_t encap = LWTUNNEL_ENCAP_MPLS; + int len = rta->rta_len; + + rta_addattr_l(rta, NL_PKT_BUF_SIZE, RTA_ENCAP_TYPE, + &encap, sizeof (u_int16_t)); + nest = rta_nest(rta, NL_PKT_BUF_SIZE, RTA_ENCAP); + rta_addattr_l (rta, NL_PKT_BUF_SIZE, MPLS_IPTUNNEL_DST, + &out_lse, num_labels * sizeof(mpls_lse_t)); + rta_nest_end(rta, nest); + rtnh->rtnh_len += rta->rta_len - len; + } + } + } if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ONLINK)) rtnh->rtnh_flags |= RTNH_F_ONLINK; @@ -1974,10 +2224,8 @@ _netlink_route_build_multipath( if (nexthop->type == NEXTHOP_TYPE_IPV4 || nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX) { - rta_addattr_l (rta, NL_PKT_BUF_SIZE, RTA_GATEWAY, - &nexthop->gate.ipv4, bytelen); - rtnh->rtnh_len += sizeof (struct rtattr) + bytelen; - + _netlink_route_rta_add_gateway_info (rtmsg->rtm_family, AF_INET, rta, + rtnh, NL_PKT_BUF_SIZE, bytelen, nexthop); if (nexthop->rmap_src.ipv4.s_addr) *src = &nexthop->rmap_src; else if (nexthop->src.ipv4.s_addr) @@ -1985,17 +2233,16 @@ _netlink_route_build_multipath( if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("netlink_route_multipath() (%s): " - "nexthop via %s if %u", + "nexthop via %s %s if %u", routedesc, inet_ntoa (nexthop->gate.ipv4), - nexthop->ifindex); + label_buf, nexthop->ifindex); } if (nexthop->type == NEXTHOP_TYPE_IPV6 || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) { - rta_addattr_l (rta, NL_PKT_BUF_SIZE, RTA_GATEWAY, - &nexthop->gate.ipv6, bytelen); - rtnh->rtnh_len += sizeof (struct rtattr) + bytelen; + _netlink_route_rta_add_gateway_info (rtmsg->rtm_family, AF_INET6, rta, + rtnh, NL_PKT_BUF_SIZE, bytelen, nexthop); if (!IN6_IS_ADDR_UNSPECIFIED(&nexthop->rmap_src.ipv6)) *src = &nexthop->rmap_src; @@ -2004,10 +2251,10 @@ _netlink_route_build_multipath( if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("netlink_route_multipath() (%s): " - "nexthop via %s if %u", + "nexthop via %s %s if %u", routedesc, inet6_ntoa (nexthop->gate.ipv6), - nexthop->ifindex); + label_buf, nexthop->ifindex); } /* ifindex */ if (nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX @@ -2038,6 +2285,44 @@ _netlink_route_build_multipath( } } +static inline void +_netlink_mpls_build_singlepath( + const char *routedesc, + zebra_nhlfe_t *nhlfe, + struct nlmsghdr *nlmsg, + struct rtmsg *rtmsg, + size_t req_size, + int cmd) +{ + int bytelen; + u_char family; + + family = NHLFE_FAMILY (nhlfe); + bytelen = (family == AF_INET ? 4 : 16); + _netlink_route_build_singlepath(routedesc, bytelen, nhlfe->nexthop, + nlmsg, rtmsg, req_size, cmd); +} + + +static inline void +_netlink_mpls_build_multipath( + const char *routedesc, + zebra_nhlfe_t *nhlfe, + struct rtattr *rta, + struct rtnexthop *rtnh, + struct rtmsg *rtmsg, + union g_addr **src) +{ + int bytelen; + u_char family; + + family = NHLFE_FAMILY (nhlfe); + bytelen = (family == AF_INET ? 4 : 16); + _netlink_route_build_multipath(routedesc, bytelen, nhlfe->nexthop, + rta, rtnh, rtmsg, src); +} + + /* Log debug information for netlink_route_multipath * if debug logging is enabled. 
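Both label-stack loops above pack each outgoing label into a 32-bit label stack entry via mpls_lse_encode(), setting the bottom-of-stack bit only on the last label. For reference, a stand-alone encoder for the RFC 3032 entry layout looks roughly like the following; mpls_lse_encode() in lib/mpls.h is assumed to produce the equivalent network-byte-order value (its exact parameter order is not reproduced here):

#include <stdint.h>
#include <arpa/inet.h>

/* RFC 3032 label stack entry: 20-bit label, 3-bit traffic class (EXP),
 * 1-bit bottom-of-stack flag, 8-bit TTL, sent in network byte order. */
static uint32_t
lse_encode (uint32_t label, uint32_t exp, int bos, uint32_t ttl)
{
  return htonl (((label & 0xfffffu) << 12) |
                ((exp & 0x7u) << 9) |
                ((bos ? 1u : 0u) << 8) |
                (ttl & 0xffu));
}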
* @@ -2063,9 +2348,20 @@ _netlink_route_debug( zlog_debug ("netlink_route_multipath() (%s): %s %s vrf %u type %s", routedesc, lookup (nlmsg_str, cmd), - prefix2str (p, buf, sizeof(buf)), - zvrf->vrf_id, nexthop_type_to_str (nexthop->type)); + prefix2str (p, buf, sizeof(buf)), zvrf->vrf_id, + (nexthop) ? nexthop_type_to_str (nexthop->type) : "UNK"); } + } + +static void +_netlink_mpls_debug( + int cmd, + u_int32_t label, + const char *routedesc) +{ + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug ("netlink_mpls_multipath() (%s): %s %u/20", + routedesc, lookup (nlmsg_str, cmd), label); } static int @@ -2508,6 +2804,198 @@ kernel_neigh_update (int add, int ifindex, uint32_t addr, char *lla, int llalen) lla, llalen); } +/* + * MPLS label forwarding table change via netlink interface. + */ +int +netlink_mpls_multipath (int cmd, zebra_lsp_t *lsp) +{ + mpls_lse_t lse; + zebra_nhlfe_t *nhlfe; + struct nexthop *nexthop = NULL; + int nexthop_num; + const char *routedesc; + struct zebra_ns *zns = zebra_ns_lookup (NS_DEFAULT); + + struct + { + struct nlmsghdr n; + struct rtmsg r; + char buf[NL_PKT_BUF_SIZE]; + } req; + + memset (&req, 0, sizeof req - NL_PKT_BUF_SIZE); + + + /* + * Count # nexthops so we can decide whether to use singlepath + * or multipath case. + */ + nexthop_num = 0; + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + nexthop = nhlfe->nexthop; + if (!nexthop) + continue; + if (cmd == RTM_NEWROUTE) + { + /* Count all selected NHLFEs */ + if (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_SELECTED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + nexthop_num++; + } + else /* DEL */ + { + /* Count all installed NHLFEs */ + if (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB)) + nexthop_num++; + } + } + + if (nexthop_num == 0) // unexpected + return 0; + + req.n.nlmsg_len = NLMSG_LENGTH (sizeof (struct rtmsg)); + req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST; + req.n.nlmsg_type = cmd; + req.r.rtm_family = AF_MPLS; + req.r.rtm_table = RT_TABLE_MAIN; + req.r.rtm_dst_len = MPLS_LABEL_LEN_BITS; + req.r.rtm_protocol = RTPROT_ZEBRA; + req.r.rtm_scope = RT_SCOPE_UNIVERSE; + req.r.rtm_type = RTN_UNICAST; + + if (cmd == RTM_NEWROUTE) + /* We do a replace to handle update. */ + req.n.nlmsg_flags |= NLM_F_REPLACE; + + /* Fill destination */ + lse = mpls_lse_encode (lsp->ile.in_label, 0, 0, 1); + addattr_l (&req.n, sizeof req, RTA_DST, &lse, sizeof(mpls_lse_t)); + + /* Fill nexthops (paths) based on single-path or multipath. The paths + * chosen depend on the operation. 
+ */ + if (nexthop_num == 1 || MULTIPATH_NUM == 1) + { + routedesc = "single hop"; + _netlink_mpls_debug(cmd, lsp->ile.in_label, routedesc); + + nexthop_num = 0; + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + nexthop = nhlfe->nexthop; + if (!nexthop) + continue; + + if ((cmd == RTM_NEWROUTE && + (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_SELECTED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE))) || + (cmd == RTM_DELROUTE && + (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB)))) + { + /* Add the gateway */ + _netlink_mpls_build_singlepath(routedesc, nhlfe, + &req.n, &req.r, sizeof req, cmd); + if (cmd == RTM_NEWROUTE) + { + SET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); + } + else + { + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); + } + nexthop_num++; + break; + } + } + } + else /* Multipath case */ + { + char buf[NL_PKT_BUF_SIZE]; + struct rtattr *rta = (void *) buf; + struct rtnexthop *rtnh; + union g_addr *src1 = NULL; + + rta->rta_type = RTA_MULTIPATH; + rta->rta_len = RTA_LENGTH (0); + rtnh = RTA_DATA (rta); + + routedesc = "multihop"; + _netlink_mpls_debug(cmd, lsp->ile.in_label, routedesc); + + nexthop_num = 0; + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + nexthop = nhlfe->nexthop; + if (!nexthop) + continue; + + if (MULTIPATH_NUM != 0 && nexthop_num >= MULTIPATH_NUM) + break; + + if ((cmd == RTM_NEWROUTE && + (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_SELECTED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE))) || + (cmd == RTM_DELROUTE && + (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB)))) + { + nexthop_num++; + + /* Build the multipath */ + _netlink_mpls_build_multipath(routedesc, nhlfe, rta, + rtnh, &req.r, &src1); + rtnh = RTNH_NEXT (rtnh); + + if (cmd == RTM_NEWROUTE) + { + SET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); + } + else + { + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); + } + + } + } + + /* Add the multipath */ + if (rta->rta_len > RTA_LENGTH (0)) + addattr_l (&req.n, NL_PKT_BUF_SIZE, RTA_MULTIPATH, RTA_DATA (rta), + RTA_PAYLOAD (rta)); + } + + /* Talk to netlink socket. */ + return netlink_talk (&req.n, &zns->netlink_cmd, zns); +} + +/* + * Handle failure in LSP install, clear flags for NHLFE. + */ +void +clear_nhlfe_installed (zebra_lsp_t *lsp) +{ + zebra_nhlfe_t *nhlfe; + struct nexthop *nexthop; + + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + nexthop = nhlfe->nexthop; + if (!nexthop) + continue; + + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); + } +} + extern struct thread_master *master; /* Kernel route reflection. 
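netlink_mpls_multipath() above does the real work of programming an AF_MPLS route keyed by the incoming label; the kernel_add_lsp()/kernel_upd_lsp()/kernel_del_lsp() entry points declared in zebra/rt.h are presumably thin wrappers around it. The wrapper bodies are not part of this excerpt; a plausible sketch, using clear_nhlfe_installed() for the failure path as its comment suggests:

/* Hypothetical wrappers -- illustrative only, the real definitions may differ. */
int
kernel_add_lsp (zebra_lsp_t *lsp)
{
  if (netlink_mpls_multipath (RTM_NEWROUTE, lsp) < 0)
    {
      /* Installation failed: make sure no NHLFE is left marked installed. */
      clear_nhlfe_installed (lsp);
      return -1;
    }
  return 0;
}

int
kernel_del_lsp (zebra_lsp_t *lsp)
{
  return netlink_mpls_multipath (RTM_DELROUTE, lsp);
}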
*/ diff --git a/zebra/rt_netlink.h b/zebra/rt_netlink.h index 80d035e839..55af237b64 100644 --- a/zebra/rt_netlink.h +++ b/zebra/rt_netlink.h @@ -24,6 +24,8 @@ #ifdef HAVE_NETLINK +#include "zebra/zebra_mpls.h" + #define NL_PKT_BUF_SIZE 8192 #define NL_DEFAULT_ROUTE_METRIC 20 @@ -33,7 +35,7 @@ extern int addattr_l (struct nlmsghdr *n, unsigned int maxlen, int type, void *data, int alen); extern int -rta_addattr_l (struct rtattr *rta, int maxlen, int type, void *data, int alen); +rta_addattr_l (struct rtattr *rta, unsigned int maxlen, int type, void *data, int alen); extern const char * nl_msg_type_to_str (uint16_t msg_type); @@ -41,6 +43,11 @@ nl_msg_type_to_str (uint16_t msg_type); extern const char * nl_rtproto_to_str (u_char rtproto); +extern void +clear_nhlfe_installed (zebra_lsp_t *lsp); +extern int +netlink_mpls_multipath (int cmd, zebra_lsp_t *lsp); + extern int interface_lookup_netlink (struct zebra_ns *zns); extern int netlink_route_read (struct zebra_ns *zns); diff --git a/zebra/rt_socket.c b/zebra/rt_socket.c index 24671829f0..f23f9d5da3 100644 --- a/zebra/rt_socket.c +++ b/zebra/rt_socket.c @@ -21,6 +21,9 @@ */ #include +#ifdef __OpenBSD__ +#include +#endif #include "if.h" #include "prefix.h" @@ -33,13 +36,15 @@ #include "zebra/rib.h" #include "zebra/rt.h" #include "zebra/kernel_socket.h" +#include "zebra/zebra_mpls.h" extern struct zebra_privs_t zserv_privs; /* kernel socket export */ extern int rtm_write (int message, union sockunion *dest, union sockunion *mask, union sockunion *gate, - unsigned int index, int zebra_flags, int metric); + union sockunion *mpls, unsigned int index, + int zebra_flags, int metric); #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN /* Adjust netmask socket length. Return value is a adjusted sin_len @@ -73,6 +78,10 @@ kernel_rtm_ipv4 (int cmd, struct prefix *p, struct rib *rib, int family) { struct sockaddr_in *mask = NULL; struct sockaddr_in sin_dest, sin_mask, sin_gate; +#ifdef __OpenBSD__ + struct sockaddr_mpls smpls; +#endif + union sockunion *smplsp = NULL; struct nexthop *nexthop, *tnexthop; int recursing; int nexthop_num = 0; @@ -147,10 +156,23 @@ kernel_rtm_ipv4 (int cmd, struct prefix *p, struct rib *rib, int family) mask = &sin_mask; } +#ifdef __OpenBSD__ + if (nexthop->nh_label) + { + memset (&smpls, 0, sizeof (smpls)); + smpls.smpls_len = sizeof (smpls); + smpls.smpls_family = AF_MPLS; + smpls.smpls_label = + htonl (nexthop->nh_label->label[0] << MPLS_LABEL_OFFSET); + smplsp = (union sockunion *)&smpls; + } +#endif + error = rtm_write (cmd, (union sockunion *)&sin_dest, (union sockunion *)mask, gate ? (union sockunion *)&sin_gate : NULL, + smplsp, ifindex, rib->flags, rib->metric); @@ -365,6 +387,7 @@ kernel_rtm_ipv6_multipath (int cmd, struct prefix *p, struct rib *rib, (union sockunion *) &sin_dest, (union sockunion *) mask, gate ? 
(union sockunion *)&sin_gate : NULL, + NULL, ifindex, rib->flags, rib->metric); diff --git a/zebra/rtadv.c b/zebra/rtadv.c index 7edba55953..dcf31ff450 100644 --- a/zebra/rtadv.c +++ b/zebra/rtadv.c @@ -872,10 +872,8 @@ DEFUN (ipv6_nd_suppress_ra, "Neighbor discovery\n" "Suppress Router Advertisement\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; if (if_is_loopback (ifp) || CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_VRF_LOOPBACK)) @@ -885,7 +883,6 @@ DEFUN (ipv6_nd_suppress_ra, } ipv6_nd_suppress_ra_set (ifp, RA_SUPPRESS); - zif = ifp->info; zif->rtadv.configured = 0; return CMD_SUCCESS; } @@ -898,10 +895,8 @@ DEFUN (no_ipv6_nd_suppress_ra, "Neighbor discovery\n" "Suppress Router Advertisement\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; if (if_is_loopback (ifp) || CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_VRF_LOOPBACK)) @@ -911,7 +906,6 @@ DEFUN (no_ipv6_nd_suppress_ra, } ipv6_nd_suppress_ra_set (ifp, RA_ENABLE); - zif = ifp->info; zif->rtadv.configured = 1; return CMD_SUCCESS; } @@ -925,8 +919,8 @@ DEFUN (ipv6_nd_ra_interval_msec, "Router Advertisement interval in milliseconds\n") { int idx_number = 4; + VTY_DECLVAR_CONTEXT (interface, ifp); unsigned interval; - struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); struct zebra_ns *zns; @@ -961,8 +955,8 @@ DEFUN (ipv6_nd_ra_interval, "Router Advertisement interval in seconds\n") { int idx_number = 3; + VTY_DECLVAR_CONTEXT (interface, ifp); unsigned interval; - struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); struct zebra_ns *zns; @@ -999,13 +993,11 @@ DEFUN (no_ipv6_nd_ra_interval, "Specify millisecond router advertisement interval\n" "Router Advertisement interval in milliseconds\n") { - struct interface *ifp; - struct zebra_if *zif; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf; struct zebra_ns *zns; - ifp = (struct interface *) vty->index; - zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); zns = zvrf->zns; @@ -1028,12 +1020,9 @@ DEFUN (ipv6_nd_ra_lifetime, "Router lifetime in seconds (0 stands for a non-default gw)\n") { int idx_number = 3; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; int lifetime; - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; VTY_GET_INTEGER_RANGE ("router lifetime", lifetime, argv[idx_number]->arg, 0, 9000); @@ -1061,11 +1050,8 @@ DEFUN (no_ipv6_nd_ra_lifetime, "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvDefaultLifetime = -1; @@ -1081,7 +1067,7 @@ DEFUN (ipv6_nd_reachable_time, "Reachable time in milliseconds\n") { int idx_number = 3; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("reachable time", zif->rtadv.AdvReachableTime, argv[idx_number]->arg, 1, RTADV_MAX_REACHABLE_TIME); return CMD_SUCCESS; @@ -1096,11 +1082,8 @@ DEFUN 
(no_ipv6_nd_reachable_time, "Reachable time\n" "Reachable time in milliseconds\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvReachableTime = 0; @@ -1116,7 +1099,7 @@ DEFUN (ipv6_nd_homeagent_preference, "preference value (default is 0, least preferred)\n") { int idx_number = 3; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent preference", zif->rtadv.HomeAgentPreference, argv[idx_number]->arg, 0, 65535); return CMD_SUCCESS; @@ -1131,11 +1114,8 @@ DEFUN (no_ipv6_nd_homeagent_preference, "Home Agent preference\n" "preference value (default is 0, least preferred)\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.HomeAgentPreference = 0; @@ -1151,7 +1131,7 @@ DEFUN (ipv6_nd_homeagent_lifetime, "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") { int idx_number = 3; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent lifetime", zif->rtadv.HomeAgentLifetime, argv[idx_number]->arg, 0, RTADV_MAX_HALIFETIME); return CMD_SUCCESS; @@ -1166,11 +1146,8 @@ DEFUN (no_ipv6_nd_homeagent_lifetime, "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.HomeAgentLifetime = -1; @@ -1184,11 +1161,8 @@ DEFUN (ipv6_nd_managed_config_flag, "Neighbor discovery\n" "Managed address configuration flag\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvManagedFlag = 1; @@ -1203,11 +1177,8 @@ DEFUN (no_ipv6_nd_managed_config_flag, "Neighbor discovery\n" "Managed address configuration flag\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvManagedFlag = 0; @@ -1221,11 +1192,8 @@ DEFUN (ipv6_nd_homeagent_config_flag, "Neighbor discovery\n" "Home Agent configuration flag\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 1; @@ -1240,11 +1208,8 @@ DEFUN (no_ipv6_nd_homeagent_config_flag, "Neighbor discovery\n" "Home Agent configuration flag\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 0; @@ -1258,11 +1223,8 @@ DEFUN (ipv6_nd_adv_interval_config_option, "Neighbor discovery\n" "Advertisement Interval Option\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; 
zif->rtadv.AdvIntervalOption = 1; @@ -1277,11 +1239,8 @@ DEFUN (no_ipv6_nd_adv_interval_config_option, "Neighbor discovery\n" "Advertisement Interval Option\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvIntervalOption = 0; @@ -1295,11 +1254,8 @@ DEFUN (ipv6_nd_other_config_flag, "Neighbor discovery\n" "Other statefull configuration flag\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 1; @@ -1314,11 +1270,8 @@ DEFUN (no_ipv6_nd_other_config_flag, "Neighbor discovery\n" "Other statefull configuration flag\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 0; @@ -1367,14 +1320,11 @@ DEFUN (ipv6_nd_prefix, } /* business */ + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zebra_if = ifp->info; int ret; - struct interface *ifp; - struct zebra_if *zebra_if; struct rtadv_prefix rp; - ifp = (struct interface *) vty->index; - zebra_if = ifp->info; - ret = str2prefix_ipv6 (prefix, &rp.prefix); if (!ret) { @@ -1422,16 +1372,12 @@ DEFUN (no_ipv6_nd_prefix, "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") { + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zebra_if = ifp->info; int ret; - struct interface *ifp; - struct zebra_if *zebra_if; struct rtadv_prefix rp; - char *prefix = argv[4]->arg; - ifp = (struct interface *) vty->index; - zebra_if = ifp->info; - ret = str2prefix_ipv6 (prefix, &rp.prefix); if (!ret) { @@ -1461,13 +1407,10 @@ DEFUN (ipv6_nd_router_preference, "Medium default router preference (default)\n") { int idx_high_medium_low = 3; - struct interface *ifp; - struct zebra_if *zif; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; int i = 0; - ifp = (struct interface *) vty->index; - zif = ifp->info; - while (0 != rtadv_pref_strs[i]) { if (strncmp (argv[idx_high_medium_low]->arg, rtadv_pref_strs[i], 1) == 0) @@ -1492,11 +1435,8 @@ DEFUN (no_ipv6_nd_router_preference, "Medium default router preference (default)\n" "Low default router preference\n") { - struct interface *ifp; - struct zebra_if *zif; - - ifp = (struct interface *) vty->index; - zif = ifp->info; + VTY_DECLVAR_CONTEXT (interface, ifp); + struct zebra_if *zif = ifp->info; zif->rtadv.DefaultPreference = RTADV_PREF_MEDIUM; /* Default per RFC4191. 
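The conversions in this file (and in irdp_interface.c earlier in the patch) replace the repeated "ifp = vty->index; if (!ifp) return CMD_WARNING;" boilerplate with a single VTY_DECLVAR_CONTEXT() line. The macro's definition lives in the library changes and is not shown in this excerpt; conceptually it has to expand to something along these lines (hypothetical expansion, for orientation only):

/* Illustrative only -- the real macro in lib/vty.h may differ in detail. */
#define VTY_DECLVAR_CONTEXT(structname, var)                               \
  struct structname *var = vty->index;                                     \
  if (var == NULL)                                                         \
    {                                                                      \
      vty_out (vty, "Current configuration object was deleted%s",          \
               VTY_NEWLINE);                                                \
      return CMD_WARNING;                                                  \
    }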
*/ @@ -1512,7 +1452,7 @@ DEFUN (ipv6_nd_mtu, "MTU in bytes\n") { int idx_number = 3; - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("MTU", zif->rtadv.AdvLinkMTU, argv[idx_number]->arg, 1, 65535); return CMD_SUCCESS; @@ -1527,7 +1467,7 @@ DEFUN (no_ipv6_nd_mtu, "Advertised MTU\n" "MTU in bytes\n") { - struct interface *ifp = (struct interface *) vty->index; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *zif = ifp->info; zif->rtadv.AdvLinkMTU = 0; return CMD_SUCCESS; diff --git a/zebra/rtread_getmsg.c b/zebra/rtread_getmsg.c index 0facc1a19f..c6eee75174 100644 --- a/zebra/rtread_getmsg.c +++ b/zebra/rtread_getmsg.c @@ -26,6 +26,7 @@ #include "log.h" #include "if.h" #include "vrf.h" +#include "vty.h" #include "zebra/rib.h" #include "zebra/zserv.h" diff --git a/zebra/test_main.c b/zebra/test_main.c index d3813d7356..2829328546 100644 --- a/zebra/test_main.c +++ b/zebra/test_main.c @@ -125,9 +125,8 @@ DEFUN (test_interface_state, "down\n") { int idx_up_down = 1; - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); - ifp = vty->index; if (ifp->ifindex == IFINDEX_INTERNAL) { ifp->ifindex = ++test_ifindex; @@ -293,6 +292,7 @@ main (int argc, char **argv) /* Vty related initialize. */ signal_init (zebrad.master, array_size(zebra_signals), zebra_signals); cmd_init (1); + vty_config_lockless (); vty_init (zebrad.master); memory_init (); zebra_debug_init (); diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c index 220fddaf67..b2026c1e0c 100644 --- a/zebra/zebra_fpm.c +++ b/zebra/zebra_fpm.c @@ -141,6 +141,15 @@ typedef enum { } zfpm_state_t; +/* + * Message format to be used to communicate with the FPM. + */ +typedef enum +{ + ZFPM_MSG_FORMAT_NONE, + ZFPM_MSG_FORMAT_NETLINK, + ZFPM_MSG_FORMAT_PROTOBUF, +} zfpm_msg_format_e; /* * Globals. */ @@ -152,10 +161,16 @@ typedef struct zfpm_glob_t_ */ int enabled; + /* + * Message format to be used to communicate with the fpm. + */ + zfpm_msg_format_e message_format; + struct thread_master *master; zfpm_state_t state; + in_addr_t fpm_server; /* * Port on which the FPM is running. */ @@ -865,19 +880,40 @@ zfpm_writes_pending (void) */ static inline int zfpm_encode_route (rib_dest_t *dest, struct rib *rib, char *in_buf, - size_t in_buf_len) + size_t in_buf_len, fpm_msg_type_e *msg_type) { -#ifndef HAVE_NETLINK - return 0; -#else - + size_t len; int cmd; + len = 0; - cmd = rib ? RTM_NEWROUTE : RTM_DELROUTE; + *msg_type = FPM_MSG_TYPE_NONE; - return zfpm_netlink_encode_route (cmd, dest, rib, in_buf, in_buf_len); + switch (zfpm_g->message_format) { + case ZFPM_MSG_FORMAT_PROTOBUF: +#ifdef HAVE_PROTOBUF + len = zfpm_protobuf_encode_route (dest, rib, (uint8_t *) in_buf, + in_buf_len); + *msg_type = FPM_MSG_TYPE_PROTOBUF; +#endif + break; + + case ZFPM_MSG_FORMAT_NETLINK: +#ifdef HAVE_NETLINK + *msg_type = FPM_MSG_TYPE_NETLINK; + cmd = rib ? RTM_NEWROUTE : RTM_DELROUTE; + len = zfpm_netlink_encode_route (cmd, dest, rib, in_buf, in_buf_len); + assert(fpm_msg_align(len) == len); + *msg_type = FPM_MSG_TYPE_NETLINK; #endif /* HAVE_NETLINK */ + break; + + default: + break; + } + + return len; + } /* @@ -885,14 +921,14 @@ zfpm_encode_route (rib_dest_t *dest, struct rib *rib, char *in_buf, * * Returns the rib that is to be sent to the FPM for a given dest. 
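With the format made selectable, zfpm_encode_route() now also reports which FPM message type it produced, and zfpm_build_updates() stamps that into the per-message header instead of hard-coding netlink. Condensed, the framing of one update looks like this (buffer names are placeholders for the stream pointers used above; sketch only):

  fpm_msg_hdr_t *hdr = (fpm_msg_hdr_t *) buf;
  char *data = fpm_msg_data (hdr);               /* payload starts after the header */
  fpm_msg_type_e msg_type;
  size_t data_len, msg_len;

  data_len = zfpm_encode_route (dest, rib, data, buf_end - data, &msg_type);
  if (data_len)
    {
      hdr->version = FPM_PROTO_VERSION;
      hdr->msg_type = msg_type;                  /* FPM_MSG_TYPE_NETLINK or _PROTOBUF */
      msg_len = fpm_data_len_to_msg_len (data_len);
      hdr->msg_len = htons (msg_len);            /* header plus padded payload length */
    }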
*/ -static struct rib * +struct rib * zfpm_route_for_update (rib_dest_t *dest) { struct rib *rib; RIB_DEST_FOREACH_ROUTE (dest, rib) { - if (!CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED)) + if (!CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB)) continue; return rib; @@ -921,6 +957,7 @@ zfpm_build_updates (void) fpm_msg_hdr_t *hdr; struct rib *rib; int is_add, write_msg; + fpm_msg_type_e msg_type; s = zfpm_g->obuf; @@ -945,7 +982,6 @@ zfpm_build_updates (void) hdr = (fpm_msg_hdr_t *) buf; hdr->version = FPM_PROTO_VERSION; - hdr->msg_type = FPM_MSG_TYPE_NETLINK; data = fpm_msg_data (hdr); @@ -965,11 +1001,13 @@ zfpm_build_updates (void) } if (write_msg) { - data_len = zfpm_encode_route (dest, rib, (char *) data, buf_end - data); + data_len = zfpm_encode_route (dest, rib, (char *) data, buf_end - data, + &msg_type); assert (data_len); if (data_len) { + hdr->msg_type = msg_type; msg_len = fpm_data_len_to_msg_len (data_len); hdr->msg_len = htons (msg_len); stream_forward_endp (s, msg_len); @@ -1129,7 +1167,10 @@ zfpm_connect_cb (struct thread *t) #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN serv.sin_len = sizeof (struct sockaddr_in); #endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */ - serv.sin_addr.s_addr = htonl (INADDR_LOOPBACK); + if (!zfpm_g->fpm_server) + serv.sin_addr.s_addr = htonl (INADDR_LOOPBACK); + else + serv.sin_addr.s_addr = (zfpm_g->fpm_server); /* * Connect to the FPM. @@ -1520,6 +1561,134 @@ DEFUN (clear_zebra_fpm_stats, return CMD_SUCCESS; } +/* + * update fpm connection information + */ +DEFUN ( fpm_remote_ip, + fpm_remote_ip_cmd, + "fpm connection ip A.B.C.D port (1-65535)", + "fpm connection remote ip and port\n" + "Remote fpm server ip A.B.C.D\n" + "Enter ip ") +{ + + in_addr_t fpm_server; + uint32_t port_no; + + fpm_server = inet_addr (argv[3]->arg); + if (fpm_server == INADDR_NONE) + return CMD_ERR_INCOMPLETE; + + port_no = atoi (argv[5]->arg); + if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT) + return CMD_ERR_INCOMPLETE; + + zfpm_g->fpm_server = fpm_server; + zfpm_g->fpm_port = port_no; + + + return CMD_SUCCESS; +} + +DEFUN ( no_fpm_remote_ip, + no_fpm_remote_ip_cmd, + "no fpm connection ip A.B.C.D port (1-65535)", + "fpm connection remote ip and port\n" + "Connection\n" + "Remote fpm server ip A.B.C.D\n" + "Enter ip ") +{ + if (zfpm_g->fpm_server != inet_addr (argv[4]->arg) || + zfpm_g->fpm_port != atoi (argv[6]->arg)) + return CMD_ERR_NO_MATCH; + + zfpm_g->fpm_server = FPM_DEFAULT_IP; + zfpm_g->fpm_port = FPM_DEFAULT_PORT; + + return CMD_SUCCESS; +} + + +/* + * zfpm_init_message_format + */ +static inline void +zfpm_init_message_format (const char *format) +{ + int have_netlink, have_protobuf; + + have_netlink = have_protobuf = 0; + +#ifdef HAVE_NETLINK + have_netlink = 1; +#endif + +#ifdef HAVE_PROTOBUF + have_protobuf = 1; +#endif + + zfpm_g->message_format = ZFPM_MSG_FORMAT_NONE; + + if (!format) + { + if (have_netlink) + { + zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK; + } + else if (have_protobuf) + { + zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF; + } + return; + } + + if (!strcmp ("netlink", format)) + { + if (!have_netlink) + { + zlog_err ("FPM netlink message format is not available"); + return; + } + zfpm_g->message_format = ZFPM_MSG_FORMAT_NETLINK; + return; + } + + if (!strcmp ("protobuf", format)) + { + if (!have_protobuf) + { + zlog_err ("FPM protobuf message format is not available"); + return; + } + zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF; + return; + } + + zlog_warn ("Unknown fpm format '%s'", format); +} + +/** + * fpm_remote_srv_write 
+ * + * Module to write remote fpm connection + * + * Returns ZERO on success. + */ + +int fpm_remote_srv_write (struct vty *vty ) +{ + struct in_addr in; + + in.s_addr = zfpm_g->fpm_server; + + if (zfpm_g->fpm_server != FPM_DEFAULT_IP || + zfpm_g->fpm_port != FPM_DEFAULT_PORT) + vty_out (vty,"fpm connection ip %s port %d%s", inet_ntoa (in),zfpm_g->fpm_port,VTY_NEWLINE); + + return 0; +} + + /** * zfpm_init * @@ -1527,11 +1696,13 @@ DEFUN (clear_zebra_fpm_stats, * * @param[in] port port at which FPM is running. * @param[in] enable TRUE if the zebra FPM module should be enabled + * @param[in] format to use to talk to the FPM. Can be 'netink' or 'protobuf'. * * Returns TRUE on success. */ int -zfpm_init (struct thread_master *master, int enable, uint16_t port) +zfpm_init (struct thread_master *master, int enable, uint16_t port, + const char *format) { static int initialized = 0; @@ -1547,27 +1718,32 @@ zfpm_init (struct thread_master *master, int enable, uint16_t port) zfpm_g->sock = -1; zfpm_g->state = ZFPM_STATE_IDLE; - /* - * Netlink must currently be available for the Zebra-FPM interface - * to be enabled. - */ -#ifndef HAVE_NETLINK - enable = 0; -#endif - - zfpm_g->enabled = enable; - zfpm_stats_init (&zfpm_g->stats); zfpm_stats_init (&zfpm_g->last_ivl_stats); zfpm_stats_init (&zfpm_g->cumulative_stats); install_element (ENABLE_NODE, &show_zebra_fpm_stats_cmd); install_element (ENABLE_NODE, &clear_zebra_fpm_stats_cmd); + install_element (CONFIG_NODE, &fpm_remote_ip_cmd); + install_element (CONFIG_NODE, &no_fpm_remote_ip_cmd); + + zfpm_init_message_format(format); + + /* + * Disable FPM interface if no suitable format is available. + */ + if (zfpm_g->message_format == ZFPM_MSG_FORMAT_NONE) + enable = 0; + + zfpm_g->enabled = enable; if (!enable) { return 1; } + if (!zfpm_g->fpm_server) + zfpm_g->fpm_server = FPM_DEFAULT_IP; + if (!port) port = FPM_DEFAULT_PORT; diff --git a/zebra/zebra_fpm.h b/zebra/zebra_fpm.h index 44dec02868..fdb069965b 100644 --- a/zebra/zebra_fpm.h +++ b/zebra/zebra_fpm.h @@ -28,7 +28,9 @@ /* * Externs. */ -extern int zfpm_init (struct thread_master *master, int enable, uint16_t port); +extern int zfpm_init (struct thread_master *master, int enable, uint16_t port, + const char *message_format); extern void zfpm_trigger_update (struct route_node *rn, const char *reason); +extern int fpm_remote_srv_write (struct vty *vty); #endif /* _ZEBRA_FPM_H */ diff --git a/zebra/zebra_fpm_dt.c b/zebra/zebra_fpm_dt.c new file mode 100644 index 0000000000..bd171c89b2 --- /dev/null +++ b/zebra/zebra_fpm_dt.c @@ -0,0 +1,275 @@ +/* + * zebra_fpm_dt.c + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ + +/* + * Developer tests for the zebra code that interfaces with the + * forwarding plane manager. + * + * The functions here are built into developer builds of zebra (when + * DEV_BUILD is defined), and can be called via the 'invoke' cli + * command. + * + * For example: + * + * # invoke zebra function zfpm_dt_benchmark_protobuf_encode 100000 + * + */ + +#include +#include "log.h" +#include "vrf.h" + +#include "zebra/rib.h" + +#include "zebra_fpm_private.h" + +#include "qpb/qpb_allocator.h" +#include "qpb/linear_allocator.h" + +#include "qpb/qpb.h" +#include "fpm/fpm.pb-c.h" + +/* + * Externs. + */ +extern int zfpm_dt_benchmark_netlink_encode (int argc, const char **argv); +extern int zfpm_dt_benchmark_protobuf_encode (int argc, const char **argv); +extern int zfpm_dt_benchmark_protobuf_decode (int argc, const char **argv); + +/* + * zfpm_dt_find_route + * + * Selects a suitable rib destination for fpm interface tests. + */ +static int +zfpm_dt_find_route (rib_dest_t **dest_p, struct rib **rib_p) +{ + struct route_node *rnode; + route_table_iter_t iter; + struct route_table *table; + rib_dest_t *dest; + struct rib *rib; + int ret; + + table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, VRF_DEFAULT); + if (!table) + return 0; + + route_table_iter_init(&iter, table); + while ((rnode = route_table_iter_next(&iter))) + { + dest = rib_dest_from_rnode (rnode); + + if (!dest) + continue; + + rib = zfpm_route_for_update(dest); + if (!rib) + continue; + + if (rib->nexthop_active_num <= 0) + continue; + + *dest_p = dest; + *rib_p = rib; + ret = 1; + goto done; + } + + ret = 0; + + done: + route_table_iter_cleanup(&iter); + return ret; +} +#ifdef HAVE_NETLINK + +/* + * zfpm_dt_benchmark_netlink_encode + */ +int +zfpm_dt_benchmark_netlink_encode (int argc, const char **argv) +{ + int times, i, len; + rib_dest_t *dest; + struct rib *rib; + char buf[4096]; + + times = 100000; + if (argc > 0) { + times = atoi(argv[0]); + } + + if (!zfpm_dt_find_route(&dest, &rib)) { + return 1; + } + + for (i = 0; i < times; i++) { + len = zfpm_netlink_encode_route(RTM_NEWROUTE, dest, rib, buf, sizeof(buf)); + if (len <= 0) { + return 2; + } + } + return 0; +} + +#endif /* HAVE_NETLINK */ + +#ifdef HAVE_PROTOBUF + +/* + * zfpm_dt_benchmark_protobuf_encode + */ +int +zfpm_dt_benchmark_protobuf_encode (int argc, const char **argv) +{ + int times, i, len; + rib_dest_t *dest; + struct rib *rib; + uint8_t buf[4096]; + + times = 100000; + if (argc > 0) { + times = atoi(argv[0]); + } + + if (!zfpm_dt_find_route(&dest, &rib)) { + return 1; + } + + for (i = 0; i < times; i++) { + len = zfpm_protobuf_encode_route(dest, rib, buf, sizeof(buf)); + if (len <= 0) { + return 2; + } + } + return 0; +} + +/* + * zfpm_dt_log_fpm_message + */ +static void +zfpm_dt_log_fpm_message (Fpm__Message *msg) +{ + Fpm__AddRoute *add_route; + Fpm__Nexthop *nexthop; + struct prefix prefix; + u_char family, nh_family; + uint if_index; + char *if_name; + size_t i; + char buf[INET6_ADDRSTRLEN]; + union g_addr nh_addr; + + if (msg->type != FPM__MESSAGE__TYPE__ADD_ROUTE) + return; + + zfpm_debug ("Add route message"); + add_route = msg->add_route; + + if (!qpb_address_family_get (add_route->address_family, &family)) + return; + + if (!qpb_l3_prefix_get (add_route->key->prefix, family, &prefix)) + return; + + zfpm_debug ("Vrf id: %d, Prefix: %s/%d, Metric: %d", add_route->vrf_id, + inet_ntop (family, &prefix.u.prefix, buf, sizeof (buf)), + prefix.prefixlen, add_route->metric); + + /* + * Go over nexthops. 
+ */ + for (i = 0; i < add_route->n_nexthops; i++) + { + nexthop = add_route->nexthops[i]; + if (!qpb_if_identifier_get (nexthop->if_id, &if_index, &if_name)) + continue; + + if (nexthop->address) + qpb_l3_address_get (nexthop->address, &nh_family, &nh_addr); + + zfpm_debug ("Nexthop - if_index: %d (%s), gateway: %s, ", if_index, + if_name ? if_name : "name not specified", + nexthop->address ? inet_ntoa (nh_addr.ipv4) : "None"); + } +} + +/* + * zfpm_dt_benchmark_protobuf_decode + */ +int +zfpm_dt_benchmark_protobuf_decode (int argc, const char **argv) +{ + int times, i, len; + rib_dest_t *dest; + struct rib *rib; + uint8_t msg_buf[4096]; + QPB_DECLARE_STACK_ALLOCATOR (allocator, 8192); + Fpm__Message *fpm_msg; + + QPB_INIT_STACK_ALLOCATOR (allocator); + + times = 100000; + if (argc > 0) + times = atoi(argv[0]); + + if (!zfpm_dt_find_route (&dest, &rib)) + return 1; + + /* + * Encode the route into the message buffer once only. + */ + len = zfpm_protobuf_encode_route (dest, rib, msg_buf, sizeof (msg_buf)); + if (len <= 0) + return 2; + + // Decode once, and display the decoded message + fpm_msg = fpm__message__unpack(&allocator, len, msg_buf); + + if (fpm_msg) + { + zfpm_dt_log_fpm_message(fpm_msg); + QPB_RESET_STACK_ALLOCATOR (allocator); + } + + /* + * Decode encoded message the specified number of times. + */ + for (i = 0; i < times; i++) + { + fpm_msg = fpm__message__unpack (&allocator, len, msg_buf); + + if (!fpm_msg) + return 3; + + // fpm__message__free_unpacked(msg, NULL); + QPB_RESET_STACK_ALLOCATOR (allocator); + } + return 0; +} + +#endif /* HAVE_PROTOBUF */ diff --git a/zebra/zebra_fpm_private.h b/zebra/zebra_fpm_private.h index 809a70a445..1c4fd4c22f 100644 --- a/zebra/zebra_fpm_private.h +++ b/zebra/zebra_fpm_private.h @@ -53,4 +53,9 @@ extern int zfpm_netlink_encode_route (int cmd, rib_dest_t *dest, struct rib *rib, char *in_buf, size_t in_buf_len); +extern int +zfpm_protobuf_encode_route (rib_dest_t *dest, struct rib *rib, + uint8_t *in_buf, size_t in_buf_len); + +extern struct rib *zfpm_route_for_update (rib_dest_t *dest); #endif /* _ZEBRA_FPM_PRIVATE_H */ diff --git a/zebra/zebra_fpm_protobuf.c b/zebra/zebra_fpm_protobuf.c new file mode 100644 index 0000000000..702c355f1a --- /dev/null +++ b/zebra/zebra_fpm_protobuf.c @@ -0,0 +1,311 @@ +/* + * zebra_fpm_protobuf.c + * + * @copyright Copyright (C) 2016 Sproute Networks, Inc. + * + * @author Avneesh Sachdev + * + * This file is part of Quagga. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Quagga; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#include + +#include "log.h" +#include "rib.h" +#include "zserv.h" +#include "zebra_vrf.h" + +#include "qpb/qpb.pb-c.h" +#include "qpb/qpb.h" +#include "qpb/qpb_allocator.h" +#include "qpb/linear_allocator.h" +#include "fpm/fpm_pb.h" + +#include "zebra_fpm_private.h" + +/* + * create_delete_route_message + */ +static Fpm__DeleteRoute * +create_delete_route_message (qpb_allocator_t *allocator, rib_dest_t *dest, + struct rib *rib) +{ + Fpm__DeleteRoute *msg; + + msg = QPB_ALLOC(allocator, typeof(*msg)); + if (!msg) { + assert(0); + return NULL; + } + + fpm__delete_route__init(msg); + msg->vrf_id = rib_dest_vrf(dest)->vrf_id; + + qpb_address_family_set(&msg->address_family, rib_dest_af(dest)); + + /* + * XXX Hardcode subaddress family for now. + */ + msg->sub_address_family = QPB__SUB_ADDRESS_FAMILY__UNICAST; + msg->key = fpm_route_key_create (allocator, rib_dest_prefix(dest)); + if (!msg->key) { + assert(0); + return NULL; + } + + return msg; +} + +/* + * add_nexthop + */ +static inline int +add_nexthop (qpb_allocator_t *allocator, Fpm__AddRoute *msg, rib_dest_t *dest, + struct nexthop *nexthop) +{ + uint32_t if_index; + union g_addr *gateway, *src; + + gateway = src = NULL; + + if_index = nexthop->ifindex; + + if (nexthop->type == NEXTHOP_TYPE_IPV4 + || nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX) + { + gateway = &nexthop->gate; + if (nexthop->src.ipv4.s_addr) + src = &nexthop->src; + } + + if (nexthop->type == NEXTHOP_TYPE_IPV6 + || nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) + { + gateway = &nexthop->gate; + } + + if (nexthop->type == NEXTHOP_TYPE_IFINDEX) + { + if (nexthop->src.ipv4.s_addr) + src = &nexthop->src; + } + + if (!gateway && if_index == 0) + return 0; + + /* + * We have a valid nexthop. + */ + { + Fpm__Nexthop *pb_nh; + pb_nh = QPB_ALLOC(allocator, typeof(*pb_nh)); + if (!pb_nh) { + assert(0); + return 0; + } + + fpm__nexthop__init(pb_nh); + + if (if_index != 0) { + pb_nh->if_id = qpb_if_identifier_create (allocator, if_index); + } + + if (gateway) { + pb_nh->address = qpb_l3_address_create (allocator, gateway, + rib_dest_af(dest)); + } + + msg->nexthops[msg->n_nexthops++] = pb_nh; + } + + // TODO: Use src. + + return 1; +} + +/* + * create_add_route_message + */ +static Fpm__AddRoute * +create_add_route_message (qpb_allocator_t *allocator, rib_dest_t *dest, + struct rib *rib) +{ + Fpm__AddRoute *msg; + int discard; + struct nexthop *nexthop, *tnexthop; + int recursing; + uint num_nhs, u; + struct nexthop *nexthops[MAX (MULTIPATH_NUM, 64)]; + + msg = QPB_ALLOC(allocator, typeof(*msg)); + if (!msg) { + assert(0); + return NULL; + } + + fpm__add_route__init(msg); + + msg->vrf_id = rib_dest_vrf(dest)->vrf_id; + + qpb_address_family_set (&msg->address_family, rib_dest_af(dest)); + + /* + * XXX Hardcode subaddress family for now. + */ + msg->sub_address_family = QPB__SUB_ADDRESS_FAMILY__UNICAST; + msg->key = fpm_route_key_create (allocator, rib_dest_prefix(dest)); + qpb_protocol_set (&msg->protocol, rib->type); + + if ((rib->flags & ZEBRA_FLAG_BLACKHOLE) || (rib->flags & ZEBRA_FLAG_REJECT)) + discard = 1; + else + discard = 0; + + if (discard) + { + if (rib->flags & ZEBRA_FLAG_BLACKHOLE) { + msg->route_type = FPM__ROUTE_TYPE__BLACKHOLE; + } else if (rib->flags & ZEBRA_FLAG_REJECT) { + msg->route_type = FPM__ROUTE_TYPE__UNREACHABLE; + } else { + assert (0); + } + return msg; + } + else { + msg->route_type = FPM__ROUTE_TYPE__NORMAL; + } + + msg->metric = rib->metric; + + /* + * Figure out the set of nexthops to be added to the message. 
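+ *
+ * A nexthop is included only if it is marked ACTIVE and is not a
+ * RECURSIVE nexthop; the set is further capped at MULTIPATH_NUM (when
+ * non-zero) and at the size of the local nexthops[] array.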
+ */ + num_nhs = 0; + for (ALL_NEXTHOPS_RO (rib->nexthop, nexthop, tnexthop, recursing)) + { + if (MULTIPATH_NUM != 0 && num_nhs >= MULTIPATH_NUM) + break; + + if (num_nhs >= ZEBRA_NUM_OF(nexthops)) + break; + + if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + continue; + + if (!CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + continue; + + nexthops[num_nhs] = nexthop; + num_nhs++; + } + + if (!num_nhs) { + zfpm_debug ("netlink_encode_route(): No useful nexthop."); + assert(0); + return NULL; + } + + /* + * And add them to the message. + */ + if (!(msg->nexthops = qpb_alloc_ptr_array(allocator, num_nhs))) { + assert(0); + return NULL; + } + + msg->n_nexthops = 0; + for (u = 0; u < num_nhs; u++) { + if (!add_nexthop(allocator, msg, dest, nexthops[u])) { + assert(0); + return NULL; + } + } + + assert(msg->n_nexthops == num_nhs); + + return msg; +} + +/* + * create_route_message + */ +static Fpm__Message * +create_route_message (qpb_allocator_t *allocator, rib_dest_t *dest, + struct rib *rib) +{ + Fpm__Message *msg; + + msg = QPB_ALLOC(allocator, typeof(*msg)); + if (!msg) { + assert(0); + return NULL; + } + + fpm__message__init(msg); + + if (!rib) { + msg->type = FPM__MESSAGE__TYPE__DELETE_ROUTE; + msg->delete_route = create_delete_route_message(allocator, dest, rib); + if (!msg->delete_route) { + assert(0); + return NULL; + } + return msg; + } + + msg->type = FPM__MESSAGE__TYPE__ADD_ROUTE; + msg->add_route = create_add_route_message(allocator, dest, rib); + if (!msg->add_route) { + assert(0); + return NULL; + } + + return msg; +} + +/* + * zfpm_protobuf_encode_route + * + * Create a protobuf message corresponding to the given route in the + * given buffer space. + * + * Returns the number of bytes written to the buffer. 0 or a negative + * value indicates an error. + */ +int +zfpm_protobuf_encode_route (rib_dest_t *dest, struct rib *rib, + uint8_t *in_buf, size_t in_buf_len) +{ + Fpm__Message *msg; + QPB_DECLARE_STACK_ALLOCATOR (allocator, 4096); + size_t len; + + QPB_INIT_STACK_ALLOCATOR (allocator); + + msg = create_route_message(&allocator, dest, rib); + if (!msg) { + assert(0); + return 0; + } + + len = fpm__message__pack(msg, (uint8_t *) in_buf); + assert(len <= in_buf_len); + + QPB_RESET_STACK_ALLOCATOR (allocator); + return len; +} diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c new file mode 100644 index 0000000000..15e5c330e4 --- /dev/null +++ b/zebra/zebra_mpls.c @@ -0,0 +1,1917 @@ +/* Zebra MPLS code + * Copyright (C) 2013 Cumulus Networks, Inc. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ + +#include + +#include "prefix.h" +#include "table.h" +#include "memory.h" +#include "str.h" +#include "command.h" +#include "if.h" +#include "log.h" +#include "sockunion.h" +#include "linklist.h" +#include "thread.h" +#include "workqueue.h" +#include "prefix.h" +#include "routemap.h" +#include "stream.h" +#include "nexthop.h" +#include "lib/json.h" + +#include "zebra/rib.h" +#include "zebra/rt.h" +#include "zebra/zserv.h" +#include "zebra/redistribute.h" +#include "zebra/debug.h" +#include "zebra/zebra_memory.h" +#include "zebra/zebra_vrf.h" +#include "zebra/zebra_mpls.h" + +DEFINE_MTYPE_STATIC(ZEBRA, LSP, "MPLS LSP object") +DEFINE_MTYPE_STATIC(ZEBRA, SLSP, "MPLS static LSP config") +DEFINE_MTYPE_STATIC(ZEBRA, NHLFE, "MPLS nexthop object") +DEFINE_MTYPE_STATIC(ZEBRA, SNHLFE, "MPLS static nexthop object") +DEFINE_MTYPE_STATIC(ZEBRA, SNHLFE_IFNAME, "MPLS static nexthop ifname") + +int mpls_enabled; + +/* Default rtm_table for all clients */ +extern struct zebra_t zebrad; + +/* static function declarations */ +static unsigned int +label_hash (void *p); +static int +label_cmp (const void *p1, const void *p2); +static int +nhlfe_nexthop_active_ipv4 (zebra_nhlfe_t *nhlfe, struct nexthop *nexthop); +static int +nhlfe_nexthop_active_ipv6 (zebra_nhlfe_t *nhlfe, struct nexthop *nexthop); +static int +nhlfe_nexthop_active (zebra_nhlfe_t *nhlfe); +static void +lsp_select_best_nhlfe (zebra_lsp_t *lsp); +static void +lsp_uninstall_from_kernel (struct hash_backet *backet, void *ctxt); +static void +lsp_schedule (struct hash_backet *backet, void *ctxt); +static wq_item_status +lsp_process (struct work_queue *wq, void *data); +static void +lsp_processq_del (struct work_queue *wq, void *data); +static void +lsp_processq_complete (struct work_queue *wq); +static int +lsp_processq_add (zebra_lsp_t *lsp); +static void * +lsp_alloc (void *p); +static char * +nhlfe2str (zebra_nhlfe_t *nhlfe, char *buf, int size); +static int +nhlfe_nhop_match (zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex); +static zebra_nhlfe_t * +nhlfe_find (zebra_lsp_t *lsp, enum lsp_types_t lsp_type, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex); +static zebra_nhlfe_t * +nhlfe_add (zebra_lsp_t *lsp, enum lsp_types_t lsp_type, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex, mpls_label_t out_label); +static int +nhlfe_del (zebra_nhlfe_t *snhlfe); +static int +mpls_lsp_uninstall_all (struct hash *lsp_table, zebra_lsp_t *lsp, + enum lsp_types_t type); +static int +mpls_static_lsp_uninstall_all (struct zebra_vrf *zvrf, mpls_label_t in_label); +static void +nhlfe_print (zebra_nhlfe_t *nhlfe, struct vty *vty); +static void +lsp_print (zebra_lsp_t *lsp, void *ctxt); +static void * +slsp_alloc (void *p); +static int +snhlfe_match (zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex); +static zebra_snhlfe_t * +snhlfe_find (zebra_slsp_t *slsp, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex); +static zebra_snhlfe_t * +snhlfe_add (zebra_slsp_t *slsp, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex, + mpls_label_t out_label); +static int +snhlfe_del (zebra_snhlfe_t *snhlfe); +static int +snhlfe_del_all (zebra_slsp_t *slsp); +static char * +snhlfe2str (zebra_snhlfe_t *snhlfe, char *buf, int size); +static void +mpls_processq_init (struct zebra_t *zebra); + + + + +/* Static functions */ + +/* + * 
Hash function for label. + */ +static unsigned int +label_hash (void *p) +{ + const zebra_ile_t *ile = p; + + return (jhash_1word(ile->in_label, 0)); +} + +/* + * Compare 2 LSP hash entries based on in-label. + */ +static int +label_cmp (const void *p1, const void *p2) +{ + const zebra_ile_t *ile1 = p1; + const zebra_ile_t *ile2 = p2; + + return (ile1->in_label == ile2->in_label); +} + +/* + * Check if an IPv4 nexthop for a NHLFE is active. Update nexthop based on + * the passed flag. + * NOTE: Looking only for connected routes right now. + */ +static int +nhlfe_nexthop_active_ipv4 (zebra_nhlfe_t *nhlfe, struct nexthop *nexthop) +{ + struct route_table *table; + struct prefix_ipv4 p; + struct route_node *rn; + struct rib *match; + + table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, VRF_DEFAULT); + if (!table) + return 0; + + /* Lookup nexthop in IPv4 routing table. */ + memset (&p, 0, sizeof (struct prefix_ipv4)); + p.family = AF_INET; + p.prefixlen = IPV4_MAX_PREFIXLEN; + p.prefix = nexthop->gate.ipv4; + + rn = route_node_match (table, (struct prefix *) &p); + if (!rn) + return 0; + + route_unlock_node (rn); + + /* Locate a valid connected route. */ + RNODE_FOREACH_RIB (rn, match) + { + if ((match->type == ZEBRA_ROUTE_CONNECT) && + !CHECK_FLAG (match->status, RIB_ENTRY_REMOVED) && + CHECK_FLAG (match->flags, ZEBRA_FLAG_SELECTED)) + break; + } + + if (!match || !match->nexthop) + return 0; + + nexthop->ifindex = match->nexthop->ifindex; + return 1; +} + + +/* + * Check if an IPv6 nexthop for a NHLFE is active. Update nexthop based on + * the passed flag. + * NOTE: Looking only for connected routes right now. + */ +static int +nhlfe_nexthop_active_ipv6 (zebra_nhlfe_t *nhlfe, struct nexthop *nexthop) +{ + struct route_table *table; + struct prefix_ipv6 p; + struct route_node *rn; + struct rib *match; + + table = zebra_vrf_table (AFI_IP6, SAFI_UNICAST, VRF_DEFAULT); + if (!table) + return 0; + + /* Lookup nexthop in IPv6 routing table. */ + memset (&p, 0, sizeof (struct prefix_ipv6)); + p.family = AF_INET6; + p.prefixlen = IPV6_MAX_PREFIXLEN; + p.prefix = nexthop->gate.ipv6; + + rn = route_node_match (table, (struct prefix *) &p); + if (!rn) + return 0; + + route_unlock_node (rn); + + /* Locate a valid connected route. */ + RNODE_FOREACH_RIB (rn, match) + { + if ((match->type == ZEBRA_ROUTE_CONNECT) && + !CHECK_FLAG (match->status, RIB_ENTRY_REMOVED) && + CHECK_FLAG (match->flags, ZEBRA_FLAG_SELECTED)) + break; + } + + if (!match || !match->nexthop) + return 0; + + nexthop->ifindex = match->nexthop->ifindex; + return 1; +} + + +/* + * Check the nexthop reachability for a NHLFE and return if valid (reachable) + * or not. + * NOTE: Each NHLFE points to only 1 nexthop. + */ +static int +nhlfe_nexthop_active (zebra_nhlfe_t *nhlfe) +{ + struct nexthop *nexthop; + struct interface *ifp; + + nexthop = nhlfe->nexthop; + if (!nexthop) // unexpected + return 0; + + /* Check on nexthop based on type. 
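+ *
+ * IPv4 and global IPv6 gateways are resolved through the connected-route
+ * lookups above (nhlfe_nexthop_active_ipv4/_ipv6); an IPv6 link-local
+ * gateway is instead considered active when the interface identified by
+ * nexthop->ifindex exists and is operative.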
*/ + switch (nexthop->type) + { + case NEXTHOP_TYPE_IPV4: + if (nhlfe_nexthop_active_ipv4 (nhlfe, nexthop)) + SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + else + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + break; + + case NEXTHOP_TYPE_IPV6: + if (nhlfe_nexthop_active_ipv6 (nhlfe, nexthop)) + SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + else + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + break; + + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (IN6_IS_ADDR_LINKLOCAL (&nexthop->gate.ipv6)) + { + ifp = if_lookup_by_index (nexthop->ifindex); + if (ifp && if_is_operative(ifp)) + SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + else + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + } + else + { + if (nhlfe_nexthop_active_ipv6 (nhlfe, nexthop)) + SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + else + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); + } + break; + + default: + break; + } + + return CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE); +} + +/* + * Walk through NHLFEs for a LSP forwarding entry, verify nexthop + * reachability and select the best. Multipath entries are also + * marked. This is invoked when an LSP scheduled for processing (due + * to some change) is examined. + */ +static void +lsp_select_best_nhlfe (zebra_lsp_t *lsp) +{ + zebra_nhlfe_t *nhlfe; + zebra_nhlfe_t *best; + struct nexthop *nexthop; + int changed = 0; + + if (!lsp) + return; + + best = NULL; + lsp->num_ecmp = 0; + UNSET_FLAG (lsp->flags, LSP_FLAG_CHANGED); + + /* + * First compute the best path, after checking nexthop status. We are only + * concerned with non-deleted NHLFEs. + */ + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + /* Clear selection flags. */ + UNSET_FLAG (nhlfe->flags, + (NHLFE_FLAG_SELECTED | NHLFE_FLAG_MULTIPATH)); + + if (!CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_DELETED) && + nhlfe_nexthop_active (nhlfe)) + { + if (!best || (nhlfe->distance < best->distance)) + best = nhlfe; + } + } + + lsp->best_nhlfe = best; + if (!lsp->best_nhlfe) + return; + + /* Mark best NHLFE as selected. */ + SET_FLAG (lsp->best_nhlfe->flags, NHLFE_FLAG_SELECTED); + + /* + * If best path exists, see if there is ECMP. While doing this, note if a + * new (uninstalled) NHLFE has been selected, an installed entry that is + * still selected has a change or an installed entry is to be removed. + */ + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + int nh_chg, nh_sel, nh_inst; + + nexthop = nhlfe->nexthop; + if (!nexthop) // unexpected + continue; + + if (!CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_DELETED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE) && + (nhlfe->distance == lsp->best_nhlfe->distance)) + { + SET_FLAG (nhlfe->flags, NHLFE_FLAG_SELECTED); + SET_FLAG (nhlfe->flags, NHLFE_FLAG_MULTIPATH); + lsp->num_ecmp++; + } + + if (CHECK_FLAG (lsp->flags, LSP_FLAG_INSTALLED) && + !changed) + { + nh_chg = CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_CHANGED); + nh_sel = CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_SELECTED); + nh_inst = CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + + if ((nh_sel && !nh_inst) || + (nh_sel && nh_inst && nh_chg) || + (nh_inst && !nh_sel)) + changed = 1; + } + + /* We have finished examining, clear changed flag. */ + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_CHANGED); + } + + if (changed) + SET_FLAG (lsp->flags, LSP_FLAG_CHANGED); +} + +/* + * Delete LSP forwarding entry from kernel, if installed. Called upon + * process exit. 
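+ *
+ * This is a hash-table walk callback; at shutdown it is driven from
+ * zebra_mpls_close_tables() as:
+ *
+ *   hash_iterate (zvrf->lsp_table, lsp_uninstall_from_kernel, NULL);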
+ */ +static void +lsp_uninstall_from_kernel (struct hash_backet *backet, void *ctxt) +{ + zebra_lsp_t *lsp; + + lsp = (zebra_lsp_t *) backet->data; + if (CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED)) + kernel_del_lsp (lsp); +} + +/* + * Schedule LSP forwarding entry for processing. Called upon changes + * that may impact LSPs such as nexthop / connected route changes. + */ +static void +lsp_schedule (struct hash_backet *backet, void *ctxt) +{ + zebra_lsp_t *lsp; + + lsp = (zebra_lsp_t *) backet->data; + lsp_processq_add (lsp); +} + +/* + * Process a LSP entry that is in the queue. Recalculate best NHLFE and + * any multipaths and update or delete from the kernel, as needed. + */ +static wq_item_status +lsp_process (struct work_queue *wq, void *data) +{ + zebra_lsp_t *lsp; + zebra_nhlfe_t *oldbest, *newbest; + char buf[BUFSIZ], buf2[BUFSIZ]; + + lsp = (zebra_lsp_t *)data; + if (!lsp) // unexpected + return WQ_SUCCESS; + + oldbest = lsp->best_nhlfe; + + /* Select best NHLFE(s) */ + lsp_select_best_nhlfe (lsp); + + newbest = lsp->best_nhlfe; + + if (IS_ZEBRA_DEBUG_MPLS) + { + if (oldbest) + nhlfe2str (oldbest, buf, BUFSIZ); + if (newbest) + nhlfe2str (newbest, buf2, BUFSIZ); + zlog_debug ("Process LSP in-label %u oldbest %s newbest %s " + "flags 0x%x ecmp# %d", + lsp->ile.in_label, oldbest ? buf : "NULL", + newbest ? buf2 : "NULL", lsp->flags, lsp->num_ecmp); + } + + if (!CHECK_FLAG (lsp->flags, LSP_FLAG_INSTALLED)) + { + /* Not already installed */ + if (newbest) + kernel_add_lsp (lsp); + } + else + { + /* Installed, may need an update and/or delete. */ + if (!newbest) + kernel_del_lsp (lsp); + else if (CHECK_FLAG (lsp->flags, LSP_FLAG_CHANGED)) + kernel_upd_lsp (lsp); + } + + return WQ_SUCCESS; +} + + +/* + * Callback upon processing completion of a LSP forwarding entry. + */ +static void +lsp_processq_del (struct work_queue *wq, void *data) +{ + struct zebra_vrf *zvrf; + zebra_lsp_t *lsp; + struct hash *lsp_table; + zebra_nhlfe_t *nhlfe, *nhlfe_next; + + zvrf = vrf_info_lookup(VRF_DEFAULT); + assert (zvrf); + + lsp_table = zvrf->lsp_table; + if (!lsp_table) // unexpected + return; + + lsp = (zebra_lsp_t *)data; + if (!lsp) // unexpected + return; + + /* Clear flag, remove any NHLFEs marked for deletion. If no NHLFEs exist, + * delete LSP entry also. + */ + UNSET_FLAG (lsp->flags, LSP_FLAG_SCHEDULED); + + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe_next) + { + nhlfe_next = nhlfe->next; + if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED)) + nhlfe_del (nhlfe); + } + + if (!lsp->nhlfe_list) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("Free LSP in-label %u flags 0x%x", + lsp->ile.in_label, lsp->flags); + + lsp = hash_release(lsp_table, &lsp->ile); + if (lsp) + XFREE(MTYPE_LSP, lsp); + } +} + +/* + * Callback upon finishing the processing of all scheduled + * LSP forwarding entries. + */ +static void +lsp_processq_complete (struct work_queue *wq) +{ + /* Nothing to do for now. */ +} + +/* + * Add LSP forwarding entry to queue for subsequent processing. + */ +static int +lsp_processq_add (zebra_lsp_t *lsp) +{ + /* If already scheduled, exit. */ + if (CHECK_FLAG (lsp->flags, LSP_FLAG_SCHEDULED)) + return 0; + + work_queue_add (zebrad.lsp_process_q, lsp); + SET_FLAG (lsp->flags, LSP_FLAG_SCHEDULED); + return 0; +} + +/* + * Callback to allocate LSP forwarding table entry. 
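+ *
+ * Passed as the allocation callback of hash_get(), so a lookup for an
+ * unknown in-label creates the forwarding entry on the fly, e.g.:
+ *
+ *   tmp_ile.in_label = in_label;
+ *   lsp = hash_get (lsp_table, &tmp_ile, lsp_alloc);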
+ */ +static void * +lsp_alloc (void *p) +{ + const zebra_ile_t *ile = p; + zebra_lsp_t *lsp; + + lsp = XCALLOC (MTYPE_LSP, sizeof(zebra_lsp_t)); + lsp->ile = *ile; + + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("Alloc LSP in-label %u", lsp->ile.in_label); + + return ((void *)lsp); +} + +/* + * Create printable string for NHLFE entry. + */ +static char * +nhlfe2str (zebra_nhlfe_t *nhlfe, char *buf, int size) +{ + struct nexthop *nexthop; + + buf[0] = '\0'; + nexthop = nhlfe->nexthop; + switch (nexthop->type) + { + case NEXTHOP_TYPE_IPV4: + inet_ntop (AF_INET, &nexthop->gate.ipv4, buf, size); + break; + case NEXTHOP_TYPE_IPV6: + inet_ntop (AF_INET6, &nexthop->gate.ipv6, buf, size); + break; + default: + break; + } + + return buf; +} + +/* + * Check if NHLFE matches with search info passed. + */ +static int +nhlfe_nhop_match (zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex) +{ + struct nexthop *nhop; + int cmp = 1; + + nhop = nhlfe->nexthop; + if (!nhop) + return 1; + + if (nhop->type != gtype) + return 1; + + switch (nhop->type) + { + case NEXTHOP_TYPE_IPV4: + cmp = memcmp(&(nhop->gate.ipv4), &(gate->ipv4), + sizeof(struct in_addr)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + cmp = memcmp(&(nhop->gate.ipv6), &(gate->ipv6), + sizeof(struct in6_addr)); + if (!cmp && nhop->type == NEXTHOP_TYPE_IPV6_IFINDEX) + cmp = !(nhop->ifindex == ifindex); + break; + default: + break; + } + + return cmp; +} + + +/* + * Locate NHLFE that matches with passed info. + */ +static zebra_nhlfe_t * +nhlfe_find (zebra_lsp_t *lsp, enum lsp_types_t lsp_type, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex) +{ + zebra_nhlfe_t *nhlfe; + + if (!lsp) + return NULL; + + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + if (nhlfe->type != lsp_type) + continue; + if (!nhlfe_nhop_match (nhlfe, gtype, gate, ifname, ifindex)) + break; + } + + return nhlfe; +} + +/* + * Add NHLFE. Base entry must have been created and duplicate + * check done. + */ +static zebra_nhlfe_t * +nhlfe_add (zebra_lsp_t *lsp, enum lsp_types_t lsp_type, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex, mpls_label_t out_label) +{ + zebra_nhlfe_t *nhlfe; + struct nexthop *nexthop; + + if (!lsp) + return NULL; + + nhlfe = XCALLOC(MTYPE_NHLFE, sizeof(zebra_nhlfe_t)); + if (!nhlfe) + return NULL; + + nhlfe->lsp = lsp; + nhlfe->type = lsp_type; + nhlfe->distance = lsp_distance (lsp_type); + + nexthop = nexthop_new(); + if (!nexthop) + { + XFREE (MTYPE_NHLFE, nhlfe); + return NULL; + } + nexthop_add_labels (nexthop, lsp_type, 1, &out_label); + + nexthop->type = gtype; + switch (nexthop->type) + { + case NEXTHOP_TYPE_IPV4: + nexthop->gate.ipv4 = gate->ipv4; + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + nexthop->gate.ipv6 = gate->ipv6; + if (ifindex) + nexthop->ifindex = ifindex; + break; + default: + nexthop_free(nexthop); + XFREE (MTYPE_NHLFE, nhlfe); + return NULL; + break; + } + + nhlfe->nexthop = nexthop; + if (lsp->nhlfe_list) + lsp->nhlfe_list->prev = nhlfe; + nhlfe->next = lsp->nhlfe_list; + lsp->nhlfe_list = nhlfe; + + return nhlfe; +} + +/* + * Delete NHLFE. Entry must be present on list. + */ +static int +nhlfe_del (zebra_nhlfe_t *nhlfe) +{ + zebra_lsp_t *lsp; + + if (!nhlfe) + return -1; + + lsp = nhlfe->lsp; + if (!lsp) + return -1; + + /* Free nexthop. 
*/ + if (nhlfe->nexthop) + nexthop_free(nhlfe->nexthop); + + /* Unlink from LSP */ + if (nhlfe->next) + nhlfe->next->prev = nhlfe->prev; + if (nhlfe->prev) + nhlfe->prev->next = nhlfe->next; + else + lsp->nhlfe_list = nhlfe->next; + + XFREE (MTYPE_NHLFE, nhlfe); + + return 0; +} + +static int +mpls_lsp_uninstall_all (struct hash *lsp_table, zebra_lsp_t *lsp, + enum lsp_types_t type) +{ + zebra_nhlfe_t *nhlfe, *nhlfe_next; + int schedule_lsp = 0; + char buf[BUFSIZ]; + + /* Mark NHLFEs for delete or directly delete, as appropriate. */ + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe_next) + { + nhlfe_next = nhlfe->next; + + /* Skip non-static NHLFEs */ + if (nhlfe->type != type) + continue; + + if (IS_ZEBRA_DEBUG_MPLS) + { + nhlfe2str (nhlfe, buf, BUFSIZ); + zlog_debug ("Del LSP in-label %u type %d nexthop %s flags 0x%x", + lsp->ile.in_label, type, buf, nhlfe->flags); + } + + if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED)) + { + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_CHANGED); + SET_FLAG (nhlfe->flags, NHLFE_FLAG_DELETED); + schedule_lsp = 1; + } + else + { + nhlfe_del (nhlfe); + } + } + + /* Queue LSP for processing, if needed, else delete. */ + if (schedule_lsp) + { + if (lsp_processq_add (lsp)) + return -1; + } + else if (!lsp->nhlfe_list && + !CHECK_FLAG (lsp->flags, LSP_FLAG_SCHEDULED)) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("Free LSP in-label %u flags 0x%x", + lsp->ile.in_label, lsp->flags); + + lsp = hash_release(lsp_table, &lsp->ile); + if (lsp) + XFREE(MTYPE_LSP, lsp); + } + + return 0; +} + +/* + * Uninstall all static NHLFEs for a particular LSP forwarding entry. + * If no other NHLFEs exist, the entry would be deleted. + */ +static int +mpls_static_lsp_uninstall_all (struct zebra_vrf *zvrf, mpls_label_t in_label) +{ + struct hash *lsp_table; + zebra_ile_t tmp_ile; + zebra_lsp_t *lsp; + + /* Lookup table. */ + lsp_table = zvrf->lsp_table; + if (!lsp_table) + return -1; + + /* If entry is not present, exit. */ + tmp_ile.in_label = in_label; + lsp = hash_lookup (lsp_table, &tmp_ile); + if (!lsp || !lsp->nhlfe_list) + return 0; + + return mpls_lsp_uninstall_all (lsp_table, lsp, ZEBRA_LSP_STATIC); +} + +static json_object * +nhlfe_json (zebra_nhlfe_t *nhlfe) +{ + char buf[BUFSIZ]; + json_object *json_nhlfe = NULL; + struct nexthop *nexthop = nhlfe->nexthop; + + json_nhlfe = json_object_new_object(); + json_object_string_add(json_nhlfe, "type", nhlfe_type2str(nhlfe->type)); + json_object_int_add(json_nhlfe, "outLabel", nexthop->nh_label->label[0]); + json_object_int_add(json_nhlfe, "distance", nhlfe->distance); + + if (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED)) + json_object_boolean_true_add(json_nhlfe, "installed"); + + switch (nexthop->type) + { + case NEXTHOP_TYPE_IPV4: + json_object_string_add(json_nhlfe, "nexthop", + inet_ntoa (nexthop->gate.ipv4)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + json_object_string_add(json_nhlfe, "nexthop", + inet_ntop (AF_INET6, &nexthop->gate.ipv6, buf, BUFSIZ)); + + if (nexthop->ifindex) + json_object_string_add(json_nhlfe, "interface", ifindex2ifname (nexthop->ifindex)); + break; + default: + break; + } + return json_nhlfe; +} + +/* + * Print the NHLFE for a LSP forwarding entry. 
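+ *
+ * For an installed static IPv4 NHLFE the rendered output is roughly
+ * (illustrative values; see the vty_out() format strings below):
+ *
+ *   type: Static remote label: 200 distance: 1
+ *    via 10.1.1.2 (installed)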
+ */ +static void +nhlfe_print (zebra_nhlfe_t *nhlfe, struct vty *vty) +{ + struct nexthop *nexthop; + char buf[BUFSIZ]; + + nexthop = nhlfe->nexthop; + if (!nexthop || !nexthop->nh_label) // unexpected + return; + + vty_out(vty, " type: %s remote label: %s distance: %d%s", + nhlfe_type2str(nhlfe->type), + label2str(nexthop->nh_label->label[0], buf, BUFSIZ), + nhlfe->distance, VTY_NEWLINE); + switch (nexthop->type) + { + case NEXTHOP_TYPE_IPV4: + vty_out (vty, " via %s", inet_ntoa (nexthop->gate.ipv4)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + vty_out (vty, " via %s", + inet_ntop (AF_INET6, &nexthop->gate.ipv6, buf, BUFSIZ)); + if (nexthop->ifindex) + vty_out (vty, " dev %s", ifindex2ifname (nexthop->ifindex)); + break; + default: + break; + } + vty_out(vty, "%s", CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED) ? + " (installed)" : ""); + vty_out(vty, "%s", VTY_NEWLINE); +} + +/* + * Print an LSP forwarding entry. + */ +static void +lsp_print (zebra_lsp_t *lsp, void *ctxt) +{ + zebra_nhlfe_t *nhlfe; + struct vty *vty; + + vty = (struct vty *) ctxt; + + vty_out(vty, "Local label: %u%s%s", + lsp->ile.in_label, + CHECK_FLAG (lsp->flags, LSP_FLAG_INSTALLED) ? " (installed)" : "", + VTY_NEWLINE); + + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + nhlfe_print (nhlfe, vty); +} + +/* + * JSON objects for an LSP forwarding entry. + */ +static json_object * +lsp_json (zebra_lsp_t *lsp) +{ + zebra_nhlfe_t *nhlfe = NULL; + json_object *json = json_object_new_object(); + json_object *json_nhlfe_list = json_object_new_array(); + + json_object_int_add(json, "inLabel", lsp->ile.in_label); + + if (CHECK_FLAG (lsp->flags, LSP_FLAG_INSTALLED)) + json_object_boolean_true_add(json, "installed"); + + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + json_object_array_add(json_nhlfe_list, nhlfe_json(nhlfe)); + + json_object_object_add(json, "nexthops", json_nhlfe_list); + return json; +} + + +/* Return a sorted linked list of the hash contents */ +static struct list * +hash_get_sorted_list (struct hash *hash, void *cmp) +{ + unsigned int i; + struct hash_backet *hb; + struct list *sorted_list = list_new(); + + sorted_list->cmp = (int (*)(void *, void *)) cmp; + + for (i = 0; i < hash->size; i++) + for (hb = hash->index[i]; hb; hb = hb->next) + listnode_add_sort(sorted_list, hb->data); + + return sorted_list; +} + +/* + * Compare two LSPs based on their label values. + */ +static int +lsp_cmp (zebra_lsp_t *lsp1, zebra_lsp_t *lsp2) +{ + if (lsp1->ile.in_label < lsp2->ile.in_label) + return -1; + + if (lsp1->ile.in_label > lsp2->ile.in_label) + return 1; + + return 0; +} + +/* + * Callback to allocate static LSP. + */ +static void * +slsp_alloc (void *p) +{ + const zebra_ile_t *ile = p; + zebra_slsp_t *slsp; + + slsp = XCALLOC (MTYPE_SLSP, sizeof(zebra_slsp_t)); + slsp->ile = *ile; + return ((void *)slsp); +} + +/* + * Compare two static LSPs based on their label values. + */ +static int +slsp_cmp (zebra_slsp_t *slsp1, zebra_slsp_t *slsp2) +{ + if (slsp1->ile.in_label < slsp2->ile.in_label) + return -1; + + if (slsp1->ile.in_label > slsp2->ile.in_label) + return 1; + + return 0; +} + +/* + * Check if static NHLFE matches with search info passed. 
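+ *
+ * As with nhlfe_nhop_match() above, this follows memcmp() semantics:
+ * it returns 0 on a match and non-zero otherwise, so callers such as
+ * snhlfe_find() test it as:
+ *
+ *   if (!snhlfe_match (snhlfe, gtype, gate, ifname, ifindex))
+ *     break;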
+ */ +static int +snhlfe_match (zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex) +{ + int cmp = 1; + + if (snhlfe->gtype != gtype) + return 1; + + switch (snhlfe->gtype) + { + case NEXTHOP_TYPE_IPV4: + cmp = memcmp(&(snhlfe->gate.ipv4), &(gate->ipv4), + sizeof(struct in_addr)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + cmp = memcmp(&(snhlfe->gate.ipv6), &(gate->ipv6), + sizeof(struct in6_addr)); + if (!cmp && snhlfe->gtype == NEXTHOP_TYPE_IPV6_IFINDEX) + cmp = !(snhlfe->ifindex == ifindex); + break; + default: + break; + } + + return cmp; +} + +/* + * Locate static NHLFE that matches with passed info. + */ +static zebra_snhlfe_t * +snhlfe_find (zebra_slsp_t *slsp, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex) +{ + zebra_snhlfe_t *snhlfe; + + if (!slsp) + return NULL; + + for (snhlfe = slsp->snhlfe_list; snhlfe; snhlfe = snhlfe->next) + { + if (!snhlfe_match (snhlfe, gtype, gate, ifname, ifindex)) + break; + } + + return snhlfe; +} + + +/* + * Add static NHLFE. Base LSP config entry must have been created + * and duplicate check done. + */ +static zebra_snhlfe_t * +snhlfe_add (zebra_slsp_t *slsp, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex, + mpls_label_t out_label) +{ + zebra_snhlfe_t *snhlfe; + + if (!slsp) + return NULL; + + snhlfe = XCALLOC(MTYPE_SNHLFE, sizeof(zebra_snhlfe_t)); + snhlfe->slsp = slsp; + snhlfe->out_label = out_label; + snhlfe->gtype = gtype; + switch (gtype) + { + case NEXTHOP_TYPE_IPV4: + snhlfe->gate.ipv4 = gate->ipv4; + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + snhlfe->gate.ipv6 = gate->ipv6; + if (ifindex) + snhlfe->ifindex = ifindex; + break; + default: + XFREE (MTYPE_SNHLFE, snhlfe); + return NULL; + } + + if (slsp->snhlfe_list) + slsp->snhlfe_list->prev = snhlfe; + snhlfe->next = slsp->snhlfe_list; + slsp->snhlfe_list = snhlfe; + + return snhlfe; +} + +/* + * Delete static NHLFE. Entry must be present on list. + */ +static int +snhlfe_del (zebra_snhlfe_t *snhlfe) +{ + zebra_slsp_t *slsp; + + if (!snhlfe) + return -1; + + slsp = snhlfe->slsp; + if (!slsp) + return -1; + + if (snhlfe->next) + snhlfe->next->prev = snhlfe->prev; + if (snhlfe->prev) + snhlfe->prev->next = snhlfe->next; + else + slsp->snhlfe_list = snhlfe->next; + + snhlfe->prev = snhlfe->next = NULL; + if (snhlfe->ifname) + XFREE (MTYPE_SNHLFE_IFNAME, snhlfe->ifname); + XFREE (MTYPE_SNHLFE, snhlfe); + + return 0; +} + +/* + * Delete all static NHLFE entries for this LSP (in label). + */ +static int +snhlfe_del_all (zebra_slsp_t *slsp) +{ + zebra_snhlfe_t *snhlfe, *snhlfe_next; + + if (!slsp) + return -1; + + for (snhlfe = slsp->snhlfe_list; snhlfe; snhlfe = snhlfe_next) + { + snhlfe_next = snhlfe->next; + snhlfe_del (snhlfe); + } + + return 0; +} + +/* + * Create printable string for NHLFE configuration. + */ +static char * +snhlfe2str (zebra_snhlfe_t *snhlfe, char *buf, int size) +{ + buf[0] = '\0'; + switch (snhlfe->gtype) + { + case NEXTHOP_TYPE_IPV4: + inet_ntop (AF_INET, &snhlfe->gate.ipv4, buf, size); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + inet_ntop (AF_INET6, &snhlfe->gate.ipv6, buf, size); + if (snhlfe->ifindex) + strcat (buf, ifindex2ifname (snhlfe->ifindex)); + break; + default: + break; + } + + return buf; +} + +/* + * Initialize work queue for processing changed LSPs. 
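+ *
+ * Called once at startup from zebra_mpls_init(), which passes the global
+ * zebra context so the queue is attached to the main thread master:
+ *
+ *   mpls_processq_init (&zebrad);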
+ */ +static void +mpls_processq_init (struct zebra_t *zebra) +{ + zebra->lsp_process_q = work_queue_new (zebra->master, "LSP processing"); + if (!zebra->lsp_process_q) + { + zlog_err ("%s: could not initialise work queue!", __func__); + return; + } + + zebra->lsp_process_q->spec.workfunc = &lsp_process; + zebra->lsp_process_q->spec.del_item_data = &lsp_processq_del; + zebra->lsp_process_q->spec.errorfunc = NULL; + zebra->lsp_process_q->spec.completion_func = &lsp_processq_complete; + zebra->lsp_process_q->spec.max_retries = 0; + zebra->lsp_process_q->spec.hold = 10; +} + + + +/* Public functions */ + +/* + * String to label conversion, labels separated by '/'. + */ +int +mpls_str2label (const char *label_str, u_int8_t *num_labels, + mpls_label_t *labels) +{ + char *endp; + int i; + + *num_labels = 0; + for (i = 0; i < MPLS_MAX_LABELS; i++) + { + u_int32_t label; + + label = strtoul(label_str, &endp, 0); + + /* validity checks */ + if (endp == label_str) + return -1; + + if (!IS_MPLS_UNRESERVED_LABEL(label)) + return -1; + + labels[i] = label; + if (*endp == '\0') + { + *num_labels = i + 1; + return 0; + } + + /* Check separator. */ + if (*endp != '/') + return -1; + + label_str = endp + 1; + } + + /* Too many labels. */ + return -1; +} + +/* + * Label to string conversion, labels in string separated by '/'. + */ +char * +mpls_label2str (u_int8_t num_labels, mpls_label_t *labels, + char *buf, int len) +{ + buf[0] = '\0'; + if (num_labels == 1) + snprintf (buf, len, "%u", labels[0]); + else if (num_labels == 2) + snprintf (buf, len, "%u/%u", labels[0], labels[1]); + return buf; +} + +/* + * Install/uninstall a FEC-To-NHLFE (FTN) binding. + */ +int +mpls_ftn_update (int add, struct zebra_vrf *zvrf, enum lsp_types_t type, + struct prefix *prefix, union g_addr *gate, u_int8_t distance, + mpls_label_t out_label) +{ + struct route_table *table; + struct route_node *rn; + struct rib *rib; + struct nexthop *nexthop; + + /* Lookup table. */ + table = zebra_vrf_table (family2afi(prefix->family), SAFI_UNICAST, zvrf->vrf_id); + if (! table) + return -1; + + /* Lookup existing route */ + rn = route_node_get (table, prefix); + RNODE_FOREACH_RIB (rn, rib) + { + if (CHECK_FLAG (rib->status, RIB_ENTRY_REMOVED)) + continue; + if (rib->distance == distance) + break; + } + + if (rib == NULL) + return -1; + + for (nexthop = rib->nexthop; nexthop; nexthop = nexthop->next) + switch (prefix->family) + { + case AF_INET: + if (nexthop->type != NEXTHOP_TYPE_IPV4 && + nexthop->type != NEXTHOP_TYPE_IPV4_IFINDEX) + continue; + if (! IPV4_ADDR_SAME (&nexthop->gate.ipv4, &gate->ipv4)) + continue; + goto found; + break; + case AF_INET6: + if (nexthop->type != NEXTHOP_TYPE_IPV6 && + nexthop->type != NEXTHOP_TYPE_IPV6_IFINDEX) + continue; + if (! IPV6_ADDR_SAME (&nexthop->gate.ipv6, &gate->ipv6)) + continue; + goto found; + break; + default: + break; + } + /* nexthop not found */ + return -1; + + found: + if (add && nexthop->nh_label_type == ZEBRA_LSP_NONE) + nexthop_add_labels (nexthop, type, 1, &out_label); + else if (!add && nexthop->nh_label_type == type) + nexthop_del_labels (nexthop); + else + return 0; + + SET_FLAG (rib->status, RIB_ENTRY_CHANGED); + SET_FLAG (rib->status, RIB_ENTRY_NEXTHOPS_CHANGED); + rib_queue_add (rn); + + return 0; +} + +/* + * Install/update a NHLFE for an LSP in the forwarding table. This may be + * a new LSP entry or a new NHLFE for an existing in-label or an update of + * the out-label for an existing NHLFE (update case). 
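+ *
+ * For example, static LSP configuration (re)installs its entries through
+ * this function from zebra_mpls_static_lsp_add() below:
+ *
+ *   mpls_lsp_install (zvrf, ZEBRA_LSP_STATIC, in_label, out_label,
+ *                     gtype, gate, ifname, ifindex);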
+ */ +int +mpls_lsp_install (struct zebra_vrf *zvrf, enum lsp_types_t type, + mpls_label_t in_label, mpls_label_t out_label, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex) +{ + struct hash *lsp_table; + zebra_ile_t tmp_ile; + zebra_lsp_t *lsp; + zebra_nhlfe_t *nhlfe; + char buf[BUFSIZ]; + + /* Lookup table. */ + lsp_table = zvrf->lsp_table; + if (!lsp_table) + return -1; + + /* If entry is present, exit. */ + tmp_ile.in_label = in_label; + lsp = hash_get (lsp_table, &tmp_ile, lsp_alloc); + if (!lsp) + return -1; + nhlfe = nhlfe_find (lsp, type, gtype, gate, ifname, ifindex); + if (nhlfe) + { + struct nexthop *nh = nhlfe->nexthop; + + assert (nh); + assert (nh->nh_label); + + /* Clear deleted flag (in case it was set) */ + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_DELETED); + if (nh->nh_label->label[0] == out_label) + /* No change */ + return 0; + + if (IS_ZEBRA_DEBUG_MPLS) + { + nhlfe2str (nhlfe, buf, BUFSIZ); + zlog_debug ("LSP in-label %u type %d nexthop %s " + "out-label changed to %u (old %u)", + in_label, type, buf, + out_label, nh->nh_label->label[0]); + } + + /* Update out label, trigger processing. */ + nh->nh_label->label[0] = out_label; + } + else + { + /* Add LSP entry to this nexthop */ + nhlfe = nhlfe_add (lsp, type, gtype, gate, + ifname, ifindex, out_label); + if (!nhlfe) + return -1; + + if (IS_ZEBRA_DEBUG_MPLS) + { + nhlfe2str (nhlfe, buf, BUFSIZ); + zlog_debug ("Add LSP in-label %u type %d nexthop %s " + "out-label %u", in_label, type, buf, out_label); + } + + lsp->addr_family = NHLFE_FAMILY (nhlfe); + } + + /* Mark NHLFE, queue LSP for processing. */ + SET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED); + if (lsp_processq_add (lsp)) + return -1; + + return 0; +} + +/* + * Uninstall a particular NHLFE in the forwarding table. If this is + * the only NHLFE, the entire LSP forwarding entry has to be deleted. + */ +int +mpls_lsp_uninstall (struct zebra_vrf *zvrf, enum lsp_types_t type, + mpls_label_t in_label, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex) +{ + struct hash *lsp_table; + zebra_ile_t tmp_ile; + zebra_lsp_t *lsp; + zebra_nhlfe_t *nhlfe; + char buf[BUFSIZ]; + + /* Lookup table. */ + lsp_table = zvrf->lsp_table; + if (!lsp_table) + return -1; + + /* If entry is not present, exit. */ + tmp_ile.in_label = in_label; + lsp = hash_lookup (lsp_table, &tmp_ile); + if (!lsp) + return 0; + nhlfe = nhlfe_find (lsp, type, gtype, gate, ifname, ifindex); + if (!nhlfe) + return 0; + + if (IS_ZEBRA_DEBUG_MPLS) + { + nhlfe2str (nhlfe, buf, BUFSIZ); + zlog_debug ("Del LSP in-label %u type %d nexthop %s flags 0x%x", + in_label, type, buf, nhlfe->flags); + } + + /* Mark NHLFE for delete or directly delete, as appropriate. */ + if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED)) + { + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_CHANGED); + SET_FLAG (nhlfe->flags, NHLFE_FLAG_DELETED); + if (lsp_processq_add (lsp)) + return -1; + } + else + { + nhlfe_del (nhlfe); + + /* Free LSP entry if no other NHLFEs and not scheduled. */ + if (!lsp->nhlfe_list && + !CHECK_FLAG (lsp->flags, LSP_FLAG_SCHEDULED)) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("Free LSP in-label %u flags 0x%x", + lsp->ile.in_label, lsp->flags); + + lsp = hash_release(lsp_table, &lsp->ile); + if (lsp) + XFREE(MTYPE_LSP, lsp); + } + } + return 0; +} + +/* + * Uninstall all LDP NHLFEs for a particular LSP forwarding entry. + * If no other NHLFEs exist, the entry would be deleted. 
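+ *
+ * This is a hash-table walk callback that receives the LSP table as its
+ * context argument; presumably the LDP-client cleanup path drives it
+ * along these lines (illustrative, not part of this change):
+ *
+ *   hash_iterate (zvrf->lsp_table, mpls_ldp_lsp_uninstall_all,
+ *                 zvrf->lsp_table);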
+ */ +void +mpls_ldp_lsp_uninstall_all (struct hash_backet *backet, void *ctxt) +{ + zebra_lsp_t *lsp; + struct hash *lsp_table; + + lsp = (zebra_lsp_t *) backet->data; + if (!lsp || !lsp->nhlfe_list) + return; + + lsp_table = ctxt; + if (!lsp_table) + return; + + mpls_lsp_uninstall_all (lsp_table, lsp, ZEBRA_LSP_LDP); +} + +/* + * Uninstall all LDP FEC-To-NHLFE (FTN) bindings of the given address-family. + */ +void +mpls_ldp_ftn_uninstall_all (struct zebra_vrf *zvrf, int afi) +{ + struct route_table *table; + struct route_node *rn; + struct rib *rib; + struct nexthop *nexthop; + int update; + + /* Process routes of interested address-families. */ + table = zebra_vrf_table (afi, SAFI_UNICAST, zvrf->vrf_id); + if (!table) + return; + + for (rn = route_top (table); rn; rn = route_next (rn)) + { + update = 0; + RNODE_FOREACH_RIB (rn, rib) + for (nexthop = rib->nexthop; nexthop; nexthop = nexthop->next) + if (nexthop->nh_label_type == ZEBRA_LSP_LDP) + { + nexthop_del_labels (nexthop); + SET_FLAG (rib->status, RIB_ENTRY_CHANGED); + SET_FLAG (rib->status, RIB_ENTRY_NEXTHOPS_CHANGED); + update = 1; + } + + if (update) + rib_queue_add (rn); + } +} + +#if defined(HAVE_CUMULUS) +/* + * Check that the label values used in LSP creation are consistent. The + * main criteria is that if there is ECMP, the label operation must still + * be consistent - i.e., all paths either do a swap or do PHP. This is due + * to current HW restrictions. + */ +int +zebra_mpls_lsp_label_consistent (struct zebra_vrf *zvrf, mpls_label_t in_label, + mpls_label_t out_label, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex) +{ + struct hash *slsp_table; + zebra_ile_t tmp_ile; + zebra_slsp_t *slsp; + zebra_snhlfe_t *snhlfe; + + /* Lookup table. */ + slsp_table = zvrf->slsp_table; + if (!slsp_table) + return 0; + + /* If entry is not present, exit. */ + tmp_ile.in_label = in_label; + slsp = hash_lookup (slsp_table, &tmp_ile); + if (!slsp) + return 1; + + snhlfe = snhlfe_find (slsp, gtype, gate, ifname, ifindex); + if (snhlfe) + { + if (snhlfe->out_label == out_label) + return 1; + + /* If not only NHLFE, cannot allow label change. */ + if (snhlfe != slsp->snhlfe_list || + snhlfe->next) + return 0; + } + else + { + /* If other NHLFEs exist, label operation must match. */ + if (slsp->snhlfe_list) + { + int cur_op, new_op; + + cur_op = (slsp->snhlfe_list->out_label == MPLS_IMP_NULL_LABEL); + new_op = (out_label == MPLS_IMP_NULL_LABEL); + if (cur_op != new_op) + return 0; + } + } + + /* Label values are good. */ + return 1; +} +#endif /* HAVE_CUMULUS */ + +/* + * Add static LSP entry. This may be the first entry for this incoming label + * or an additional nexthop; an existing entry may also have outgoing label + * changed. + * Note: The label operation (swap or PHP) is common for the LSP entry (all + * NHLFEs). + */ +int +zebra_mpls_static_lsp_add (struct zebra_vrf *zvrf, mpls_label_t in_label, + mpls_label_t out_label, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex) +{ + struct hash *slsp_table; + zebra_ile_t tmp_ile; + zebra_slsp_t *slsp; + zebra_snhlfe_t *snhlfe; + char buf[BUFSIZ]; + + /* Lookup table. */ + slsp_table = zvrf->slsp_table; + if (!slsp_table) + return -1; + + /* If entry is present, exit. 
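+ * (Note: the hash_get() below, with slsp_alloc as the allocation
+ * callback, actually creates the configuration entry when it is not
+ * already present; only a failed allocation exits early.)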
*/ + tmp_ile.in_label = in_label; + slsp = hash_get (slsp_table, &tmp_ile, slsp_alloc); + if (!slsp) + return -1; + snhlfe = snhlfe_find (slsp, gtype, gate, ifname, ifindex); + if (snhlfe) + { + if (snhlfe->out_label == out_label) + /* No change */ + return 0; + + if (IS_ZEBRA_DEBUG_MPLS) + { + snhlfe2str (snhlfe, buf, BUFSIZ); + zlog_debug ("Upd static LSP in-label %u nexthop %s " + "out-label %u (old %u)", + in_label, buf, out_label, snhlfe->out_label); + } + snhlfe->out_label = out_label; + } + else + { + /* Add static LSP entry to this nexthop */ + snhlfe = snhlfe_add (slsp, gtype, gate, ifname, ifindex, out_label); + if (!snhlfe) + return -1; + + if (IS_ZEBRA_DEBUG_MPLS) + { + snhlfe2str (snhlfe, buf, BUFSIZ); + zlog_debug ("Add static LSP in-label %u nexthop %s out-label %u", + in_label, buf, out_label); + } + } + + /* (Re)Install LSP in the main table. */ + if (mpls_lsp_install (zvrf, ZEBRA_LSP_STATIC, in_label, out_label, gtype, + gate, ifname, ifindex)) + return -1; + + return 0; +} + +/* + * Delete static LSP entry. This may be the delete of one particular + * NHLFE for this incoming label or the delete of the entire entry (i.e., + * all NHLFEs). + * NOTE: Delete of the only NHLFE will also end up deleting the entire + * LSP configuration. + */ +int +zebra_mpls_static_lsp_del (struct zebra_vrf *zvrf, mpls_label_t in_label, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex) +{ + struct hash *slsp_table; + zebra_ile_t tmp_ile; + zebra_slsp_t *slsp; + zebra_snhlfe_t *snhlfe; + + /* Lookup table. */ + slsp_table = zvrf->slsp_table; + if (!slsp_table) + return -1; + + /* If entry is not present, exit. */ + tmp_ile.in_label = in_label; + slsp = hash_lookup (slsp_table, &tmp_ile); + if (!slsp) + return 0; + + /* Is it delete of entire LSP or a specific NHLFE? */ + if (gtype == NEXTHOP_TYPE_BLACKHOLE) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("Del static LSP in-label %u", in_label); + + /* Uninstall entire LSP from the main table. */ + mpls_static_lsp_uninstall_all (zvrf, in_label); + + /* Delete all static NHLFEs */ + snhlfe_del_all (slsp); + } + else + { + /* Find specific NHLFE, exit if not found. */ + snhlfe = snhlfe_find (slsp, gtype, gate, ifname, ifindex); + if (!snhlfe) + return 0; + + if (IS_ZEBRA_DEBUG_MPLS) + { + char buf[BUFSIZ]; + snhlfe2str (snhlfe, buf, BUFSIZ); + zlog_debug ("Del static LSP in-label %u nexthop %s", + in_label, buf); + } + + /* Uninstall LSP from the main table. */ + mpls_lsp_uninstall (zvrf, ZEBRA_LSP_STATIC, in_label, gtype, gate, + ifname, ifindex); + + /* Delete static LSP NHLFE */ + snhlfe_del (snhlfe); + } + + /* Remove entire static LSP entry if no NHLFE - valid in either case above. */ + if (!slsp->snhlfe_list) + { + slsp = hash_release(slsp_table, &tmp_ile); + if (slsp) + XFREE(MTYPE_SLSP, slsp); + } + + return 0; +} + +/* + * Schedule all MPLS label forwarding entries for processing. + * Called upon changes that may affect one or more of them such as + * interface or nexthop state changes. + */ +void +zebra_mpls_lsp_schedule (struct zebra_vrf *zvrf) +{ + if (!zvrf) + return; + hash_iterate(zvrf->lsp_table, lsp_schedule, NULL); +} + +/* + * Display MPLS label forwarding table for a specific LSP + * (VTY command handler). + */ +void +zebra_mpls_print_lsp (struct vty *vty, struct zebra_vrf *zvrf, mpls_label_t label, + u_char use_json) +{ + struct hash *lsp_table; + zebra_lsp_t *lsp; + zebra_ile_t tmp_ile; + json_object *json = NULL; + + /* Lookup table. 
*/ + lsp_table = zvrf->lsp_table; + if (!lsp_table) + return; + + /* If entry is not present, exit. */ + tmp_ile.in_label = label; + lsp = hash_lookup (lsp_table, &tmp_ile); + if (!lsp) + return; + + if (use_json) + { + json = lsp_json(lsp); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); + json_object_free(json); + } + else + lsp_print (lsp, (void *)vty); +} + +/* + * Display MPLS label forwarding table (VTY command handler). + */ +void +zebra_mpls_print_lsp_table (struct vty *vty, struct zebra_vrf *zvrf, + u_char use_json) +{ + char buf[BUFSIZ]; + json_object *json = NULL; + zebra_lsp_t *lsp = NULL; + zebra_nhlfe_t *nhlfe = NULL; + struct nexthop *nexthop = NULL; + struct listnode *node = NULL; + struct list *lsp_list = hash_get_sorted_list(zvrf->lsp_table, lsp_cmp); + + if (use_json) + { + json = json_object_new_object(); + + for (ALL_LIST_ELEMENTS_RO(lsp_list, node, lsp)) + json_object_object_add(json, label2str(lsp->ile.in_label, buf, BUFSIZ), + lsp_json(lsp)); + + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); + json_object_free(json); + } + else + { + vty_out (vty, " Inbound Outbound%s", VTY_NEWLINE); + vty_out (vty, " Label Type Nexthop Label%s", VTY_NEWLINE); + vty_out (vty, "-------- ------- --------------- --------%s", VTY_NEWLINE); + + for (ALL_LIST_ELEMENTS_RO(lsp_list, node, lsp)) + { + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + vty_out (vty, "%8d %7s ", lsp->ile.in_label, nhlfe_type2str(nhlfe->type)); + nexthop = nhlfe->nexthop; + + switch (nexthop->type) + { + case NEXTHOP_TYPE_IPV4: + vty_out (vty, "%15s", inet_ntoa (nexthop->gate.ipv4)); + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + vty_out (vty, "%15s", inet_ntop (AF_INET6, &nexthop->gate.ipv6, buf, BUFSIZ)); + break; + default: + break; + } + + vty_out (vty, " %8d%s", nexthop->nh_label->label[0], VTY_NEWLINE); + } + } + + vty_out (vty, "%s", VTY_NEWLINE); + } + + list_delete_all_node(lsp_list); +} + +/* + * Display MPLS LSP configuration of all static LSPs (VTY command handler). + */ +int +zebra_mpls_write_lsp_config (struct vty *vty, struct zebra_vrf *zvrf) +{ + zebra_slsp_t *slsp; + zebra_snhlfe_t *snhlfe; + struct listnode *node; + struct list *slsp_list = hash_get_sorted_list(zvrf->slsp_table, slsp_cmp); + + for (ALL_LIST_ELEMENTS_RO(slsp_list, node, slsp)) + { + for (snhlfe = slsp->snhlfe_list; snhlfe; snhlfe = snhlfe->next) + { + char buf[INET6_ADDRSTRLEN]; + char lstr[30]; + + snhlfe2str (snhlfe, buf, BUFSIZ); + switch (snhlfe->out_label) { + case MPLS_V4_EXP_NULL_LABEL: + case MPLS_V6_EXP_NULL_LABEL: + strlcpy(lstr, "explicit-null", sizeof(lstr)); + break; + case MPLS_IMP_NULL_LABEL: + strlcpy(lstr, "implicit-null", sizeof(lstr)); + break; + default: + sprintf(lstr, "%u", snhlfe->out_label); + break; + } + + vty_out (vty, "mpls lsp %u %s %s%s", + slsp->ile.in_label, buf, lstr, VTY_NEWLINE); + } + } + + list_delete_all_node(slsp_list); + return (zvrf->slsp_table->count ? 1 : 0); +} + +/* + * Called upon process exiting, need to delete LSP forwarding + * entries from the kernel. + * NOTE: Currently supported only for default VRF. + */ +void +zebra_mpls_close_tables (struct zebra_vrf *zvrf) +{ + if (!zvrf) + return; + hash_iterate(zvrf->lsp_table, lsp_uninstall_from_kernel, NULL); +} + +/* + * Allocate MPLS tables for this VRF and do other initialization. + * NOTE: Currently supported only for default VRF. 
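+ *
+ * Both the static-config and forwarding tables are keyed purely by the
+ * incoming label, so they share the label_hash/label_cmp callbacks:
+ *
+ *   zvrf->slsp_table = hash_create (label_hash, label_cmp);
+ *   zvrf->lsp_table  = hash_create (label_hash, label_cmp);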
+ */ +void +zebra_mpls_init_tables (struct zebra_vrf *zvrf) +{ + if (!zvrf) + return; + zvrf->slsp_table = hash_create(label_hash, label_cmp); + zvrf->lsp_table = hash_create(label_hash, label_cmp); + zvrf->mpls_flags = 0; +} + +/* + * Global MPLS initialization. + */ +void +zebra_mpls_init (void) +{ + if (mpls_kernel_init () < 0) + { + zlog_warn ("Disabling MPLS support (no kernel support)"); + return; + } + + mpls_enabled = 1; + mpls_processq_init (&zebrad); +} diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h new file mode 100644 index 0000000000..9f24689595 --- /dev/null +++ b/zebra/zebra_mpls.h @@ -0,0 +1,376 @@ +/* + * Zebra MPLS Data structures and definitions + * Copyright (C) 2015 Cumulus Networks, Inc. + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#ifndef _ZEBRA_MPLS_H +#define _ZEBRA_MPLS_H + +#include "prefix.h" +#include "table.h" +#include "queue.h" +#include "hash.h" +#include "jhash.h" +#include "nexthop.h" +#include "vty.h" +#include "memory.h" +#include "mpls.h" +#include "zebra/zserv.h" +#include "zebra/zebra_vrf.h" + + +/* Definitions and macros. */ + +#define MPLS_MAX_LABELS 2 /* Maximum # labels that can be pushed. */ + +#define NHLFE_FAMILY(nhlfe) \ + (((nhlfe)->nexthop->type == NEXTHOP_TYPE_IPV6 || \ + (nhlfe)->nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) ? AF_INET6 : AF_INET) + + +/* Typedefs */ + +typedef struct zebra_ile_t_ zebra_ile_t; +typedef struct zebra_snhlfe_t_ zebra_snhlfe_t; +typedef struct zebra_slsp_t_ zebra_slsp_t; +typedef struct zebra_nhlfe_t_ zebra_nhlfe_t; +typedef struct zebra_lsp_t_ zebra_lsp_t; + +/* + * (Outgoing) nexthop label forwarding entry configuration + */ +struct zebra_snhlfe_t_ +{ + /* Nexthop information */ + enum nexthop_types_t gtype; + union g_addr gate; + char *ifname; + ifindex_t ifindex; + + /* Out label. */ + mpls_label_t out_label; + + /* Backpointer to base entry. */ + zebra_slsp_t *slsp; + + /* Pointers to more outgoing information for same in-label */ + zebra_snhlfe_t *next; + zebra_snhlfe_t *prev; +}; + +/* + * (Outgoing) nexthop label forwarding entry + */ +struct zebra_nhlfe_t_ +{ + /* Type of entry - static etc. */ + enum lsp_types_t type; + + /* Nexthop information (with outgoing label) */ + struct nexthop *nexthop; + + /* Backpointer to base entry. */ + zebra_lsp_t *lsp; + + /* Runtime info - flags, pointers etc. */ + u_int32_t flags; +#define NHLFE_FLAG_CHANGED (1 << 0) +#define NHLFE_FLAG_SELECTED (1 << 1) +#define NHLFE_FLAG_MULTIPATH (1 << 2) +#define NHLFE_FLAG_DELETED (1 << 3) +#define NHLFE_FLAG_INSTALLED (1 << 4) + + zebra_nhlfe_t *next; + zebra_nhlfe_t *prev; + u_char distance; +}; + +/* + * Incoming label entry + */ +struct zebra_ile_t_ +{ + mpls_label_t in_label; +}; + +/* + * Label swap entry static configuration. 
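+ *
+ * As an illustration (hypothetical label values), a configuration line
+ * such as "mpls lsp 100 10.1.1.2 200" is held as one zebra_slsp_t with
+ * ile.in_label = 100 whose snhlfe_list contains a single zebra_snhlfe_t
+ * carrying the IPv4 gateway 10.1.1.2 and out_label 200.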
+ */ +struct zebra_slsp_t_ +{ + /* Incoming label */ + zebra_ile_t ile; + + /* List of outgoing nexthop static configuration */ + zebra_snhlfe_t *snhlfe_list; + +}; + +/* + * Label swap entry (ile -> list of nhlfes) + */ +struct zebra_lsp_t_ +{ + /* Incoming label */ + zebra_ile_t ile; + + /* List of NHLFE, pointer to best and num equal-cost. */ + zebra_nhlfe_t *nhlfe_list; + zebra_nhlfe_t *best_nhlfe; + u_int32_t num_ecmp; + + /* Flags */ + u_int32_t flags; +#define LSP_FLAG_SCHEDULED (1 << 0) +#define LSP_FLAG_INSTALLED (1 << 1) +#define LSP_FLAG_CHANGED (1 << 2) + + /* Address-family of NHLFE - saved here for delete. All NHLFEs */ + /* have to be of the same AF */ + u_char addr_family; +}; + + +/* Function declarations. */ + +/* + * String to label conversion, labels separated by '/'. + */ +int +mpls_str2label (const char *label_str, u_int8_t *num_labels, + mpls_label_t *labels); + +/* + * Label to string conversion, labels in string separated by '/'. + */ +char * +mpls_label2str (u_int8_t num_labels, mpls_label_t *labels, + char *buf, int len); + +/* + * Install/uninstall a FEC-To-NHLFE (FTN) binding. + */ +int +mpls_ftn_update (int add, struct zebra_vrf *zvrf, enum lsp_types_t type, + struct prefix *prefix, union g_addr *gate, u_int8_t distance, + mpls_label_t out_label); + +/* + * Install/update a NHLFE for an LSP in the forwarding table. This may be + * a new LSP entry or a new NHLFE for an existing in-label or an update of + * the out-label for an existing NHLFE (update case). + */ +int +mpls_lsp_install (struct zebra_vrf *zvrf, enum lsp_types_t type, + mpls_label_t in_label, mpls_label_t out_label, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex); + +/* + * Uninstall a particular NHLFE in the forwarding table. If this is + * the only NHLFE, the entire LSP forwarding entry has to be deleted. + */ +int +mpls_lsp_uninstall (struct zebra_vrf *zvrf, enum lsp_types_t type, + mpls_label_t in_label, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex); + +/* + * Uninstall all LDP NHLFEs for a particular LSP forwarding entry. + * If no other NHLFEs exist, the entry would be deleted. + */ +void +mpls_ldp_lsp_uninstall_all (struct hash_backet *backet, void *ctxt); + +/* + * Uninstall all LDP FEC-To-NHLFE (FTN) bindings of the given address-family. + */ +void +mpls_ldp_ftn_uninstall_all (struct zebra_vrf *zvrf, int afi); + +#if defined(HAVE_CUMULUS) +/* + * Check that the label values used in LSP creation are consistent. The + * main criteria is that if there is ECMP, the label operation must still + * be consistent - i.e., all paths either do a swap or do PHP. This is due + * to current HW restrictions. + */ +int +zebra_mpls_lsp_label_consistent (struct zebra_vrf *zvrf, mpls_label_t in_label, + mpls_label_t out_label, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex); +#endif /* HAVE_CUMULUS */ + +/* + * Add static LSP entry. This may be the first entry for this incoming label + * or an additional nexthop; an existing entry may also have outgoing label + * changed. + * Note: The label operation (swap or PHP) is common for the LSP entry (all + * NHLFEs). + */ +int +zebra_mpls_static_lsp_add (struct zebra_vrf *zvrf, mpls_label_t in_label, + mpls_label_t out_label, enum nexthop_types_t gtype, + union g_addr *gate, char *ifname, ifindex_t ifindex); + +/* + * Delete static LSP entry. 
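mpls_str2label() and mpls_label2str(), declared above, handle the '/'-separated label lists later accepted by the "label WORD" options on static routes. A small usage sketch with hypothetical values follows; label_str_demo is not part of the patch, and the 0-on-success return of mpls_str2label is an assumption.

/* Hypothetical caller of the conversion helpers declared above. */
static void
label_str_demo (void)
{
  mpls_label_t labels[MPLS_MAX_LABELS];
  u_int8_t num_labels = 0;
  char buf[BUFSIZ];

  if (mpls_str2label ("100/200", &num_labels, labels) == 0)
    zlog_debug ("%u labels parsed: %s", num_labels,
                mpls_label2str (num_labels, labels, buf, BUFSIZ));
  else
    zlog_warn ("invalid label string");
}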
This may be the delete of one particular + * NHLFE for this incoming label or the delete of the entire entry (i.e., + * all NHLFEs). + * NOTE: Delete of the only NHLFE will also end up deleting the entire + * LSP configuration. + */ +int +zebra_mpls_static_lsp_del (struct zebra_vrf *zvrf, mpls_label_t in_label, + enum nexthop_types_t gtype, union g_addr *gate, + char *ifname, ifindex_t ifindex); + +/* + * Schedule all MPLS label forwarding entries for processing. + * Called upon changes that may affect one or more of them such as + * interface or nexthop state changes. + */ +void +zebra_mpls_lsp_schedule (struct zebra_vrf *zvrf); + +/* + * Display MPLS label forwarding table for a specific LSP + * (VTY command handler). + */ +void +zebra_mpls_print_lsp (struct vty *vty, struct zebra_vrf *zvrf, mpls_label_t label, + u_char use_json); + +/* + * Display MPLS label forwarding table (VTY command handler). + */ +void +zebra_mpls_print_lsp_table (struct vty *vty, struct zebra_vrf *zvrf, + u_char use_json); + +/* + * Display MPLS LSP configuration of all static LSPs (VTY command handler). + */ +int +zebra_mpls_write_lsp_config (struct vty *vty, struct zebra_vrf *zvrf); + +/* + * Called upon process exiting, need to delete LSP forwarding + * entries from the kernel. + * NOTE: Currently supported only for default VRF. + */ +void +zebra_mpls_close_tables (struct zebra_vrf *zvrf); + +/* + * Allocate MPLS tables for this VRF. + * NOTE: Currently supported only for default VRF. + */ +void +zebra_mpls_init_tables (struct zebra_vrf *zvrf); + +/* + * Global MPLS initialization. + */ +void +zebra_mpls_init (void); + +/* + * MPLS VTY. + */ +void +zebra_mpls_vty_init (void); + +/* Inline functions. */ + +/* + * Distance (priority) definition for LSP NHLFE. + */ +static inline u_char +lsp_distance (enum lsp_types_t type) +{ + if (type == ZEBRA_LSP_STATIC) + return (route_distance (ZEBRA_ROUTE_STATIC)); + + return 150; +} + +/* + * Map RIB type to LSP type. Used when labeled-routes from BGP + * are converted into LSPs. + */ +static inline enum lsp_types_t +lsp_type_from_rib_type (int rib_type) +{ + switch (rib_type) + { + case ZEBRA_ROUTE_STATIC: + return ZEBRA_LSP_STATIC; + default: + return ZEBRA_LSP_NONE; + } +} + +/* NHLFE type as printable string. */ +static inline const char * +nhlfe_type2str(enum lsp_types_t lsp_type) +{ + switch (lsp_type) + { + case ZEBRA_LSP_STATIC: + return "Static"; + case ZEBRA_LSP_LDP: + return "LDP"; + default: + return "Unknown"; + } +} + +static inline void +mpls_mark_lsps_for_processing(struct zebra_vrf *zvrf) +{ + if (!zvrf) + return; + + zvrf->mpls_flags |= MPLS_FLAG_SCHEDULE_LSPS; +} + +static inline void +mpls_unmark_lsps_for_processing(struct zebra_vrf *zvrf) +{ + if (!zvrf) + return; + + zvrf->mpls_flags &= ~MPLS_FLAG_SCHEDULE_LSPS; +} + +static inline int +mpls_should_lsps_be_processed(struct zebra_vrf *zvrf) +{ + if (!zvrf) + return 0; + + return ((zvrf->mpls_flags & MPLS_FLAG_SCHEDULE_LSPS) ? 1 : 0); +} + +/* Global variables. */ +extern int mpls_enabled; + +#endif /*_ZEBRA_MPLS_H */ diff --git a/zebra/zebra_mpls_netlink.c b/zebra/zebra_mpls_netlink.c new file mode 100644 index 0000000000..1f894b33c6 --- /dev/null +++ b/zebra/zebra_mpls_netlink.c @@ -0,0 +1,92 @@ +#include +#include "zebra/rt.h" +#include "zebra/rt_netlink.h" +#include "zebra/zebra_mpls.h" + +/* + * Install Label Forwarding entry into the kernel. 
+ */ +int +kernel_add_lsp (zebra_lsp_t *lsp) +{ + int ret; + + if (!lsp || !lsp->best_nhlfe) // unexpected + return -1; + + UNSET_FLAG (lsp->flags, LSP_FLAG_CHANGED); + ret = netlink_mpls_multipath (RTM_NEWROUTE, lsp); + if (!ret) + SET_FLAG (lsp->flags, LSP_FLAG_INSTALLED); + else + clear_nhlfe_installed (lsp); + + return ret; +} + +/* + * Update Label Forwarding entry in the kernel. This means that the Label + * forwarding entry is already installed and needs an update - either a new + * path is to be added, an installed path has changed (e.g., outgoing label) + * or an installed path (but not all paths) has to be removed. + * TODO: Performs a DEL followed by ADD now, need to change to REPLACE. Note + * that REPLACE was originally implemented for IPv4 nexthops but removed as + * it was not functioning when moving from swap to PHP as that was signaled + * through the metric field (before kernel-MPLS). This shouldn't be an issue + * any longer, so REPLACE can be reintroduced. + */ +int +kernel_upd_lsp (zebra_lsp_t *lsp) +{ + int ret; + + if (!lsp || !lsp->best_nhlfe) // unexpected + return -1; + + UNSET_FLAG (lsp->flags, LSP_FLAG_CHANGED); + + /* First issue a DEL and clear the installed flag. */ + netlink_mpls_multipath (RTM_DELROUTE, lsp); + UNSET_FLAG (lsp->flags, LSP_FLAG_INSTALLED); + + /* Then issue an ADD. */ + ret = netlink_mpls_multipath (RTM_NEWROUTE, lsp); + if (!ret) + SET_FLAG (lsp->flags, LSP_FLAG_INSTALLED); + else + clear_nhlfe_installed (lsp); + + return ret; +} + +/* + * Delete Label Forwarding entry from the kernel. + */ +int +kernel_del_lsp (zebra_lsp_t *lsp) +{ + if (!lsp) // unexpected + return -1; + + if (CHECK_FLAG (lsp->flags, LSP_FLAG_INSTALLED)) + { + netlink_mpls_multipath (RTM_DELROUTE, lsp); + UNSET_FLAG (lsp->flags, LSP_FLAG_INSTALLED); + } + + return 0; +} + +int +mpls_kernel_init (void) +{ + struct stat st; + + /* + * Check if the MPLS module is loaded in the kernel. 
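The failure paths of kernel_add_lsp() and kernel_upd_lsp() above call clear_nhlfe_installed(), which is not visible in this excerpt. It is assumed to simply undo the per-NHLFE install state after the kernel rejects the route; a sketch under that assumption:

/* Assumed helper: clear install state on every NHLFE of the LSP after
 * a failed kernel operation. */
static void
clear_nhlfe_installed (zebra_lsp_t *lsp)
{
  zebra_nhlfe_t *nhlfe;
  struct nexthop *nexthop;

  for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next)
    {
      nexthop = nhlfe->nexthop;
      if (!nexthop)
        continue;

      UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED);
      UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);
    }
}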
+ */ + if (stat ("/proc/sys/net/mpls", &st) != 0) + return -1; + + return 0; +}; diff --git a/zebra/zebra_mpls_null.c b/zebra/zebra_mpls_null.c new file mode 100644 index 0000000000..7727c84a88 --- /dev/null +++ b/zebra/zebra_mpls_null.c @@ -0,0 +1,8 @@ +#include +#include "zebra/rt.h" +#include "zebra/zebra_mpls.h" + +int kernel_add_lsp (zebra_lsp_t *lsp) { return 0; } +int kernel_upd_lsp (zebra_lsp_t *lsp) { return 0; } +int kernel_del_lsp (zebra_lsp_t *lsp) { return 0; } +int mpls_kernel_init (void) { return -1; }; diff --git a/zebra/zebra_mpls_openbsd.c b/zebra/zebra_mpls_openbsd.c new file mode 100644 index 0000000000..bae1de66bf --- /dev/null +++ b/zebra/zebra_mpls_openbsd.c @@ -0,0 +1,208 @@ +#include +#include +#include "zebra/rt.h" +#include "zebra/zebra_mpls.h" +#include "zebra/debug.h" + +#include "privs.h" +#include "prefix.h" +#include "interface.h" +#include "log.h" + +extern struct zebra_privs_t zserv_privs; + +struct { + u_int32_t rtseq; + int fd; +} kr_state; + +static int +kernel_send_rtmsg (int action, mpls_label_t in_label, zebra_nhlfe_t *nhlfe) +{ + struct iovec iov[5]; + struct rt_msghdr hdr; + struct sockaddr_mpls sa_label_in, sa_label_out; + struct sockaddr_in nexthop; + int iovcnt = 0; + int ret; + + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug ("kernel_send_rtmsg: 0x%x, label=%u", action, in_label); + + /* initialize header */ + bzero(&hdr, sizeof (hdr)); + hdr.rtm_version = RTM_VERSION; + + hdr.rtm_type = action; + hdr.rtm_flags = RTF_UP; + hdr.rtm_fmask = RTF_MPLS; + hdr.rtm_seq = kr_state.rtseq++; /* overflow doesn't matter */ + hdr.rtm_msglen = sizeof (hdr); + hdr.rtm_hdrlen = sizeof (struct rt_msghdr); + hdr.rtm_priority = 0; + /* adjust iovec */ + iov[iovcnt].iov_base = &hdr; + iov[iovcnt++].iov_len = sizeof (hdr); + + /* in label */ + bzero(&sa_label_in, sizeof (sa_label_in)); + sa_label_in.smpls_len = sizeof (sa_label_in); + sa_label_in.smpls_family = AF_MPLS; + sa_label_in.smpls_label = htonl(in_label << MPLS_LABEL_OFFSET); + /* adjust header */ + hdr.rtm_flags |= RTF_MPLS | RTF_MPATH; + hdr.rtm_addrs |= RTA_DST; + hdr.rtm_msglen += sizeof (sa_label_in); + /* adjust iovec */ + iov[iovcnt].iov_base = &sa_label_in; + iov[iovcnt++].iov_len = sizeof (sa_label_in); + + /* nexthop */ + bzero(&nexthop, sizeof (nexthop)); + nexthop.sin_len = sizeof (nexthop); + nexthop.sin_family = AF_INET; + nexthop.sin_addr = nhlfe->nexthop->gate.ipv4; + /* adjust header */ + hdr.rtm_flags |= RTF_GATEWAY; + hdr.rtm_addrs |= RTA_GATEWAY; + hdr.rtm_msglen += sizeof (nexthop); + /* adjust iovec */ + iov[iovcnt].iov_base = &nexthop; + iov[iovcnt++].iov_len = sizeof (nexthop); + + /* If action is RTM_DELETE we have to get rid of MPLS infos */ + if (action != RTM_DELETE) + { + bzero(&sa_label_out, sizeof (sa_label_out)); + sa_label_out.smpls_len = sizeof (sa_label_out); + sa_label_out.smpls_family = AF_MPLS; + sa_label_out.smpls_label = + htonl(nhlfe->nexthop->nh_label->label[0] << MPLS_LABEL_OFFSET); + /* adjust header */ + hdr.rtm_addrs |= RTA_SRC; + hdr.rtm_flags |= RTF_MPLS; + hdr.rtm_msglen += sizeof (sa_label_out); + /* adjust iovec */ + iov[iovcnt].iov_base = &sa_label_out; + iov[iovcnt++].iov_len = sizeof (sa_label_out); + + if (nhlfe->nexthop->nh_label->label[0] == MPLS_LABEL_IMPLNULL) + hdr.rtm_mpls = MPLS_OP_POP; + else + hdr.rtm_mpls = MPLS_OP_SWAP; + } + + if (zserv_privs.change(ZPRIVS_RAISE)) + zlog_err ("Can't raise privileges"); + ret = writev (kr_state.fd, iov, iovcnt); + if (zserv_privs.change(ZPRIVS_LOWER)) + zlog_err ("Can't lower privileges"); + + if (ret == -1) + 
zlog_err ("kernel_send_rtmsg: %s", safe_strerror (errno)); + + return ret; +} + +static int +kernel_lsp_cmd (int action, zebra_lsp_t *lsp) +{ + zebra_nhlfe_t *nhlfe; + struct nexthop *nexthop = NULL; + int nexthop_num = 0; + + for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) + { + nexthop = nhlfe->nexthop; + if (!nexthop) + continue; + + if (MULTIPATH_NUM != 0 && nexthop_num >= MULTIPATH_NUM) + break; + + /* XXX */ + if (NHLFE_FAMILY(nhlfe) == AF_INET6) + continue; + + if (((action == RTM_ADD || action == RTM_CHANGE) && + (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_SELECTED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE))) || + (action == RTM_DELETE && + (CHECK_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED) && + CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB)))) + { + nexthop_num++; + + kernel_send_rtmsg (action, lsp->ile.in_label, nhlfe); + if (action == RTM_ADD || action == RTM_CHANGE) + { + SET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); + } + else + { + UNSET_FLAG (nhlfe->flags, NHLFE_FLAG_INSTALLED); + UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); + } + } + } + + return (0); +} + +int +kernel_add_lsp (zebra_lsp_t *lsp) +{ + if (!lsp || !lsp->best_nhlfe) // unexpected + return -1; + + return kernel_lsp_cmd (RTM_ADD, lsp); +} + +int +kernel_upd_lsp (zebra_lsp_t *lsp) +{ + if (!lsp || !lsp->best_nhlfe) // unexpected + return -1; + + return kernel_lsp_cmd (RTM_CHANGE, lsp); +} + +int +kernel_del_lsp (zebra_lsp_t *lsp) +{ + if (!lsp) // unexpected + return -1; + + return kernel_lsp_cmd (RTM_DELETE, lsp); +} + +#define MAX_RTSOCK_BUF 128 * 1024 +int +mpls_kernel_init (void) +{ + int rcvbuf, default_rcvbuf; + socklen_t optlen; + + if ((kr_state.fd = socket(AF_ROUTE, SOCK_RAW, 0)) == -1) { + zlog_warn("%s: socket", __func__); + return -1; + } + + /* grow receive buffer, don't wanna miss messages */ + optlen = sizeof (default_rcvbuf); + if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF, + &default_rcvbuf, &optlen) == -1) + zlog_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF"); + else + for (rcvbuf = MAX_RTSOCK_BUF; + rcvbuf > default_rcvbuf && + setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF, + &rcvbuf, sizeof (rcvbuf)) == -1 && errno == ENOBUFS; + rcvbuf /= 2) + ; /* nothing */ + + kr_state.rtseq = 1; + + return 0; +} diff --git a/zebra/zebra_mpls_vty.c b/zebra/zebra_mpls_vty.c new file mode 100644 index 0000000000..061bb244b2 --- /dev/null +++ b/zebra/zebra_mpls_vty.c @@ -0,0 +1,878 @@ +/* Zebra MPLS VTY functions + * Copyright (C) 2002 Kunihiro Ishiguro + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Zebra; see the file COPYING. If not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. 
+ */ + +#include + +#include "memory.h" +#include "if.h" +#include "prefix.h" +#include "command.h" +#include "table.h" +#include "rib.h" +#include "nexthop.h" +#include "vrf.h" +#include "mpls.h" +#include "lib/json.h" + +#include "zebra/zserv.h" +#include "zebra/zebra_vrf.h" +#include "zebra/zebra_mpls.h" +#include "zebra/zebra_rnh.h" +#include "zebra/redistribute.h" +#include "zebra/zebra_routemap.h" +#include "zebra/zebra_static.h" + +static int +zebra_mpls_transit_lsp (struct vty *vty, int add_cmd, const char *inlabel_str, + const char *gate_str, const char *outlabel_str, + const char *flag_str) +{ + struct zebra_vrf *zvrf; + int ret; + enum nexthop_types_t gtype; + union g_addr gate; + mpls_label_t label; + mpls_label_t in_label, out_label; + + zvrf = vrf_info_lookup(VRF_DEFAULT); + if (!zvrf) + { + vty_out (vty, "%% Default VRF does not exist%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (!inlabel_str) + { + vty_out (vty, "%% No Label Information%s", VTY_NEWLINE); + return CMD_WARNING; + } + + out_label = MPLS_IMP_NULL_LABEL; /* as initialization */ + label = atoi(inlabel_str); + if (!IS_MPLS_UNRESERVED_LABEL(label)) + { + vty_out (vty, "%% Invalid label%s", VTY_NEWLINE); + return CMD_WARNING; + } + + if (add_cmd) + { + if (!gate_str) + { + vty_out (vty, "%% No Nexthop Information%s", VTY_NEWLINE); + return CMD_WARNING; + } + if (!outlabel_str) + { + vty_out (vty, "%% No Outgoing label Information%s", VTY_NEWLINE); + return CMD_WARNING; + } + } + + in_label = label; + gtype = NEXTHOP_TYPE_BLACKHOLE; /* as initialization */ + + if (gate_str) + { + /* Gateway is a IPv4 or IPv6 nexthop. */ + ret = inet_pton (AF_INET6, gate_str, &gate.ipv6); + if (ret) + gtype = NEXTHOP_TYPE_IPV6; + else + { + ret = inet_pton (AF_INET, gate_str, &gate.ipv4); + if (ret) + gtype = NEXTHOP_TYPE_IPV4; + else + { + vty_out (vty, "%% Invalid nexthop%s", VTY_NEWLINE); + return CMD_WARNING; + } + } + } + + if (outlabel_str) + { + if (outlabel_str[0] == 'i') + out_label = MPLS_IMP_NULL_LABEL; + else if (outlabel_str[0] == 'e' && gtype == NEXTHOP_TYPE_IPV4) + out_label = MPLS_V4_EXP_NULL_LABEL; + else if (outlabel_str[0] == 'e' && gtype == NEXTHOP_TYPE_IPV6) + out_label = MPLS_V6_EXP_NULL_LABEL; + else + out_label = atoi(outlabel_str); + } + + if (add_cmd) + { +#if defined(HAVE_CUMULUS) + /* Check that label value is consistent. */ + if (!zebra_mpls_lsp_label_consistent (zvrf, in_label, out_label, gtype, + &gate, NULL, 0)) + { + vty_out (vty, "%% Label value not consistent%s", + VTY_NEWLINE); + return CMD_WARNING; + } +#endif /* HAVE_CUMULUS */ + + ret = zebra_mpls_static_lsp_add (zvrf, in_label, out_label, gtype, + &gate, NULL, 0); + } + else + ret = zebra_mpls_static_lsp_del (zvrf, in_label, gtype, &gate, NULL, 0); + + if (ret) + { + vty_out (vty, "%% LSP cannot be %s%s", + add_cmd ? 
"added" : "deleted", VTY_NEWLINE); + return CMD_WARNING; + } + + return CMD_SUCCESS; +} + +DEFUN (mpls_transit_lsp, + mpls_transit_lsp_cmd, + "mpls lsp (16-1048575) <(16-1048575)|explicit-null|implicit-null>", + MPLS_STR + "Establish label switched path\n" + "Incoming MPLS label\n" + "IPv4 gateway address\n" + "IPv6 gateway address\n" + "Outgoing MPLS label\n" + "Use Explicit-Null label\n" + "Use Implicit-Null label\n") +{ + return zebra_mpls_transit_lsp (vty, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL); +} + +DEFUN (no_mpls_transit_lsp, + no_mpls_transit_lsp_cmd, + "no mpls lsp (16-1048575) ", + NO_STR + MPLS_STR + "Establish label switched path\n" + "Incoming MPLS label\n" + "IPv4 gateway address\n" + "IPv6 gateway address\n") +{ + return zebra_mpls_transit_lsp (vty, 0, argv[3]->arg, argv[4]->arg, NULL, NULL); +} + +ALIAS (no_mpls_transit_lsp, + no_mpls_transit_lsp_out_label_cmd, + "no mpls lsp (16-1048575) <(16-1048575)|explicit-null|implicit-null>", + NO_STR + MPLS_STR + "Establish label switched path\n" + "Incoming MPLS label\n" + "IPv4 gateway address\n" + "IPv6 gateway address\n" + "Outgoing MPLS label\n" + "Use Explicit-Null label\n" + "Use Implicit-Null label\n") + +DEFUN (no_mpls_transit_lsp_all, + no_mpls_transit_lsp_all_cmd, + "no mpls lsp (16-1048575)", + NO_STR + MPLS_STR + "Establish label switched path\n" + "Incoming MPLS label\n") +{ + return zebra_mpls_transit_lsp (vty, 0, argv[3]->arg, NULL, NULL, NULL); +} + +/* Static route configuration. */ +DEFUN (ip_route_label, + ip_route_label_cmd, + "ip route A.B.C.D/M label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, NULL, argv[3]->arg, NULL, NULL, + NULL, NULL, argv[5]->arg); +} + +DEFUN (ip_route_tag_label, + ip_route_tag_label_cmd, + "ip route A.B.C.D/M tag (1-4294967295) label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Set tag for this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, NULL, argv[3]->arg, NULL, argv[5]->arg, + NULL, NULL, argv[7]->arg); +} + +/* Mask as A.B.C.D format. 
*/ +DEFUN (ip_route_mask_label, + ip_route_mask_label_cmd, + "ip route A.B.C.D A.B.C.D label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, NULL, + NULL, NULL, argv[6]->arg); +} + +DEFUN (ip_route_mask_tag_label, + ip_route_mask_tag_label_cmd, + "ip route A.B.C.D A.B.C.D tag (1-4294967295) label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Set tag for this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") + +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, argv[6]->arg, + NULL, NULL, argv[8]->arg); +} + +/* Distance option value. */ +DEFUN (ip_route_distance_label, + ip_route_distance_label_cmd, + "ip route A.B.C.D/M (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, NULL, argv[3]->arg, NULL, NULL, + argv[4]->arg, NULL, argv[6]->arg); +} + +DEFUN (ip_route_tag_distance_label, + ip_route_tag_distance_label_cmd, + "ip route A.B.C.D/M tag (1-4294967295) (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 
10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Set tag for this route\n" + "Tag value\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") + +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, NULL, argv[3]->arg, NULL, argv[5]->arg, + argv[6]->arg, NULL, argv[8]->arg); +} + +DEFUN (ip_route_mask_distance_label, + ip_route_mask_distance_label_cmd, + "ip route A.B.C.D A.B.C.D (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, NULL, + argv[5]->arg, NULL, argv[7]->arg); +} + +DEFUN (ip_route_mask_tag_distance_label, + ip_route_mask_tag_distance_label_cmd, + "ip route A.B.C.D A.B.C.D tag (1-4294967295) (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Set tag for this route\n" + "Tag value\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, argv[6]->arg, + argv[7]->arg, NULL, argv[9]->arg); +} + +DEFUN (no_ip_route_label, + no_ip_route_label_cmd, + "no ip route A.B.C.D/M label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, NULL, argv[4]->arg, NULL, NULL, + NULL, NULL, argv[6]->arg); +} + +DEFUN (no_ip_route_tag_label, + no_ip_route_tag_label_cmd, + "no ip route A.B.C.D/M tag (1-4294967295) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 
10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Tag of this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, NULL, argv[4]->arg, NULL, argv[6]->arg, + NULL, NULL, argv[8]->arg); +} + +DEFUN (no_ip_route_mask_label, + no_ip_route_mask_label_cmd, + "no ip route A.B.C.D A.B.C.D label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, NULL, + NULL, NULL, argv[7]->arg); +} + +DEFUN (no_ip_route_mask_tag_label, + no_ip_route_mask_tag_label_cmd, + "no ip route A.B.C.D A.B.C.D tag (1-4294967295) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Tag of this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, argv[7]->arg, + NULL, NULL, argv[9]->arg); +} + +DEFUN (no_ip_route_distance_label, + no_ip_route_distance_label_cmd, + "no ip route A.B.C.D/M (1-255) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, NULL, argv[4]->arg, NULL, NULL, + argv[5]->arg, NULL, argv[7]->arg); +} + +DEFUN (no_ip_route_tag_distance_label, + no_ip_route_tag_distance_label_cmd, + "no ip route A.B.C.D/M tag (1-4294967295) (1-255) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix (e.g. 
10.0.0.0/8)\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Tag of this route\n" + "Tag value\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, NULL, argv[4]->arg, NULL, argv[6]->arg, + argv[7]->arg, NULL, argv[9]->arg); +} + +DEFUN (no_ip_route_mask_distance_label, + no_ip_route_mask_distance_label_cmd, + "no ip route A.B.C.D A.B.C.D (1-255)", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, NULL, + argv[6]->arg, NULL, NULL); +} + +DEFUN (no_ip_route_mask_tag_distance_label, + no_ip_route_mask_tag_distance_label_cmd, + "no ip route A.B.C.D A.B.C.D tag (1-4294967295) (1-255) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IP destination prefix\n" + "IP destination prefix mask\n" + "IP gateway address\n" + "IP gateway interface name\n" + "Null interface\n" + "Tag of this route\n" + "Tag value\n" + "Distance value for this route\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, argv[7]->arg, + argv[8]->arg, NULL, argv[10]->arg); +} + +DEFUN (ipv6_route_label, + ipv6_route_label_cmd, + "ipv6 route X:X::X:X/M label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, NULL, NULL, NULL, NULL, NULL, argv[5]->arg); +} + +DEFUN (ipv6_route_tag_label, + ipv6_route_tag_label_cmd, + "ipv6 route X:X::X:X/M tag (1-4294967295) label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, NULL, NULL, argv[5]->arg, NULL, NULL, argv[7]->arg); +} + +DEFUN (ipv6_route_ifname_label, + ipv6_route_ifname_label_cmd, + "ipv6 route X:X::X:X/M X:X::X:X INTERFACE label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, NULL, NULL, NULL, argv[6]->arg); +} +DEFUN (ipv6_route_ifname_tag_label, + ipv6_route_ifname_tag_label_cmd, + "ipv6 route X:X::X:X/M X:X::X:X INTERFACE tag (1-4294967295) label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 
3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, argv[6]->arg, NULL, NULL, argv[8]->arg); +} + +DEFUN (ipv6_route_pref_label, + ipv6_route_pref_label_cmd, + "ipv6 route X:X::X:X/M (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, NULL, NULL, NULL, argv[4]->arg, NULL, argv[6]->arg); +} + +DEFUN (ipv6_route_pref_tag_label, + ipv6_route_pref_tag_label_cmd, + "ipv6 route X:X::X:X/M tag (1-4294967295) (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, NULL, NULL, argv[5]->arg, argv[6]->arg, NULL, argv[8]->arg); +} + +DEFUN (ipv6_route_ifname_pref_label, + ipv6_route_ifname_pref_label_cmd, + "ipv6 route X:X::X:X/M X:X::X:X INTERFACE (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, NULL, argv[5]->arg, NULL, argv[7]->arg); +} + +DEFUN (ipv6_route_ifname_pref_tag_label, + ipv6_route_ifname_pref_tag_label_cmd, + "ipv6 route X:X::X:X/M X:X::X:X INTERFACE tag (1-4294967295) (1-255) label WORD", + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 1, argv[2]->arg, argv[3]->arg, argv[4]->arg, NULL, argv[6]->arg, argv[7]->arg, NULL, argv[9]->arg); +} + +DEFUN (no_ipv6_route_label, + no_ipv6_route_label_cmd, + "no ipv6 route X:X::X:X/M label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, NULL, NULL, NULL, NULL, NULL, argv[6]->arg); +} + +DEFUN (no_ipv6_route_tag_label, + no_ipv6_route_tag_label_cmd, + "no ipv6 route X:X::X:X/M tag (1-4294967295) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 
3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, NULL, NULL, argv[6]->arg, NULL, NULL, argv[8]->arg); +} + +DEFUN (no_ipv6_route_ifname_label, + no_ipv6_route_ifname_label_cmd, + "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, NULL, NULL, NULL, argv[7]->arg); +} + +DEFUN (no_ipv6_route_ifname_tag_label, + no_ipv6_route_ifname_tag_label_cmd, + "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE tag (1-4294967295) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, argv[7]->arg, NULL, NULL, argv[9]->arg); +} + +DEFUN (no_ipv6_route_pref_label, + no_ipv6_route_pref_label_cmd, + "no ipv6 route X:X::X:X/M (1-255) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, NULL, NULL, NULL, argv[5]->arg, NULL, argv[7]->arg); +} + +DEFUN (no_ipv6_route_pref_tag_label, + no_ipv6_route_pref_tag_label_cmd, + "no ipv6 route X:X::X:X/M tag (1-4294967295) (1-255) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, NULL, NULL, argv[6]->arg, argv[7]->arg, NULL, argv[9]->arg); +} + +DEFUN (no_ipv6_route_ifname_pref_label, + no_ipv6_route_ifname_pref_label_cmd, + "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE (1-255) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, NULL, argv[6]->arg, NULL, argv[8]->arg); +} + +DEFUN (no_ipv6_route_ifname_pref_tag_label, + no_ipv6_route_ifname_pref_tag_label_cmd, + "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE tag (1-4294967295) (1-255) label WORD", + NO_STR + IP_STR + "Establish static routes\n" + "IPv6 destination prefix (e.g. 
3ffe:506::/32)\n" + "IPv6 gateway address\n" + "IPv6 gateway interface name\n" + "Set tag for this route\n" + "Tag value\n" + "Distance value for this prefix\n" + "Specify label(s) for this route\n" + "One or more labels separated by '/'\n") +{ + return static_ipv6_func (vty, 0, argv[3]->arg, argv[4]->arg, argv[5]->arg, NULL, argv[7]->arg, argv[8]->arg, NULL, argv[10]->arg); +} + +/* MPLS LSP configuration write function. */ +static int +zebra_mpls_config (struct vty *vty) +{ + int write = 0; + struct zebra_vrf *zvrf; + + zvrf = vrf_info_lookup(VRF_DEFAULT); + if (!zvrf) + return 0; + + write += zebra_mpls_write_lsp_config(vty, zvrf); + return write; +} + +DEFUN (show_mpls_table, + show_mpls_table_cmd, + "show mpls table [json]", + SHOW_STR + MPLS_STR + "MPLS table\n" + "JavaScript Object Notation\n") +{ + struct zebra_vrf *zvrf; + u_char use_json = (argv[3]->arg != NULL); + + zvrf = vrf_info_lookup(VRF_DEFAULT); + zebra_mpls_print_lsp_table(vty, zvrf, use_json); + return CMD_SUCCESS; +} + +DEFUN (show_mpls_table_lsp, + show_mpls_table_lsp_cmd, + "show mpls table (16-1048575) [json]", + SHOW_STR + MPLS_STR + "MPLS table\n" + "LSP to display information about\n" + "JavaScript Object Notation\n") +{ + u_int32_t label; + struct zebra_vrf *zvrf; + u_char use_json = (argv[4]->arg != NULL); + + zvrf = vrf_info_lookup(VRF_DEFAULT); + label = atoi(argv[3]->arg); + zebra_mpls_print_lsp (vty, zvrf, label, use_json); + return CMD_SUCCESS; +} + +DEFUN (show_mpls_status, + show_mpls_status_cmd, + "show mpls status", + SHOW_STR + "MPLS information\n" + "MPLS status\n") +{ + vty_out (vty, "MPLS support enabled: %s%s", (mpls_enabled) ? "yes" : + "no (mpls kernel extensions not detected)", VTY_NEWLINE); + return CMD_SUCCESS; +} + +/* MPLS node for MPLS LSP. */ +static struct cmd_node mpls_node = { MPLS_NODE, "", 1 }; + +/* MPLS VTY. */ +void +zebra_mpls_vty_init (void) +{ + install_element (VIEW_NODE, &show_mpls_status_cmd); + + if (! 
mpls_enabled) + return; + + install_node (&mpls_node, zebra_mpls_config); + + install_element (CONFIG_NODE, &ip_route_label_cmd); + install_element (CONFIG_NODE, &ip_route_tag_label_cmd); + install_element (CONFIG_NODE, &ip_route_mask_label_cmd); + install_element (CONFIG_NODE, &ip_route_mask_tag_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_tag_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_mask_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_mask_tag_label_cmd); + install_element (CONFIG_NODE, &ip_route_distance_label_cmd); + install_element (CONFIG_NODE, &ip_route_tag_distance_label_cmd); + install_element (CONFIG_NODE, &ip_route_mask_distance_label_cmd); + install_element (CONFIG_NODE, &ip_route_mask_tag_distance_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_distance_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_tag_distance_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_mask_distance_label_cmd); + install_element (CONFIG_NODE, &no_ip_route_mask_tag_distance_label_cmd); + + install_element (CONFIG_NODE, &ipv6_route_label_cmd); + install_element (CONFIG_NODE, &ipv6_route_ifname_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_ifname_label_cmd); + install_element (CONFIG_NODE, &ipv6_route_pref_label_cmd); + install_element (CONFIG_NODE, &ipv6_route_ifname_pref_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_pref_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_ifname_pref_label_cmd); + install_element (CONFIG_NODE, &ipv6_route_tag_label_cmd); + install_element (CONFIG_NODE, &ipv6_route_ifname_tag_label_cmd); + install_element (CONFIG_NODE, &ipv6_route_pref_tag_label_cmd); + install_element (CONFIG_NODE, &ipv6_route_ifname_pref_tag_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_tag_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_ifname_tag_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_pref_tag_label_cmd); + install_element (CONFIG_NODE, &no_ipv6_route_ifname_pref_tag_label_cmd); + + install_element (CONFIG_NODE, &mpls_transit_lsp_cmd); + install_element (CONFIG_NODE, &no_mpls_transit_lsp_cmd); + install_element (CONFIG_NODE, &no_mpls_transit_lsp_out_label_cmd); + install_element (CONFIG_NODE, &no_mpls_transit_lsp_all_cmd); + + install_element (VIEW_NODE, &show_mpls_table_cmd); + install_element (VIEW_NODE, &show_mpls_table_lsp_cmd); +} diff --git a/zebra/zebra_ptm.c b/zebra/zebra_ptm.c index 133b0fc2e9..ebae1bd4b9 100644 --- a/zebra/zebra_ptm.c +++ b/zebra/zebra_ptm.c @@ -298,12 +298,11 @@ DEFUN (zebra_ptm_enable_if, "ptm-enable", "Enable neighbor check with specified topology\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); struct zebra_if *if_data; int old_ptm_enable; int send_linkdown = 0; - ifp = (struct interface *) vty->index; if (ifp->ifindex == IFINDEX_INTERNAL) { return CMD_SUCCESS; @@ -338,12 +337,10 @@ DEFUN (no_zebra_ptm_enable_if, NO_STR "Enable neighbor check with specified topology\n") { - struct interface *ifp; + VTY_DECLVAR_CONTEXT (interface, ifp); int send_linkup = 0; struct zebra_if *if_data; - ifp = (struct interface *) vty->index; - if ((ifp->ifindex != IFINDEX_INTERNAL) && (ifp->ptm_enable)) { if (!if_is_operative(ifp)) diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index e238f8e8eb..b7d12a5cb4 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -37,6 +37,7 @@ #include "routemap.h" #include 
"nexthop.h" #include "vrf.h" +#include "mpls.h" #include "zebra/rib.h" #include "zebra/rt.h" @@ -117,6 +118,19 @@ _rnode_zlog(const char *_func, vrf_id_t vrf_id, struct route_node *rn, int prior #define rnode_info(node, ...) \ _rnode_zlog(__func__, vrf_id, node, LOG_INFO, __VA_ARGS__) +u_char +route_distance (int type) +{ + u_char distance; + + if ((unsigned)type >= array_size(route_info)) + distance = 150; + else + distance = route_info[type].distance; + + return distance; +} + int is_zebra_valid_kernel_table(u_int32_t table_id) { @@ -190,6 +204,9 @@ rib_copy_nexthops (struct rib *rib, struct nexthop *nh) nexthop->ifindex = nh->ifindex; memcpy(&(nexthop->gate), &(nh->gate), sizeof(union g_addr)); memcpy(&(nexthop->src), &(nh->src), sizeof(union g_addr)); + if (nh->nh_label) + nexthop_add_labels (nexthop, nh->nh_label_type, nh->nh_label->num_labels, + &nh->nh_label->label[0]); rib_nexthop_add(rib, nexthop); if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) copy_nexthops(&nexthop->resolved, nh->resolved); @@ -415,7 +432,7 @@ nexthop_active_ipv4 (struct rib *rib, struct nexthop *nexthop, int set, /* if the next hop is imported from another table, skip it */ if (match->type == ZEBRA_ROUTE_TABLE) continue; - if (CHECK_FLAG (match->flags, ZEBRA_FLAG_SELECTED)) + if (CHECK_FLAG (match->status, RIB_ENTRY_SELECTED_FIB)) break; } @@ -622,7 +639,7 @@ nexthop_active_ipv6 (struct rib *rib, struct nexthop *nexthop, int set, { if (CHECK_FLAG (match->status, RIB_ENTRY_REMOVED)) continue; - if (CHECK_FLAG (match->flags, ZEBRA_FLAG_SELECTED)) + if (CHECK_FLAG (match->status, RIB_ENTRY_SELECTED_FIB)) break; } @@ -787,7 +804,7 @@ rib_match (afi_t afi, safi_t safi, vrf_id_t vrf_id, { if (CHECK_FLAG (match->status, RIB_ENTRY_REMOVED)) continue; - if (CHECK_FLAG (match->flags, ZEBRA_FLAG_SELECTED)) + if (CHECK_FLAG (match->status, RIB_ENTRY_SELECTED_FIB)) break; } @@ -923,7 +940,7 @@ rib_lookup_ipv4 (struct prefix_ipv4 *p, vrf_id_t vrf_id) { if (CHECK_FLAG (match->status, RIB_ENTRY_REMOVED)) continue; - if (CHECK_FLAG (match->flags, ZEBRA_FLAG_SELECTED)) + if (CHECK_FLAG (match->status, RIB_ENTRY_SELECTED_FIB)) break; } @@ -943,7 +960,7 @@ rib_lookup_ipv4 (struct prefix_ipv4 *p, vrf_id_t vrf_id) /* * This clone function, unlike its original rib_lookup_ipv4(), checks * if specified IPv4 route record (prefix/mask -> gate) exists in - * the whole RIB and has ZEBRA_FLAG_SELECTED set. + * the whole RIB and has RIB_ENTRY_SELECTED_FIB set. * * Return values: * -1: error @@ -983,7 +1000,7 @@ rib_lookup_ipv4_route (struct prefix_ipv4 *p, union sockunion * qgate, { if (CHECK_FLAG (match->status, RIB_ENTRY_REMOVED)) continue; - if (CHECK_FLAG (match->flags, ZEBRA_FLAG_SELECTED)) + if (CHECK_FLAG (match->status, RIB_ENTRY_SELECTED_FIB)) break; } @@ -1293,14 +1310,19 @@ rib_uninstall (struct route_node *rn, struct rib *rib) { rib_table_info_t *info = rn->table->info; - if (CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED)) + if (CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB)) { if (info->safi == SAFI_UNICAST) zfpm_trigger_update (rn, "rib_uninstall"); - redistribute_delete (&rn->p, rib); if (! 
RIB_SYSTEM_ROUTE (rib)) rib_uninstall_kernel (rn, rib); + UNSET_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB); + } + + if (CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED)) + { + redistribute_delete (&rn->p, rib); UNSET_FLAG (rib->flags, ZEBRA_FLAG_SELECTED); } } @@ -1368,76 +1390,70 @@ rib_gc_dest (struct route_node *rn) } static void -rib_process_add_route (struct zebra_vrf *zvrf, struct route_node *rn, - struct rib *select) +rib_process_add_fib(struct zebra_vrf *zvrf, struct route_node *rn, + struct rib *new) { char buf[INET6_ADDRSTRLEN]; - int installed = 1; zfpm_trigger_update (rn, "new route selected"); /* Update real nexthop. This may actually determine if nexthop is active or not. */ - if (!nexthop_active_update (rn, select, 1)) + if (!nexthop_active_update (rn, new, 1)) { - UNSET_FLAG(select->status, RIB_ENTRY_CHANGED); + UNSET_FLAG(new->status, RIB_ENTRY_CHANGED); return; } - SET_FLAG (select->flags, ZEBRA_FLAG_SELECTED); + SET_FLAG (new->status, RIB_ENTRY_SELECTED_FIB); if (IS_ZEBRA_DEBUG_RIB) { inet_ntop (rn->p.family, &rn->p.u.prefix, buf, INET6_ADDRSTRLEN); zlog_debug ("%u:%s/%d: Adding route rn %p, rib %p (type %d)", - zvrf->vrf_id, buf, rn->p.prefixlen, rn, select, select->type); + zvrf->vrf_id, buf, rn->p.prefixlen, rn, new, new->type); } - if (!RIB_SYSTEM_ROUTE (select)) + if (!RIB_SYSTEM_ROUTE (new)) { - if (rib_install_kernel (rn, select, 0)) + if (rib_install_kernel (rn, new, 0)) { - installed = 0; inet_ntop (rn->p.family, &rn->p.u.prefix, buf, INET6_ADDRSTRLEN); zlog_warn ("%u:%s/%d: Route install failed", zvrf->vrf_id, buf, rn->p.prefixlen); } } - /* Update for redistribution. */ - if (installed) - redistribute_update (&rn->p, select, NULL); - UNSET_FLAG(select->status, RIB_ENTRY_CHANGED); + UNSET_FLAG(new->status, RIB_ENTRY_CHANGED); } static void -rib_process_del_route (struct zebra_vrf *zvrf, struct route_node *rn, - struct rib *fib) +rib_process_del_fib(struct zebra_vrf *zvrf, struct route_node *rn, + struct rib *old) { char buf[INET6_ADDRSTRLEN]; zfpm_trigger_update (rn, "removing existing route"); - /* Withdraw redistribute and uninstall from kernel. */ + /* Uninstall from kernel. */ if (IS_ZEBRA_DEBUG_RIB) { inet_ntop (rn->p.family, &rn->p.u.prefix, buf, INET6_ADDRSTRLEN); zlog_debug ("%u:%s/%d: Deleting route rn %p, rib %p (type %d)", - zvrf->vrf_id, buf, rn->p.prefixlen, rn, fib, fib->type); + zvrf->vrf_id, buf, rn->p.prefixlen, rn, old, old->type); } - redistribute_delete(&rn->p, fib); - if (!RIB_SYSTEM_ROUTE (fib)) - rib_uninstall_kernel (rn, fib); + if (!RIB_SYSTEM_ROUTE (old)) + rib_uninstall_kernel (rn, old); - UNSET_FLAG (fib->flags, ZEBRA_FLAG_SELECTED); + UNSET_FLAG (old->status, RIB_ENTRY_SELECTED_FIB); /* Update nexthop for route, reset changed flag. */ - nexthop_active_update (rn, fib, 1); - UNSET_FLAG(fib->status, RIB_ENTRY_CHANGED); + nexthop_active_update (rn, old, 1); + UNSET_FLAG(old->status, RIB_ENTRY_CHANGED); } static void -rib_process_update_route (struct zebra_vrf *zvrf, struct route_node *rn, - struct rib *select, struct rib *fib) +rib_process_update_fib (struct zebra_vrf *zvrf, struct route_node *rn, + struct rib *old, struct rib *new) { char buf[INET6_ADDRSTRLEN]; struct nexthop *nexthop = NULL, *tnexthop; @@ -1452,13 +1468,13 @@ rib_process_update_route (struct zebra_vrf *zvrf, struct route_node *rn, * We have to install or update if a new route has been selected or * something has changed. 
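From this point the patch separates two notions that previously shared ZEBRA_FLAG_SELECTED: RIB_ENTRY_SELECTED_FIB (a status bit meaning "this entry is what zebra programmed into the kernel") and ZEBRA_FLAG_SELECTED (meaning "this entry is what zebra redistributes to clients"). The rib_process_*_fib() functions now manage only the kernel side, and redistribution is decided separately in rib_process() below, where an entry carrying ZEBRA_FLAG_FIB_OVERRIDE may win the FIB slot while a different entry stays selected. The two helpers here are not part of the patch; they only name the two states explicitly:

/* Illustrative only -- not in the patch. */
static inline int
rib_entry_in_fib (const struct rib *rib)
{
  /* Installed in the kernel forwarding table. */
  return CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB) ? 1 : 0;
}

static inline int
rib_entry_selected (const struct rib *rib)
{
  /* Advertised to redistribution clients. */
  return CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED) ? 1 : 0;
}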
*/ - if (select != fib || - CHECK_FLAG (select->status, RIB_ENTRY_CHANGED)) + if (new != old || + CHECK_FLAG (new->status, RIB_ENTRY_CHANGED)) { zfpm_trigger_update (rn, "updating existing route"); /* Update the nexthop; we could determine here that nexthop is inactive. */ - if (nexthop_active_update (rn, select, 1)) + if (nexthop_active_update (rn, new, 1)) nh_active = 1; /* If nexthop is active, install the selected route, if appropriate. If @@ -1469,18 +1485,18 @@ rib_process_update_route (struct zebra_vrf *zvrf, struct route_node *rn, { if (IS_ZEBRA_DEBUG_RIB) { - if (select != fib) + if (new != old) zlog_debug ("%u:%s/%d: Updating route rn %p, rib %p (type %d) " "old %p (type %d)", zvrf->vrf_id, buf, rn->p.prefixlen, - rn, select, select->type, fib, fib->type); + rn, new, new->type, old, old->type); else zlog_debug ("%u:%s/%d: Updating route rn %p, rib %p (type %d)", - zvrf->vrf_id, buf, rn->p.prefixlen, rn, select, select->type); + zvrf->vrf_id, buf, rn->p.prefixlen, rn, new, new->type); } /* Non-system route should be installed. */ - if (!RIB_SYSTEM_ROUTE (select)) + if (!RIB_SYSTEM_ROUTE (new)) { - if (rib_install_kernel (rn, select, 1)) + if (rib_install_kernel (rn, new, 1)) { installed = 0; inet_ntop (rn->p.family, &rn->p.u.prefix, buf, INET6_ADDRSTRLEN); @@ -1490,26 +1506,23 @@ rib_process_update_route (struct zebra_vrf *zvrf, struct route_node *rn, } /* If install succeeded or system route, cleanup flags for prior route. */ - if (installed && select != fib) + if (installed && new != old) { - if (RIB_SYSTEM_ROUTE(select)) + if (RIB_SYSTEM_ROUTE(new)) { - if (!RIB_SYSTEM_ROUTE (fib)) - rib_uninstall_kernel (rn, fib); + if (!RIB_SYSTEM_ROUTE (old)) + rib_uninstall_kernel (rn, old); } else { - for (nexthop = fib->nexthop; nexthop; nexthop = nexthop->next) + for (nexthop = old->nexthop; nexthop; nexthop = nexthop->next) UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); } } /* Update for redistribution. */ if (installed) - { - SET_FLAG (select->flags, ZEBRA_FLAG_SELECTED); - redistribute_update (&rn->p, select, (select == fib) ? NULL : fib); - } + SET_FLAG (new->status, RIB_ENTRY_SELECTED_FIB); } /* @@ -1518,28 +1531,22 @@ rib_process_update_route (struct zebra_vrf *zvrf, struct route_node *rn, */ if (!nh_active || !installed) { - struct rib *del; - if (IS_ZEBRA_DEBUG_RIB) { - if (select != fib) + if (new != old) zlog_debug ("%u:%s/%d: Deleting route rn %p, rib %p (type %d) " "old %p (type %d) - %s", zvrf->vrf_id, buf, rn->p.prefixlen, - rn, select, select->type, fib, fib->type, + rn, new, new->type, old, old->type, nh_active ? "install failed" : "nexthop inactive"); else zlog_debug ("%u:%s/%d: Deleting route rn %p, rib %p (type %d) - %s", - zvrf->vrf_id, buf, rn->p.prefixlen, rn, select, select->type, + zvrf->vrf_id, buf, rn->p.prefixlen, rn, new, new->type, nh_active ? "install failed" : "nexthop inactive"); } - del = (select == fib) ? select : fib; - - redistribute_delete(&rn->p, del); - - if (!RIB_SYSTEM_ROUTE (del)) - rib_uninstall_kernel (rn, del); - UNSET_FLAG (select->flags, ZEBRA_FLAG_SELECTED); + if (!RIB_SYSTEM_ROUTE (old)) + rib_uninstall_kernel (rn, old); + UNSET_FLAG (new->status, RIB_ENTRY_SELECTED_FIB); } } else @@ -1550,33 +1557,33 @@ rib_process_update_route (struct zebra_vrf *zvrf, struct route_node *rn, * netlink reporting interface up before IPv4 or IPv6 protocol is ready * to add routes. 
*/ - if (!RIB_SYSTEM_ROUTE (select)) + if (!RIB_SYSTEM_ROUTE (new)) { int in_fib = 0; - for (ALL_NEXTHOPS_RO(select->nexthop, nexthop, tnexthop, recursing)) + for (ALL_NEXTHOPS_RO(new->nexthop, nexthop, tnexthop, recursing)) if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB)) { in_fib = 1; break; } if (!in_fib) - rib_install_kernel (rn, select, 0); + rib_install_kernel (rn, new, 0); } } /* Update prior route. */ - if (select != fib) + if (new != old) { - UNSET_FLAG (fib->flags, ZEBRA_FLAG_SELECTED); + UNSET_FLAG (old->status, RIB_ENTRY_SELECTED_FIB); /* Set real nexthop. */ - nexthop_active_update (rn, fib, 1); - UNSET_FLAG(fib->status, RIB_ENTRY_CHANGED); + nexthop_active_update (rn, old, 1); + UNSET_FLAG(old->status, RIB_ENTRY_CHANGED); } /* Clear changed flag. */ - UNSET_FLAG(select->status, RIB_ENTRY_CHANGED); + UNSET_FLAG(new->status, RIB_ENTRY_CHANGED); } /* Check if 'alternate' RIB entry is better than 'current'. */ @@ -1627,33 +1634,32 @@ rib_process (struct route_node *rn) { struct rib *rib; struct rib *next; - struct rib *fib = NULL; - struct rib *select = NULL; - struct rib *del = NULL; + struct rib *old_selected = NULL; + struct rib *new_selected = NULL; + struct rib *old_fib = NULL; + struct rib *new_fib = NULL; struct rib *best = NULL; char buf[INET6_ADDRSTRLEN]; rib_dest_t *dest; struct zebra_vrf *zvrf = NULL; vrf_id_t vrf_id = VRF_UNKNOWN; - rib_table_info_t *info; assert (rn); - info = rn->table->info; - + dest = rib_dest_from_rnode (rn); if (dest) { zvrf = rib_dest_vrf (dest); vrf_id = zvrf->vrf_id; } - + if (IS_ZEBRA_DEBUG_RIB) inet_ntop (rn->p.family, &rn->p.u.prefix, buf, INET6_ADDRSTRLEN); if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug ("%u:%s/%d: Processing rn %p", vrf_id, buf, rn->p.prefixlen, rn); - RNODE_FOREACH_RIB_SAFE (rn, rib, next) + RNODE_FOREACH_RIB (rn, rib) { if (IS_ZEBRA_DEBUG_RIB_DETAILED) zlog_debug ("%u:%s/%d: Examine rib %p (type %d) status %x flags %x " @@ -1663,31 +1669,23 @@ rib_process (struct route_node *rn) UNSET_FLAG(rib->status, RIB_ENTRY_NEXTHOPS_CHANGED); - /* Currently installed rib. */ + /* Currently selected rib. */ if (CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED)) { - assert (fib == NULL); - fib = rib; + assert (old_selected == NULL); + old_selected = rib; + } + /* Currently in fib */ + if (CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB)) + { + assert (old_fib == NULL); + old_fib = rib; } - /* Unlock removed routes, so they'll be freed, bar the FIB entry, - * which we need to do do further work with below. - */ + /* Skip deleted entries from selection */ if (CHECK_FLAG (rib->status, RIB_ENTRY_REMOVED)) - { - if (rib != fib) - { - if (IS_ZEBRA_DEBUG_RIB) - rnode_debug (rn, vrf_id, "rn %p, removing rib %p", - (void *)rn, (void *)rib); - rib_unlink (rn, rib); - } - else - del = rib; - - continue; - } - + continue; + /* Skip unreachable nexthop. */ /* This first call to nexthop_active_update is merely to determine if * there's any change to nexthops associated with this RIB entry. Now, @@ -1704,9 +1702,15 @@ rib_process (struct route_node *rn) { if (rib->type == ZEBRA_ROUTE_TABLE) { + /* XXX: HERE BE DRAGONS!!!!! 
+ * In all honesty, I have not yet figured out what this part + * does or why the RIB_ENTRY_CHANGED test above is correct + * or why we need to delete a route here, and also not whether + * this concerns both selected and fib route, or only selected + * or only fib */ /* This entry was denied by the 'ip protocol table' route-map, we * need to delete it */ - if (rib != fib) + if (rib != old_selected) { if (IS_ZEBRA_DEBUG_RIB) zlog_debug ("%s: %s/%d: imported via import-table but denied " @@ -1715,15 +1719,12 @@ rib_process (struct route_node *rn) rib_unlink (rn, rib); } else - del = rib; + SET_FLAG (rib->status, RIB_ENTRY_REMOVED); } continue; } - if (info->safi == SAFI_MULTICAST) - continue; - /* Infinite distance. */ if (rib->distance == DISTANCE_INFINITY) { @@ -1731,33 +1732,101 @@ rib_process (struct route_node *rn) continue; } - best = rib_choose_best(select, rib); - if (select && best != select) - UNSET_FLAG (select->status, RIB_ENTRY_CHANGED); + if (CHECK_FLAG (rib->flags, ZEBRA_FLAG_FIB_OVERRIDE)) + { + best = rib_choose_best(new_fib, rib); + if (new_fib && best != new_fib) + UNSET_FLAG (new_fib->status, RIB_ENTRY_CHANGED); + new_fib = best; + } + else + { + best = rib_choose_best(new_selected, rib); + if (new_selected && best != new_selected) + UNSET_FLAG (new_selected->status, RIB_ENTRY_CHANGED); + new_selected = best; + } if (best != rib) UNSET_FLAG (rib->status, RIB_ENTRY_CHANGED); - select = best; - } /* RNODE_FOREACH_RIB_SAFE */ + } /* RNODE_FOREACH_RIB */ + + /* If no FIB override route, use the selected route also for FIB */ + if (new_fib == NULL) + new_fib = new_selected; /* After the cycle is finished, the following pointers will be set: - * select --- the winner RIB entry, if any was found, otherwise NULL - * fib --- the SELECTED RIB entry, if any, otherwise NULL - * del --- equal to fib, if fib is queued for deletion, NULL otherwise - * rib --- NULL + * old_selected --- RIB entry currently having SELECTED + * new_selected --- RIB entry that is newly SELECTED + * old_fib --- RIB entry currently in kernel FIB + * new_fib --- RIB entry that is newly to be in kernel FIB + * + * new_selected will get SELECTED flag, and is going to be redistributed + * the zclients. new_fib (which can be new_selected) will be installed in kernel. */ - if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug ("%u:%s/%d: After processing: select %p fib %p del %p", - vrf_id, buf, rn->p.prefixlen, select, fib, del); - /* Same RIB entry is selected. Update FIB and finish. 
*/ - if (select && select == fib) - rib_process_update_route (zvrf, rn, select, select); - else if (select && fib) - rib_process_update_route (zvrf, rn, select, fib); - else if (select) - rib_process_add_route (zvrf, rn, select); - else if (fib) - rib_process_del_route (zvrf, rn, fib); + if (IS_ZEBRA_DEBUG_RIB_DETAILED) + { + zlog_debug ("%u:%s/%d: After processing: old_selected %p new_selected %p old_fib %p new_fib %p", + vrf_id, buf, rn->p.prefixlen, + (void *)old_selected, + (void *)new_selected, + (void *)old_fib, + (void *)new_fib); + } + + /* Buffer RIB_ENTRY_CHANGED here, because it will get cleared if + * fib == selected */ + bool selected_changed = new_selected && CHECK_FLAG(new_selected->status, + RIB_ENTRY_CHANGED); + + /* Update fib according to selection results */ + if (new_fib && old_fib) + rib_process_update_fib (zvrf, rn, old_fib, new_fib); + else if (new_fib) + rib_process_add_fib (zvrf, rn, new_fib); + else if (old_fib) + rib_process_del_fib (zvrf, rn, old_fib); + + /* Redistribute SELECTED entry */ + if (old_selected != new_selected || selected_changed) + { + struct nexthop *nexthop, *tnexthop; + int recursing; + + /* Check if we have a FIB route for the destination, otherwise, + * don't redistribute it */ + for (ALL_NEXTHOPS_RO(new_fib ? new_fib->nexthop : NULL, nexthop, + tnexthop, recursing)) + { + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) + { + break; + } + } + if (!nexthop) + new_selected = NULL; + + if (new_selected && new_selected != new_fib) + { + nexthop_active_update(rn, new_selected, 1); + UNSET_FLAG(new_selected->status, RIB_ENTRY_CHANGED); + } + + if (old_selected) + { + if (!new_selected) + redistribute_delete(&rn->p, old_selected); + if (old_selected != new_selected) + UNSET_FLAG (old_selected->flags, ZEBRA_FLAG_SELECTED); + } + + if (new_selected) + { + /* Install new or replace existing redistributed entry */ + SET_FLAG (new_selected->flags, ZEBRA_FLAG_SELECTED); + redistribute_update (&rn->p, new_selected, old_selected); + } + } #if 0 if (select && select == fib) @@ -1934,12 +2003,18 @@ rib_process (struct route_node *rn) } #endif - /* FIB route was removed, should be deleted */ - if (del) + /* Remove all RIB entries queued for removal */ + RNODE_FOREACH_RIB_SAFE (rn, rib, next) { - if (IS_ZEBRA_DEBUG_RIB) - rnode_debug (rn, vrf_id, "Deleting fib %p, rn %p", (void *)del, (void *)rn); - rib_unlink (rn, del); + if (CHECK_FLAG (rib->status, RIB_ENTRY_REMOVED)) + { + if (IS_ZEBRA_DEBUG_RIB) + { + rnode_debug (rn, vrf_id, "rn %p, removing rib %p", + (void *)rn, (void *)rib); + } + rib_unlink(rn, rib); + } } /* @@ -2019,6 +2094,16 @@ meta_queue_process_complete (struct work_queue *dummy) zebra_evaluate_rnh(zvrf->vrf_id, AF_INET6, 0, RNH_IMPORT_CHECK_TYPE, NULL); } } + + /* Schedule LSPs for processing, if needed. */ + zvrf = vrf_info_lookup(VRF_DEFAULT); + if (mpls_should_lsps_be_processed(zvrf)) + { + if (IS_ZEBRA_DEBUG_MPLS) + zlog_debug ("%u: Scheduling all LSPs upon RIB completion", zvrf->vrf_id); + zebra_mpls_lsp_schedule (zvrf); + mpls_unmark_lsps_for_processing(zvrf); + } } /* Dispatch the meta queue by picking, processing and unlocking the next RN from @@ -2496,7 +2581,7 @@ void rib_lookup_and_pushup (struct prefix_ipv4 * p, vrf_id_t vrf_id) */ RNODE_FOREACH_RIB (rn, rib) { - if (CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED) && + if (CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB) && ! 
RIB_SYSTEM_ROUTE (rib)) { changed = 1; @@ -2642,7 +2727,7 @@ rib_delete (afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, u_short instance, if (CHECK_FLAG (rib->status, RIB_ENTRY_REMOVED)) continue; - if (CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED)) + if (CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB)) fib = rib; if (rib->type != type) @@ -2703,7 +2788,7 @@ rib_delete (afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, u_short instance, for (nexthop = fib->nexthop; nexthop; nexthop = nexthop->next) UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB); - UNSET_FLAG (fib->flags, ZEBRA_FLAG_SELECTED); + UNSET_FLAG (fib->status, RIB_ENTRY_SELECTED_FIB); } else { @@ -3080,7 +3165,7 @@ rib_close_table (struct route_table *table) for (rn = route_top (table); rn; rn = route_next (rn)) RNODE_FOREACH_RIB (rn, rib) { - if (!CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED)) + if (!CHECK_FLAG (rib->status, RIB_ENTRY_SELECTED_FIB)) continue; if (info->safi == SAFI_UNICAST) @@ -3123,6 +3208,9 @@ rib_close (void) if (zvrf->other_table[AFI_IP6][table_id]) rib_close_table (zvrf->other_table[AFI_IP6][table_id]); } + + zebra_mpls_close_tables(zvrf); + } /* Routing information base initialize. */ diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c index fddc4fd7b1..9b407ed392 100644 --- a/zebra/zebra_routemap.c +++ b/zebra/zebra_routemap.c @@ -52,7 +52,7 @@ struct nh_rmap_obj vrf_id_t vrf_id; u_int32_t source_protocol; int metric; - u_short tag; + route_tag_t tag; }; static void zebra_route_map_set_delay_timer(u_int32_t value); @@ -61,10 +61,11 @@ static void zebra_route_map_set_delay_timer(u_int32_t value); /* Add zebra route map rule */ static int -zebra_route_match_add(struct vty *vty, struct route_map_index *index, +zebra_route_match_add(struct vty *vty, const char *command, const char *arg, route_map_event_t type) { + VTY_DECLVAR_CONTEXT (route_map_index, index); int ret; ret = route_map_add_match (index, command, arg); @@ -90,10 +91,11 @@ zebra_route_match_add(struct vty *vty, struct route_map_index *index, /* Delete zebra route map rule. */ static int -zebra_route_match_delete (struct vty *vty, struct route_map_index *index, +zebra_route_match_delete (struct vty *vty, const char *command, const char *arg, route_map_event_t type) { + VTY_DECLVAR_CONTEXT (route_map_index, index); int ret; char *dep_name = NULL; const char *tmpstr; @@ -146,7 +148,7 @@ static route_map_result_t route_match_tag (void *rule, struct prefix *prefix, route_map_object_t type, void *object) { - u_short *tag; + route_tag_t *tag; struct nh_rmap_obj *nh_data; if (type == RMAP_ZEBRA) @@ -160,45 +162,13 @@ route_match_tag (void *rule, struct prefix *prefix, return RMAP_NOMATCH; } -/* Route map 'match tag' match statement. 'arg' is TAG value */ -static void * -route_match_tag_compile (const char *arg) -{ - u_short *tag; - u_short tmp; - - /* tag value shoud be integer. */ - if (! all_digit (arg)) - return NULL; - - tmp = atoi(arg); - if (tmp < 1) - return NULL; - - tag = XMALLOC (MTYPE_ROUTE_MAP_COMPILED, sizeof (u_short)); - - if (!tag) - return tag; - - *tag = tmp; - - return tag; -} - -/* Free route map's compiled 'match tag' value. 
*/ -static void -route_match_tag_free (void *rule) -{ - XFREE (MTYPE_ROUTE_MAP_COMPILED, rule); -} - /* Route map commands for tag matching */ -struct route_map_rule_cmd route_match_tag_cmd = +static struct route_map_rule_cmd route_match_tag_cmd = { "tag", route_match_tag, - route_match_tag_compile, - route_match_tag_free + route_map_rule_tag_compile, + route_map_rule_tag_free, }; @@ -260,7 +230,7 @@ DEFUN (match_ip_address_prefix_len, "Match prefix length of ip address\n" "Prefix length\n") { - return zebra_route_match_add (vty, vty->index, "ip address prefix-len", + return zebra_route_match_add (vty, "ip address prefix-len", argv[4]->arg, RMAP_EVENT_MATCH_ADDED); } @@ -274,7 +244,7 @@ DEFUN (no_match_ip_address_prefix_len, "Prefix length\n") { char *plen = (argc == 6) ? argv[5]->arg : NULL; - return zebra_route_match_delete (vty, vty->index, + return zebra_route_match_delete (vty, "ip address prefix-len", plen, RMAP_EVENT_MATCH_DELETED); } @@ -289,7 +259,7 @@ DEFUN (match_ip_nexthop_prefix_len, "Match prefixlen of given nexthop\n" "Prefix length\n") { - return zebra_route_match_add (vty, vty->index, "ip next-hop prefix-len", + return zebra_route_match_add (vty, "ip next-hop prefix-len", argv[4]->arg, RMAP_EVENT_MATCH_ADDED); } @@ -304,7 +274,7 @@ DEFUN (no_match_ip_nexthop_prefix_len, "Prefix length\n") { char *plen = (argc == 6) ? argv[5]->arg : NULL; - return zebra_route_match_delete (vty, vty->index, + return zebra_route_match_delete (vty, "ip next-hop prefix-len", plen, RMAP_EVENT_MATCH_DELETED); } @@ -325,7 +295,7 @@ DEFUN (match_source_protocol, vty_out (vty, "invalid protocol name \"%s\"%s", proto, VTY_NEWLINE); return CMD_WARNING; } - return zebra_route_match_add (vty, vty->index, "source-protocol", proto, RMAP_EVENT_MATCH_ADDED); + return zebra_route_match_add (vty, "source-protocol", proto, RMAP_EVENT_MATCH_ADDED); } DEFUN (no_match_source_protocol, @@ -337,7 +307,7 @@ DEFUN (no_match_source_protocol, ) { char *proto = (argc == 4) ? argv[3]->text : NULL; - return zebra_route_match_delete (vty, vty->index, "source-protocol", proto, RMAP_EVENT_MATCH_DELETED); + return zebra_route_match_delete (vty, "source-protocol", proto, RMAP_EVENT_MATCH_DELETED); } /* set functions */ @@ -399,7 +369,9 @@ DEFUN (set_src, vty_out (vty, "%% not a local address%s", VTY_NEWLINE); return CMD_WARNING; } - return generic_set_add (vty, vty->index, "src", argv[idx_ip]->arg); + + VTY_DECLVAR_CONTEXT (route_map_index, index); + return generic_set_add (vty, index, "src", argv[idx_ip]->arg); } DEFUN (no_set_src, @@ -410,7 +382,8 @@ DEFUN (no_set_src, "Source address for route\n") { char *ip = (argc == 4) ? 
argv[3]->arg : NULL; - return generic_set_delete (vty, vty->index, "src", ip); + VTY_DECLVAR_CONTEXT (route_map_index, index); + return generic_set_delete (vty, index, "src", ip); } DEFUN (zebra_route_map_timer, @@ -1318,7 +1291,7 @@ zebra_route_map_write_delay_timer (struct vty *vty) route_map_result_t zebra_route_map_check (int family, int rib_type, struct prefix *p, - struct nexthop *nexthop, vrf_id_t vrf_id, u_short tag) + struct nexthop *nexthop, vrf_id_t vrf_id, route_tag_t tag) { struct route_map *rmap = NULL; route_map_result_t ret = RMAP_MATCH; @@ -1361,7 +1334,7 @@ zebra_del_import_table_route_map (afi_t afi, uint32_t table) route_map_result_t zebra_import_table_route_map_check (int family, int rib_type, struct prefix *p, - struct nexthop *nexthop, vrf_id_t vrf_id, u_short tag, const char *rmap_name) + struct nexthop *nexthop, vrf_id_t vrf_id, route_tag_t tag, const char *rmap_name) { struct route_map *rmap = NULL; route_map_result_t ret = RMAP_DENYMATCH; @@ -1490,19 +1463,15 @@ zebra_route_map_init () install_element (CONFIG_NODE, &ip_protocol_cmd); install_element (CONFIG_NODE, &no_ip_protocol_cmd); install_element (VIEW_NODE, &show_ip_protocol_cmd); - install_element (ENABLE_NODE, &show_ip_protocol_cmd); install_element (CONFIG_NODE, &ipv6_protocol_cmd); install_element (CONFIG_NODE, &no_ipv6_protocol_cmd); install_element (VIEW_NODE, &show_ipv6_protocol_cmd); - install_element (ENABLE_NODE, &show_ipv6_protocol_cmd); install_element (CONFIG_NODE, &ip_protocol_nht_rmap_cmd); install_element (CONFIG_NODE, &no_ip_protocol_nht_rmap_cmd); install_element (VIEW_NODE, &show_ip_protocol_nht_cmd); - install_element (ENABLE_NODE, &show_ip_protocol_nht_cmd); install_element (CONFIG_NODE, &ipv6_protocol_nht_rmap_cmd); install_element (CONFIG_NODE, &no_ipv6_protocol_nht_rmap_cmd); install_element (VIEW_NODE, &show_ipv6_protocol_nht_cmd); - install_element (ENABLE_NODE, &show_ipv6_protocol_nht_cmd); install_element (CONFIG_NODE, &zebra_route_map_timer_cmd); install_element (CONFIG_NODE, &no_zebra_route_map_timer_cmd); diff --git a/zebra/zebra_routemap.h b/zebra/zebra_routemap.h index 5eb3740909..bf418ccacc 100644 --- a/zebra/zebra_routemap.h +++ b/zebra/zebra_routemap.h @@ -34,13 +34,13 @@ extern route_map_result_t zebra_import_table_route_map_check (int family, int ri struct prefix *p, struct nexthop *nexthop, vrf_id_t vrf_id, - u_short tag, + route_tag_t tag, const char *rmap_name); extern route_map_result_t zebra_route_map_check (int family, int rib_type, struct prefix *p, struct nexthop *nexthop, vrf_id_t vrf_id, - u_short tag); + route_tag_t tag); extern route_map_result_t zebra_nht_route_map_check (int family, int client_proto, struct prefix *p, diff --git a/zebra/zebra_static.c b/zebra/zebra_static.c index d05b6e13ac..d336b81520 100644 --- a/zebra/zebra_static.c +++ b/zebra/zebra_static.c @@ -43,6 +43,7 @@ static_install_route (afi_t afi, safi_t safi, struct prefix *p, struct static_ro struct route_node *rn; struct route_table *table; struct prefix nh_p; + struct nexthop *nexthop = NULL; /* Lookup table. 
*/ table = zebra_vrf_table (afi, safi, si->vrf_id); @@ -72,29 +73,34 @@ static_install_route (afi_t afi, safi_t safi, struct prefix *p, struct static_ro switch (si->type) { case STATIC_IPV4_GATEWAY: - rib_nexthop_ipv4_add (rib, &si->addr.ipv4, NULL); + nexthop = rib_nexthop_ipv4_add (rib, &si->addr.ipv4, NULL); nh_p.family = AF_INET; nh_p.prefixlen = IPV4_MAX_BITLEN; nh_p.u.prefix4 = si->addr.ipv4; zebra_register_rnh_static_nh(si->vrf_id, &nh_p, rn); break; case STATIC_IFINDEX: - rib_nexthop_ifindex_add (rib, si->ifindex); + nexthop = rib_nexthop_ifindex_add (rib, si->ifindex); break; case STATIC_IPV4_BLACKHOLE: - rib_nexthop_blackhole_add (rib); + nexthop = rib_nexthop_blackhole_add (rib); break; case STATIC_IPV6_GATEWAY: - rib_nexthop_ipv6_add (rib, &si->addr.ipv6); + nexthop = rib_nexthop_ipv6_add (rib, &si->addr.ipv6); nh_p.family = AF_INET6; nh_p.prefixlen = IPV6_MAX_BITLEN; nh_p.u.prefix6 = si->addr.ipv6; zebra_register_rnh_static_nh(si->vrf_id, &nh_p, rn); break; case STATIC_IPV6_GATEWAY_IFINDEX: - rib_nexthop_ipv6_ifindex_add (rib, &si->addr.ipv6, si->ifindex); + nexthop = rib_nexthop_ipv6_ifindex_add (rib, &si->addr.ipv6, + si->ifindex); break; } + /* Update label(s), if present. */ + if (si->snh_label.num_labels) + nexthop_add_labels (nexthop, ZEBRA_LSP_STATIC, si->snh_label.num_labels, + &si->snh_label.label[0]); if (IS_ZEBRA_DEBUG_RIB) { @@ -131,29 +137,34 @@ static_install_route (afi_t afi, safi_t safi, struct prefix *p, struct static_ro switch (si->type) { case STATIC_IPV4_GATEWAY: - rib_nexthop_ipv4_add (rib, &si->addr.ipv4, NULL); + nexthop = rib_nexthop_ipv4_add (rib, &si->addr.ipv4, NULL); nh_p.family = AF_INET; nh_p.prefixlen = IPV4_MAX_BITLEN; nh_p.u.prefix4 = si->addr.ipv4; zebra_register_rnh_static_nh(si->vrf_id, &nh_p, rn); break; case STATIC_IFINDEX: - rib_nexthop_ifindex_add (rib, si->ifindex); + nexthop = rib_nexthop_ifindex_add (rib, si->ifindex); break; case STATIC_IPV4_BLACKHOLE: - rib_nexthop_blackhole_add (rib); + nexthop = rib_nexthop_blackhole_add (rib); break; case STATIC_IPV6_GATEWAY: - rib_nexthop_ipv6_add (rib, &si->addr.ipv6); + nexthop = rib_nexthop_ipv6_add (rib, &si->addr.ipv6); nh_p.family = AF_INET6; nh_p.prefixlen = IPV6_MAX_BITLEN; nh_p.u.prefix6 = si->addr.ipv6; zebra_register_rnh_static_nh(si->vrf_id, &nh_p, rn); break; case STATIC_IPV6_GATEWAY_IFINDEX: - rib_nexthop_ipv6_ifindex_add (rib, &si->addr.ipv6, si->ifindex); + nexthop = rib_nexthop_ipv6_ifindex_add (rib, &si->addr.ipv6, + si->ifindex); break; } + /* Update label(s), if present. 
*/ + if (si->snh_label.num_labels) + nexthop_add_labels (nexthop, ZEBRA_LSP_STATIC, si->snh_label.num_labels, + &si->snh_label.label[0]); /* Save the flags of this static routes (reject, blackhole) */ rib->flags = si->flags; @@ -181,30 +192,60 @@ static_install_route (afi_t afi, safi_t safi, struct prefix *p, struct static_ro rib_addnode (rn, rib, 1); } } + +static int +static_nexthop_label_same (struct nexthop *nexthop, + struct static_nh_label *snh_label) +{ + int i; + + if ((snh_label->num_labels == 0 && nexthop->nh_label) || + (snh_label->num_labels != 0 && !nexthop->nh_label)) + return 0; + + if (snh_label->num_labels != 0) + if (snh_label->num_labels != nexthop->nh_label->num_labels) + return 0; + + for (i = 0; i < snh_label->num_labels; i++) + if (snh_label->label[i] != nexthop->nh_label->label[i]) + return 0; + + return 1; +} + static int static_nexthop_same (struct nexthop *nexthop, struct static_route *si) { - if (nexthop->type == NEXTHOP_TYPE_IPV4 - && si->type == STATIC_IPV4_GATEWAY - && IPV4_ADDR_SAME (&nexthop->gate.ipv4, &si->addr.ipv4)) - return 1; - if (nexthop->type == NEXTHOP_TYPE_IFINDEX - && si->type == STATIC_IFINDEX - && nexthop->ifindex == si->ifindex) - return 1; + int gw_match = 0; + if (nexthop->type == NEXTHOP_TYPE_BLACKHOLE && si->type == STATIC_IPV4_BLACKHOLE) return 1; - if (nexthop->type == NEXTHOP_TYPE_IPV6 + + if (nexthop->type == NEXTHOP_TYPE_IPV4 + && si->type == STATIC_IPV4_GATEWAY + && IPV4_ADDR_SAME (&nexthop->gate.ipv4, &si->addr.ipv4)) + gw_match = 1; + else if (nexthop->type == NEXTHOP_TYPE_IFINDEX + && si->type == STATIC_IFINDEX + && nexthop->ifindex == si->ifindex) + gw_match = 1; + else if (nexthop->type == NEXTHOP_TYPE_IPV6 && si->type == STATIC_IPV6_GATEWAY && IPV6_ADDR_SAME (&nexthop->gate.ipv6, &si->addr.ipv6)) - return 1; - if (nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX + gw_match = 1; + else if (nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX && si->type == STATIC_IPV6_GATEWAY_IFINDEX && IPV6_ADDR_SAME (&nexthop->gate.ipv6, &si->addr.ipv6) && nexthop->ifindex == si->ifindex) - return 1; + gw_match = 1; + + if (!gw_match) return 0; + + /* Check match on label(s), if any */ + return static_nexthop_label_same (nexthop, &si->snh_label); } /* Uninstall static route from RIB. */ @@ -280,12 +321,20 @@ static_uninstall_route (afi_t afi, safi_t safi, struct prefix *p, struct static_ /* If there are other active nexthops, do an update. 
*/ if (rib->nexthop_active_num > 1) { + /* Update route in kernel if it's in fib */ + if (CHECK_FLAG(rib->status, RIB_ENTRY_SELECTED_FIB)) rib_install_kernel (rn, rib, 1); + /* Update redistribution if it's selected */ + if (CHECK_FLAG(rib->flags, ZEBRA_FLAG_SELECTED)) redistribute_update (&rn->p, rib, NULL); } else { + /* Remove from redistribute if selected route becomes inactive */ + if (CHECK_FLAG(rib->flags, ZEBRA_FLAG_SELECTED)) redistribute_delete (&rn->p, rib); + /* Remove from kernel if fib route becomes inactive */ + if (CHECK_FLAG(rib->status, RIB_ENTRY_SELECTED_FIB)) rib_uninstall_kernel (rn, rib); } } @@ -314,8 +363,9 @@ static_uninstall_route (afi_t afi, safi_t safi, struct prefix *p, struct static_ int static_add_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, union g_addr *gate, ifindex_t ifindex, - const char *ifname, u_char flags, u_short tag, - u_char distance, struct zebra_vrf *zvrf) + const char *ifname, u_char flags, route_tag_t tag, + u_char distance, struct zebra_vrf *zvrf, + struct static_nh_label *snh_label) { struct route_node *rn; struct static_route *si; @@ -350,7 +400,8 @@ static_add_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, (afi == AFI_IP6 && IPV6_ADDR_SAME (gate, &si->addr.ipv6)))) && (! ifindex || ifindex == si->ifindex)) { - if ((distance == si->distance) && (tag == si->tag)) + if ((distance == si->distance) && (tag == si->tag) && + !memcmp (&si->snh_label, snh_label, sizeof (struct static_nh_label))) { route_unlock_node (rn); return 0; @@ -360,10 +411,10 @@ static_add_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, } } - /* Distance or tag changed. */ + /* Distance or tag or label changed, delete existing first. */ if (update) - static_delete_route (afi, safi, type, p, gate, - ifindex, update->tag, update->distance, zvrf); + static_delete_route (afi, safi, type, p, gate, ifindex, update->tag, + update->distance, zvrf, &update->snh_label); /* Make new static route structure. */ si = XCALLOC (MTYPE_STATIC_ROUTE, sizeof (struct static_route)); @@ -392,6 +443,9 @@ static_add_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, break; } + /* Save labels, if any. */ + memcpy (&si->snh_label, snh_label, sizeof (struct static_nh_label)); + /* Add new static route information to the tree with sort by distance value and gateway address. */ for (pp = NULL, cp = rn->info; cp; pp = cp, cp = cp->next) @@ -428,7 +482,8 @@ static_add_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, int static_delete_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, union g_addr *gate, ifindex_t ifindex, - u_short tag, u_char distance, struct zebra_vrf *zvrf) + route_tag_t tag, u_char distance, struct zebra_vrf *zvrf, + struct static_nh_label *snh_label) { struct route_node *rn; struct static_route *si; @@ -451,7 +506,9 @@ static_delete_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, (afi == AFI_IP && IPV4_ADDR_SAME (gate, &si->addr.ipv4)) || (afi == AFI_IP6 && IPV6_ADDR_SAME (gate, &si->addr.ipv6)))) && (! ifindex || ifindex == si->ifindex) - && (! tag || (tag == si->tag))) + && (! tag || (tag == si->tag)) + && (! snh_label->num_labels || + !memcmp (&si->snh_label, snh_label, sizeof (struct static_nh_label)))) break; /* Can't find static route. */ @@ -462,7 +519,7 @@ static_delete_route (afi_t afi, safi_t safi, u_char type, struct prefix *p, } /* Install into rib. 
*/ - static_uninstall_route (AFI_IP, safi, p, si); + static_uninstall_route (afi, safi, p, si); /* Unlink static route from linked list. */ if (si->prev) diff --git a/zebra/zebra_static.h b/zebra/zebra_static.h index 0f00609b55..5b6f429761 100644 --- a/zebra/zebra_static.h +++ b/zebra/zebra_static.h @@ -23,6 +23,14 @@ #ifndef __ZEBRA_STATIC_H__ #define __ZEBRA_STATIC_H__ +/* Static route label information */ +struct static_nh_label +{ + u_int8_t num_labels; + u_int8_t reserved[3]; + mpls_label_t label[2]; +}; + /* Static route information. */ struct static_route { @@ -37,7 +45,7 @@ struct static_route u_char distance; /* Tag */ - u_short tag; + route_tag_t tag; /* Flag for this static route's type. */ u_char type; @@ -66,6 +74,9 @@ struct static_route see ZEBRA_FLAG_REJECT ZEBRA_FLAG_BLACKHOLE */ + + /* Label information */ + struct static_nh_label snh_label; }; extern void @@ -76,14 +87,28 @@ static_uninstall_route (afi_t afi, safi_t safi, struct prefix *p, struct static_ extern int static_add_route (afi_t, safi_t safi, u_char type, struct prefix *p, union g_addr *gate, ifindex_t ifindex, - const char *ifname, u_char flags, u_short tag, - u_char distance, struct zebra_vrf *zvrf); + const char *ifname, u_char flags, route_tag_t tag, + u_char distance, struct zebra_vrf *zvrf, + struct static_nh_label *snh_label); extern int static_delete_route (afi_t, safi_t safi, u_char type, struct prefix *p, - union g_addr *gate, ifindex_t ifindex, - u_short tag, u_char distance, - struct zebra_vrf *zvrf); + union g_addr *gate, ifindex_t ifindex, route_tag_t tag, + u_char distance, struct zebra_vrf *zvrf, + struct static_nh_label *snh_label); +int +zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, + const char *dest_str, const char *mask_str, + const char *gate_str, const char *flag_str, + const char *tag_str, const char *distance_str, + const char *vrf_id_str, const char *label_str); + +int +static_ipv6_func (struct vty *vty, int add_cmd, const char *dest_str, + const char *gate_str, const char *ifname, + const char *flag_str, const char *tag_str, + const char *distance_str, const char *vrf_id_str, + const char *label_str); #endif diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c index 7625b9676e..8db89b1e4e 100644 --- a/zebra/zebra_vrf.c +++ b/zebra/zebra_vrf.c @@ -33,6 +33,7 @@ #include "zebra/router-id.h" #include "zebra/zebra_memory.h" #include "zebra/zebra_static.h" +#include "zebra/zebra_mpls.h" extern struct zebra_t zebrad; struct list *zvrf_list; @@ -333,6 +334,8 @@ zebra_vrf_alloc (vrf_id_t vrf_id, const char *name) zvrf->name[strlen(name)] = '\0'; } + zebra_mpls_init_tables (zvrf); + return zvrf; } diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h index 456c6fdad8..0baddc1b6a 100644 --- a/zebra/zebra_vrf.h +++ b/zebra/zebra_vrf.h @@ -74,6 +74,16 @@ struct zebra_vrf * Back pointer to the owning namespace. */ struct zebra_ns *zns; + + /* MPLS static LSP config table */ + struct hash *slsp_table; + + /* MPLS label forwarding table */ + struct hash *lsp_table; + + /* MPLS processing flags */ + u_int16_t mpls_flags; +#define MPLS_FLAG_SCHEDULE_LSPS (1 << 0) }; extern struct list *zvrf_list; diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index b3164839fa..03ff5b8763 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -14,9 +14,9 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU Zebra; see the file COPYING. 
If not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU Zebra; see the file COPYING. If not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. */ #include @@ -30,11 +30,13 @@ #include "rib.h" #include "nexthop.h" #include "vrf.h" +#include "mpls.h" #include "lib/json.h" #include "routemap.h" #include "zebra/zserv.h" #include "zebra/zebra_vrf.h" +#include "zebra/zebra_mpls.h" #include "zebra/zebra_rnh.h" #include "zebra/redistribute.h" #include "zebra/zebra_routemap.h" @@ -51,12 +53,12 @@ static void vty_show_ip_route_detail (struct vty *vty, struct route_node *rn, #define ONE_WEEK_SECOND 60*60*24*7 /* General function for static route. */ -static int +int zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, const char *dest_str, const char *mask_str, const char *gate_str, const char *flag_str, const char *tag_str, const char *distance_str, - const char *vrf_id_str) + const char *vrf_id_str, const char *label_str) { int ret; u_char distance; @@ -64,12 +66,14 @@ zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, struct in_addr gate; struct in_addr mask; u_char flag = 0; - u_short tag = 0; + route_tag_t tag = 0; struct zebra_vrf *zvrf = NULL; unsigned int ifindex = 0; const char *ifname = NULL; u_char type = STATIC_IPV4_BLACKHOLE; + struct static_nh_label snh_label; + memset (&snh_label, 0, sizeof (struct static_nh_label)); ret = str2prefix (dest_str, &p); if (ret <= 0) { @@ -100,7 +104,7 @@ zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, /* tag */ if (tag_str) - tag = atoi(tag_str); + tag = atol(tag_str); /* VRF id */ zvrf = zebra_vrf_list_lookup_by_name (vrf_id_str); @@ -111,6 +115,17 @@ zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, return CMD_WARNING; } + /* Labels */ + if (label_str) + { + if (mpls_str2label (label_str, &snh_label.num_labels, + snh_label.label)) + { + vty_out (vty, "%% Malformed label(s)%s", VTY_NEWLINE); + return CMD_WARNING; + } + } + /* Null0 static route. */ if ((gate_str != NULL) && (strncasecmp (gate_str, "Null0", strlen (gate_str)) == 0)) { @@ -120,9 +135,11 @@ zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, return CMD_WARNING; } if (add_cmd) - static_add_route (AFI_IP, safi, type, &p, NULL, ifindex, ifname, ZEBRA_FLAG_BLACKHOLE, tag, distance, zvrf); + static_add_route (AFI_IP, safi, type, &p, NULL, ifindex, ifname, + ZEBRA_FLAG_BLACKHOLE, tag, distance, zvrf, &snh_label); else - static_delete_route (AFI_IP, safi, type, &p, NULL, ifindex, tag, distance, zvrf); + static_delete_route (AFI_IP, safi, type, &p, NULL, ifindex, tag, + distance, zvrf, &snh_label); return CMD_SUCCESS; } @@ -146,13 +163,15 @@ zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, if (gate_str == NULL) { if (add_cmd) - static_add_route (AFI_IP, safi, type, &p, NULL, ifindex, ifname, flag, tag, distance, zvrf); + static_add_route (AFI_IP, safi, type, &p, NULL, ifindex, ifname, flag, + tag, distance, zvrf, &snh_label); else - static_delete_route (AFI_IP, safi, type, &p, NULL, ifindex, tag, distance, zvrf); + static_delete_route (AFI_IP, safi, type, &p, NULL, ifindex, tag, distance, + zvrf, &snh_label); return CMD_SUCCESS; } - + /* When gateway is A.B.C.D format, gate is treated as nexthop address other case gate is treated as interface name. 
*/ ret = inet_aton (gate_str, &gate); @@ -173,9 +192,13 @@ zebra_static_ipv4 (struct vty *vty, safi_t safi, int add_cmd, type = STATIC_IPV4_GATEWAY; if (add_cmd) - static_add_route (AFI_IP, safi, type, &p, ifindex ? NULL : (union g_addr *)&gate, ifindex, ifname, flag, tag, distance, zvrf); + static_add_route (AFI_IP, safi, type, &p, + ifindex ? NULL : (union g_addr *)&gate, ifindex, ifname, + flag, tag, distance, zvrf, &snh_label); else - static_delete_route (AFI_IP, safi, type, &p, ifindex ? NULL : (union g_addr *)&gate, ifindex, tag, distance, zvrf); + static_delete_route (AFI_IP, safi, type, &p, + ifindex ? NULL : (union g_addr *)&gate, ifindex, tag, + distance, zvrf, &snh_label); return CMD_SUCCESS; } @@ -195,7 +218,7 @@ DEFUN (ip_mroute_dist, char *nexthop = argv[3]->arg; char *distance = (argc == 5) ? argv[4]->arg : NULL; - return zebra_static_ipv4 (vty, SAFI_MULTICAST, 1, destprefix, NULL, nexthop, NULL, NULL, distance, NULL); + return zebra_static_ipv4 (vty, SAFI_MULTICAST, 1, destprefix, NULL, nexthop, NULL, NULL, distance, NULL, NULL); } DEFUN (no_ip_mroute_dist, @@ -212,7 +235,7 @@ DEFUN (no_ip_mroute_dist, char *nexthop = argv[4]->arg; char *distance = (argc == 6) ? argv[5]->arg : NULL; - return zebra_static_ipv4 (vty, SAFI_MULTICAST, 0, destprefix, NULL, nexthop, NULL, NULL, distance, NULL); + return zebra_static_ipv4 (vty, SAFI_MULTICAST, 0, destprefix, NULL, nexthop, NULL, NULL, distance, NULL, NULL); } DEFUN (ip_multicast_mode, @@ -310,24 +333,33 @@ DEFUN (show_ip_rpf_addr, static void zebra_vty_ip_route_tdv_helper (int argc, struct cmd_token *argv[], int idx_curr, char **tag, - char **distance, char **vrf) + char **distance, char **vrf, char **labels) { *distance = NULL; while (idx_curr < argc) { if (strmatch (argv[idx_curr]->text, "tag")) { - *tag = argv[idx_curr+1]->arg; + if (tag) + *tag = argv[idx_curr+1]->arg; idx_curr += 2; } else if (strmatch (argv[idx_curr]->text, "vrf")) { - *vrf = argv[idx_curr+1]->arg; + if (vrf) + *vrf = argv[idx_curr+1]->arg; + idx_curr += 2; + } + else if (strmatch (argv[idx_curr]->text, "label")) + { + if (labels) + *labels = argv[idx_curr+1]->arg; idx_curr += 2; } else { - *distance = argv[idx_curr]->arg; + if (distance) + *distance = argv[idx_curr]->arg; idx_curr++; } } @@ -335,11 +367,10 @@ zebra_vty_ip_route_tdv_helper (int argc, struct cmd_token *argv[], return; } - /* Static route configuration. */ DEFUN (ip_route, ip_route_cmd, - "ip route A.B.C.D/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "ip route A.B.C.D/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IP destination prefix (e.g. 10.0.0.0/8)\n" @@ -357,19 +388,19 @@ DEFUN (ip_route, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_ipv4_ifname_null]->arg, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (ip_route_flags, ip_route_flags_cmd, - "ip route A.B.C.D/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "ip route A.B.C.D/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IP destination prefix (e.g. 
10.0.0.0/8)\n" @@ -380,7 +411,9 @@ DEFUN (ip_route_flags, "Set tag for this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4_prefixlen = 2; int idx_ipv4_ifname = 3; @@ -389,19 +422,19 @@ DEFUN (ip_route_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_ipv4_ifname]->arg, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (ip_route_flags2, ip_route_flags2_cmd, - "ip route A.B.C.D/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "ip route A.B.C.D/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IP destination prefix (e.g. 10.0.0.0/8)\n" @@ -410,7 +443,9 @@ DEFUN (ip_route_flags2, "Set tag for this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4_prefixlen = 2; int idx_reject_blackhole = 3; @@ -418,20 +453,20 @@ DEFUN (ip_route_flags2, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[idx_ipv4_prefixlen]->arg, NULL, NULL, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } /* Mask as A.B.C.D format. 
*/ DEFUN (ip_route_mask, ip_route_mask_cmd, - "ip route A.B.C.D A.B.C.D [tag (1-65535)] [(1-255)] [vrf NAME]", + "ip route A.B.C.D A.B.C.D [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IP destination prefix\n" @@ -442,7 +477,9 @@ DEFUN (ip_route_mask, "Set tag for this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4 = 2; int idx_ipv4_2 = 3; @@ -451,18 +488,18 @@ DEFUN (ip_route_mask, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[idx_ipv4]->arg, argv[idx_ipv4_2]->arg, argv[idx_ipv4_ifname_null]->arg, - NULL, tag, distance, vrf); + NULL, tag, distance, vrf, NULL); } DEFUN (ip_route_mask_flags, ip_route_mask_flags_cmd, - "ip route A.B.C.D A.B.C.D [tag (1-65535)] [(1-255)] [vrf NAME]", + "ip route A.B.C.D A.B.C.D [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IP destination prefix\n" @@ -474,7 +511,9 @@ DEFUN (ip_route_mask_flags, "Set tag for this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4 = 2; int idx_ipv4_2 = 3; @@ -484,20 +523,20 @@ DEFUN (ip_route_mask_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[idx_ipv4]->arg, argv[idx_ipv4_2]->arg, argv[idx_ipv4_ifname]->arg, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (ip_route_mask_flags2, ip_route_mask_flags2_cmd, - "ip route A.B.C.D A.B.C.D [tag (1-65535)] [(1-255)] [vrf NAME]", + "ip route A.B.C.D A.B.C.D [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IP destination prefix\n" @@ -507,7 +546,9 @@ DEFUN (ip_route_mask_flags2, "Set tag for this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4 = 2; int idx_ipv4_2 = 3; @@ -516,19 +557,19 @@ DEFUN (ip_route_mask_flags2, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 1, argv[idx_ipv4]->arg, argv[idx_ipv4_2]->arg, NULL, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ip_route, no_ip_route_cmd, - "no ip route A.B.C.D/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ip route A.B.C.D/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -539,7 +580,9 @@ DEFUN (no_ip_route, "Tag of this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4_prefixlen = 3; int idx_ipv4_ifname_null = 4; @@ -547,19 +590,19 @@ DEFUN (no_ip_route, char *tag, *distance, *vrf; tag 
= distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_ipv4_ifname_null]->arg, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ip_route_flags2, no_ip_route_flags2_cmd, - "no ip route A.B.C.D/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ip route A.B.C.D/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -569,24 +612,26 @@ DEFUN (no_ip_route_flags2, "Tag of this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4_prefixlen = 3; int idx_curr = 5; char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[idx_ipv4_prefixlen]->arg, NULL, NULL, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ip_route_mask, no_ip_route_mask_cmd, - "no ip route A.B.C.D A.B.C.D [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ip route A.B.C.D A.B.C.D [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -598,7 +643,9 @@ DEFUN (no_ip_route_mask, "Tag of this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4 = 3; int idx_ipv4_2 = 4; @@ -607,19 +654,19 @@ DEFUN (no_ip_route_mask, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[idx_ipv4]->arg, argv[idx_ipv4_2]->arg, argv[idx_ipv4_ifname_null]->arg, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ip_route_mask_flags2, no_ip_route_mask_flags2_cmd, - "no ip route A.B.C.D A.B.C.D [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ip route A.B.C.D A.B.C.D [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -630,7 +677,9 @@ DEFUN (no_ip_route_mask_flags2, "Tag of this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4 = 3; int idx_ipv4_2 = 4; @@ -638,18 +687,18 @@ DEFUN (no_ip_route_mask_flags2, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[idx_ipv4]->arg, argv[idx_ipv4_2]->arg, NULL, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ip_route_flags, no_ip_route_flags_cmd, - "no ip route A.B.C.D/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ip route A.B.C.D/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -661,7 +710,9 @@ DEFUN (no_ip_route_flags, "Tag of this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + 
"Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4_prefixlen = 3; int idx_ipv4_ifname = 4; @@ -670,19 +721,19 @@ DEFUN (no_ip_route_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[idx_ipv4_prefixlen]->arg, NULL, argv[idx_ipv4_ifname]->arg, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ip_route_mask_flags, no_ip_route_mask_flags_cmd, - "no ip route A.B.C.D A.B.C.D [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ip route A.B.C.D A.B.C.D [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -695,7 +746,9 @@ DEFUN (no_ip_route_mask_flags, "Tag of this route\n" "Tag value\n" "Distance value for this route\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv4 = 3; int idx_ipv4_2 = 4; @@ -705,17 +758,16 @@ DEFUN (no_ip_route_mask_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return zebra_static_ipv4 (vty, SAFI_UNICAST, 0, argv[idx_ipv4]->arg, argv[idx_ipv4_2]->arg, argv[idx_ipv4_ifname]->arg, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } - /* New RIB. Detailed information for IPv4 route. */ static void vty_show_ip_route_detail (struct vty *vty, struct route_node *rn, int mcast) @@ -736,7 +788,7 @@ vty_show_ip_route_detail (struct vty *vty, struct route_node *rn, int mcast) ? " using Multicast RIB" : " using Unicast RIB"; } - + vty_out (vty, "Routing entry for %s%s%s", prefix2str (&rn->p, buf, sizeof(buf)), mcast_info, VTY_NEWLINE); @@ -780,13 +832,13 @@ vty_show_ip_route_detail (struct vty *vty, struct route_node *rn, int mcast) vty_out (vty, " Last update "); if (uptime < ONE_DAY_SECOND) - vty_out (vty, "%02d:%02d:%02d", + vty_out (vty, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min, tm->tm_sec); else if (uptime < ONE_WEEK_SECOND) - vty_out (vty, "%dd%02dh%02dm", + vty_out (vty, "%dd%02dh%02dm", tm->tm_yday, tm->tm_hour, tm->tm_min); else - vty_out (vty, "%02dw%dd%02dh", + vty_out (vty, "%02dw%dd%02dh", tm->tm_yday/7, tm->tm_yday - ((tm->tm_yday/7) * 7), tm->tm_hour); vty_out (vty, " ago%s", VTY_NEWLINE); @@ -1029,7 +1081,7 @@ vty_show_ip_route (struct vty *vty, struct route_node *rn, struct rib *rib, prefix2str (&rn->p, buf, sizeof buf)); /* Distance and metric display. 
*/ - if (rib->type != ZEBRA_ROUTE_CONNECT + if (rib->type != ZEBRA_ROUTE_CONNECT && rib->type != ZEBRA_ROUTE_KERNEL) len += vty_out (vty, " [%d/%d]", rib->distance, rib->metric); @@ -1118,13 +1170,13 @@ vty_show_ip_route (struct vty *vty, struct route_node *rn, struct rib *rib, tm = gmtime (&uptime); if (uptime < ONE_DAY_SECOND) - vty_out (vty, ", %02d:%02d:%02d", + vty_out (vty, ", %02d:%02d:%02d", tm->tm_hour, tm->tm_min, tm->tm_sec); else if (uptime < ONE_WEEK_SECOND) - vty_out (vty, ", %dd%02dh%02dm", + vty_out (vty, ", %dd%02dh%02dm", tm->tm_yday, tm->tm_hour, tm->tm_min); else - vty_out (vty, ", %02dw%dd%02dh", + vty_out (vty, ", %02dw%dd%02dh", tm->tm_yday/7, tm->tm_yday - ((tm->tm_yday/7) * 7), tm->tm_hour); } @@ -1203,7 +1255,7 @@ do_show_ip_route (struct vty *vty, const char *vrf_name, safi_t safi, } } - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -1384,7 +1436,7 @@ DEFUN (no_ipv6_nht_default_route, DEFUN (show_ip_route_tag, show_ip_route_tag_cmd, - "show ip route [vrf NAME] tag (1-65535)", + "show ip route [vrf NAME] tag (1-4294967295)", SHOW_STR IP_STR "IP routing table\n" @@ -1399,18 +1451,18 @@ DEFUN (show_ip_route_tag, struct route_node *rn; struct rib *rib; int first = 1; - u_short tag = 0; + route_tag_t tag = 0; vrf_id_t vrf_id = VRF_DEFAULT; - + if (strmatch(argv[idx_vrf]->text, "vrf")) { VRF_GET_ID (vrf_id, argv[idx_name]->arg); - tag = atoi(argv[idx_tag]->arg); + tag = atol(argv[idx_tag]->arg); } else { idx_tag -= 2; - tag = atoi(argv[idx_tag]->arg); + tag = atol(argv[idx_tag]->arg); } table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, vrf_id); @@ -1968,7 +2020,7 @@ DEFUN (show_ip_route_vrf_all, DEFUN (show_ip_route_vrf_all_tag, show_ip_route_vrf_all_tag_cmd, - "show ip route vrf all tag (1-65535)", + "show ip route vrf all tag (1-4294967295)", SHOW_STR IP_STR "IP routing table\n" @@ -1984,10 +2036,10 @@ DEFUN (show_ip_route_vrf_all_tag, vrf_iter_t iter; int first = 1; int vrf_header = 1; - u_short tag = 0; + route_tag_t tag = 0; if (argv[idx_number]->arg) - tag = atoi(argv[idx_number]->arg); + tag = atol(argv[idx_number]->arg); for (iter = vrf_first (); iter != VRF_ITER_INVALID; iter = vrf_next (iter)) { @@ -2117,7 +2169,6 @@ DEFUN (show_ip_route_vrf_all_supernets, vty_out (vty, SHOW_ROUTE_V4_HEADER); first = 0; } - if (vrf_header) { vty_out (vty, "%sVRF %s:%s", VTY_NEWLINE, zvrf->name, VTY_NEWLINE); @@ -2378,11 +2429,12 @@ static_config_ipv4 (struct vty *vty, safi_t safi, const char *cmd) } /* General fucntion for IPv6 static route. 
*/ -static int +int static_ipv6_func (struct vty *vty, int add_cmd, const char *dest_str, - const char *gate_str, const char *ifname, - const char *flag_str, const char *tag_str, - const char *distance_str, const char *vrf_id_str) + const char *gate_str, const char *ifname, + const char *flag_str, const char *tag_str, + const char *distance_str, const char *vrf_id_str, + const char *label_str) { int ret; u_char distance; @@ -2391,11 +2443,12 @@ static_ipv6_func (struct vty *vty, int add_cmd, const char *dest_str, struct in6_addr gate_addr; u_char type = 0; u_char flag = 0; - u_short tag = 0; + route_tag_t tag = 0; unsigned int ifindex = 0; struct interface *ifp = NULL; struct zebra_vrf *zvrf; - + struct static_nh_label snh_label; + ret = str2prefix (dest_str, &p); if (ret <= 0) { @@ -2431,7 +2484,7 @@ static_ipv6_func (struct vty *vty, int add_cmd, const char *dest_str, /* tag */ if (tag_str) - tag = atoi(tag_str); + tag = atol(tag_str); /* When gateway is valid IPv6 addrees, then gate is treated as nexthop address other case gate is treated as interface name. */ @@ -2446,58 +2499,72 @@ static_ipv6_func (struct vty *vty, int add_cmd, const char *dest_str, return CMD_WARNING; } + /* Labels */ + memset (&snh_label, 0, sizeof (struct static_nh_label)); + if (label_str) + { + if (mpls_str2label (label_str, &snh_label.num_labels, + snh_label.label)) + { + vty_out (vty, "%% Malformed label(s)%s", VTY_NEWLINE); + return CMD_WARNING; + } + } + if (ifname) { /* When ifname is specified. It must be come with gateway address. */ if (ret != 1) - { - vty_out (vty, "%% Malformed address%s", VTY_NEWLINE); - return CMD_WARNING; - } + { + vty_out (vty, "%% Malformed address%s", VTY_NEWLINE); + return CMD_WARNING; + } type = STATIC_IPV6_GATEWAY_IFINDEX; gate = &gate_addr; ifp = if_lookup_by_name_vrf (ifname, zvrf->vrf_id); if (!ifp) - { - vty_out (vty, "%% Malformed Interface name %s%s", ifname, VTY_NEWLINE); - return CMD_WARNING; - } + { + vty_out (vty, "%% Malformed Interface name %s%s", ifname, VTY_NEWLINE); + return CMD_WARNING; + } ifindex = ifp->ifindex; } else { if (ret == 1) - { - type = STATIC_IPV6_GATEWAY; - gate = &gate_addr; - } + { + type = STATIC_IPV6_GATEWAY; + gate = &gate_addr; + } else - { - type = STATIC_IFINDEX; - ifp = if_lookup_by_name_vrf (gate_str, zvrf->vrf_id); - if (!ifp) - { - vty_out (vty, "%% Malformed Interface name %s%s", gate_str, VTY_NEWLINE); + { + type = STATIC_IFINDEX; + ifp = if_lookup_by_name_vrf (gate_str, zvrf->vrf_id); + if (!ifp) + { + vty_out (vty, "%% Malformed Interface name %s%s", gate_str, VTY_NEWLINE); ifindex = IFINDEX_DELETED; - } + } else - ifindex = ifp->ifindex; - ifname = gate_str; - } + ifindex = ifp->ifindex; + ifname = gate_str; + } } if (add_cmd) - static_add_route (AFI_IP6, SAFI_UNICAST, type, &p, (union g_addr *)gate, ifindex, ifname, flag, tag, distance, zvrf); + static_add_route (AFI_IP6, SAFI_UNICAST, type, &p, (union g_addr *)gate, + ifindex, ifname, flag, tag, distance, zvrf, &snh_label); else - static_delete_route (AFI_IP6, SAFI_UNICAST, type, &p, (union g_addr *)gate, ifindex, tag, distance, zvrf); + static_delete_route (AFI_IP6, SAFI_UNICAST, type, &p, (union g_addr *)gate, + ifindex, tag, distance, zvrf, &snh_label); return CMD_SUCCESS; } DEFUN (ipv6_route, ipv6_route_cmd, - "ipv6 route X:X::X:X/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "ipv6 route X:X::X:X/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IPv6 destination prefix (e.g. 
3ffe:506::/32)\n" @@ -2506,7 +2573,9 @@ DEFUN (ipv6_route, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 2; int idx_ipv6_ifname = 3; @@ -2514,19 +2583,18 @@ DEFUN (ipv6_route, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return static_ipv6_func (vty, 1, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6_ifname]->arg, NULL, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } - DEFUN (ipv6_route_flags, ipv6_route_flags_cmd, - "ipv6 route X:X::X:X/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "ipv6 route X:X::X:X/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" @@ -2538,7 +2606,9 @@ DEFUN (ipv6_route_flags, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 2; int idx_ipv6_ifname = 3; @@ -2547,19 +2617,19 @@ DEFUN (ipv6_route_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return static_ipv6_func (vty, 1, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6_ifname]->arg, NULL, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (ipv6_route_ifname, ipv6_route_ifname_cmd, - "ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-65535)] [(1-255)] [vrf NAME]", + "ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IPv6 destination prefix (e.g. 3ffe:506::/32)\n" @@ -2568,7 +2638,9 @@ DEFUN (ipv6_route_ifname, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 2; int idx_ipv6 = 3; @@ -2577,19 +2649,19 @@ DEFUN (ipv6_route_ifname, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return static_ipv6_func (vty, 1, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6]->arg, argv[idx_interface]->arg, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (ipv6_route_ifname_flags, ipv6_route_ifname_flags_cmd, - "ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-65535)] [(1-255)] [vrf NAME]", + "ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-4294967295)] [(1-255)] [vrf NAME]", IP_STR "Establish static routes\n" "IPv6 destination prefix (e.g. 
3ffe:506::/32)\n" @@ -2600,7 +2672,9 @@ DEFUN (ipv6_route_ifname_flags, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 2; int idx_ipv6 = 3; @@ -2610,19 +2684,19 @@ DEFUN (ipv6_route_ifname_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return static_ipv6_func (vty, 1, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6]->arg, argv[idx_interface]->arg, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ipv6_route, no_ipv6_route_cmd, - "no ipv6 route X:X::X:X/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ipv6 route X:X::X:X/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -2632,7 +2706,9 @@ DEFUN (no_ipv6_route, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 3; int idx_ipv6_ifname = 4; @@ -2640,18 +2716,18 @@ DEFUN (no_ipv6_route, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return static_ipv6_func (vty, 0, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6_ifname]->arg, NULL, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ipv6_route_flags, no_ipv6_route_flags_cmd, - "no ipv6 route X:X::X:X/M [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ipv6 route X:X::X:X/M [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -2663,7 +2739,9 @@ DEFUN (no_ipv6_route_flags, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 3; int idx_ipv6_ifname = 4; @@ -2672,19 +2750,19 @@ DEFUN (no_ipv6_route_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return static_ipv6_func (vty, 0, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6_ifname]->arg, NULL, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ipv6_route_ifname, no_ipv6_route_ifname_cmd, - "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -2694,7 +2772,9 @@ DEFUN (no_ipv6_route_ifname, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 3; int idx_ipv6 = 4; @@ -2703,19 +2783,19 @@ DEFUN (no_ipv6_route_ifname, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, 
NULL); return static_ipv6_func (vty, 0, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6]->arg, argv[idx_interface]->arg, NULL, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (no_ipv6_route_ifname_flags, no_ipv6_route_ifname_flags_cmd, - "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-65535)] [(1-255)] [vrf NAME]", + "no ipv6 route X:X::X:X/M X:X::X:X INTERFACE [tag (1-4294967295)] [(1-255)] [vrf NAME]", NO_STR IP_STR "Establish static routes\n" @@ -2727,7 +2807,9 @@ DEFUN (no_ipv6_route_ifname_flags, "Set tag for this route\n" "Tag value\n" "Distance value for this prefix\n" - VRF_CMD_HELP_STR) + VRF_CMD_HELP_STR + "Specify labels for this route\n" + "One or more labels separated by '/'\n") { int idx_ipv6_prefixlen = 3; int idx_ipv6 = 4; @@ -2737,14 +2819,14 @@ DEFUN (no_ipv6_route_ifname_flags, char *tag, *distance, *vrf; tag = distance = vrf = NULL; - zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf); + zebra_vty_ip_route_tdv_helper (argc, argv, idx_curr, &tag, &distance, &vrf, NULL); return static_ipv6_func (vty, 0, argv[idx_ipv6_prefixlen]->arg, argv[idx_ipv6]->arg, argv[idx_interface]->arg, argv[idx_reject_blackhole]->arg, - tag, distance, vrf); + tag, distance, vrf, NULL); } DEFUN (show_ipv6_route, @@ -2823,7 +2905,7 @@ DEFUN (show_ipv6_route, } } - vty_out (vty, "%s%s", json_object_to_json_string(json), VTY_NEWLINE); + vty_out (vty, "%s%s", json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY), VTY_NEWLINE); json_object_free(json); } else @@ -2848,7 +2930,7 @@ DEFUN (show_ipv6_route, DEFUN (show_ipv6_route_tag, show_ipv6_route_tag_cmd, - "show ipv6 route [vrf NAME] tag (1-65535)", + "show ipv6 route [vrf NAME] tag (1-4294967295)", SHOW_STR IP_STR "IPv6 routing table\n" @@ -2863,18 +2945,18 @@ DEFUN (show_ipv6_route_tag, struct route_node *rn; struct rib *rib; int first = 1; - u_short tag = 0; + route_tag_t tag = 0; vrf_id_t vrf_id = VRF_DEFAULT; if (strmatch(argv[idx_vrf]->text, "vrf")) { VRF_GET_ID (vrf_id, argv[idx_name]->arg); - tag = atoi(argv[idx_tag]->arg); + tag = atol(argv[idx_tag]->arg); } else { idx_tag -= 2; - tag = atoi(argv[idx_tag]->arg); + tag = atol(argv[idx_tag]->arg); } table = zebra_vrf_table (AFI_IP6, SAFI_UNICAST, vrf_id); @@ -3240,7 +3322,7 @@ DEFUN (show_ipv6_route_vrf_all, DEFUN (show_ipv6_route_vrf_all_tag, show_ipv6_route_vrf_all_tag_cmd, - "show ipv6 route vrf all tag (1-65535)", + "show ipv6 route vrf all tag (1-4294967295)", SHOW_STR IP_STR "IPv6 routing table\n" @@ -3256,10 +3338,10 @@ DEFUN (show_ipv6_route_vrf_all_tag, vrf_iter_t iter; int first = 1; int vrf_header = 1; - u_short tag = 0; + route_tag_t tag = 0; if (argv[idx_number]->arg) - tag = atoi(argv[idx_number]->arg); + tag = atol(argv[idx_number]->arg); for (iter = vrf_first (); iter != VRF_ITER_INVALID; iter = vrf_next (iter)) { @@ -3615,7 +3697,7 @@ static_config_ipv6 (struct vty *vty) vty_out (vty, " %s", "blackhole"); if (si->tag) - vty_out (vty, " tag %d", si->tag); + vty_out (vty, " tag %"ROUTE_TAG_PRI, si->tag); if (si->distance != ZEBRA_STATIC_DISTANCE_DEFAULT) vty_out (vty, " %d", si->distance); @@ -3625,6 +3707,12 @@ static_config_ipv6 (struct vty *vty) vty_out (vty, " vrf %s", zvrf->name); } + /* Label information */ + if (si->snh_label.num_labels) + vty_out (vty, " label %s", + mpls_label2str (si->snh_label.num_labels, + si->snh_label.label, buf, sizeof buf)); + vty_out (vty, "%s", VTY_NEWLINE); write = 1; @@ -3835,26 +3923,9 @@ zebra_vty_init (void) install_element (VIEW_NODE, &show_ip_route_supernets_cmd); install_element 
(VIEW_NODE, &show_ip_route_summary_cmd); install_element (VIEW_NODE, &show_ip_route_summary_prefix_cmd); - install_element (ENABLE_NODE, &show_vrf_cmd); - install_element (ENABLE_NODE, &show_ip_route_cmd); - install_element (ENABLE_NODE, &show_ip_route_ospf_instance_cmd); - install_element (ENABLE_NODE, &show_ip_route_tag_cmd); - install_element (ENABLE_NODE, &show_ip_nht_cmd); - install_element (ENABLE_NODE, &show_ip_nht_vrf_all_cmd); - install_element (ENABLE_NODE, &show_ipv6_nht_cmd); - install_element (ENABLE_NODE, &show_ipv6_nht_vrf_all_cmd); - install_element (ENABLE_NODE, &show_ip_route_addr_cmd); - install_element (ENABLE_NODE, &show_ip_route_prefix_cmd); - install_element (ENABLE_NODE, &show_ip_route_prefix_longer_cmd); - install_element (ENABLE_NODE, &show_ip_route_protocol_cmd); - install_element (ENABLE_NODE, &show_ip_route_supernets_cmd); - install_element (ENABLE_NODE, &show_ip_route_summary_cmd); - install_element (ENABLE_NODE, &show_ip_route_summary_prefix_cmd); install_element (VIEW_NODE, &show_ip_rpf_cmd); - install_element (ENABLE_NODE, &show_ip_rpf_cmd); install_element (VIEW_NODE, &show_ip_rpf_addr_cmd); - install_element (ENABLE_NODE, &show_ip_rpf_addr_cmd); /* Commands for VRF */ @@ -3862,7 +3933,7 @@ zebra_vty_init (void) install_element (CONFIG_NODE, &no_ip_route_mask_flags_cmd); install_element (VIEW_NODE, &show_ip_route_vrf_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_cmd); + install_element (VIEW_NODE, &show_ip_route_vrf_cmd); install_element (VIEW_NODE, &show_ip_route_vrf_all_cmd); install_element (VIEW_NODE, &show_ip_route_vrf_all_tag_cmd); @@ -3873,15 +3944,6 @@ zebra_vty_init (void) install_element (VIEW_NODE, &show_ip_route_vrf_all_supernets_cmd); install_element (VIEW_NODE, &show_ip_route_vrf_all_summary_cmd); install_element (VIEW_NODE, &show_ip_route_vrf_all_summary_prefix_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_tag_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_addr_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_prefix_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_prefix_longer_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_protocol_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_supernets_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_summary_cmd); - install_element (ENABLE_NODE, &show_ip_route_vrf_all_summary_prefix_cmd); install_element (CONFIG_NODE, &ipv6_route_cmd); install_element (CONFIG_NODE, &ipv6_route_flags_cmd); @@ -3903,17 +3965,8 @@ zebra_vty_init (void) install_element (VIEW_NODE, &show_ipv6_route_addr_cmd); install_element (VIEW_NODE, &show_ipv6_route_prefix_cmd); install_element (VIEW_NODE, &show_ipv6_route_prefix_longer_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_tag_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_protocol_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_addr_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_prefix_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_prefix_longer_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_summary_cmd); - install_element (ENABLE_NODE, &show_ipv6_route_summary_prefix_cmd); install_element (VIEW_NODE, &show_ipv6_mroute_cmd); - install_element (ENABLE_NODE, &show_ipv6_mroute_cmd); /* Commands for VRF */ install_element (VIEW_NODE, &show_ipv6_route_vrf_all_cmd); @@ -3924,15 +3977,6 @@ zebra_vty_init (void) 
   install_element (VIEW_NODE, &show_ipv6_route_vrf_all_addr_cmd);
   install_element (VIEW_NODE, &show_ipv6_route_vrf_all_prefix_cmd);
   install_element (VIEW_NODE, &show_ipv6_route_vrf_all_prefix_longer_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_tag_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_protocol_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_addr_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_prefix_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_prefix_longer_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_summary_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_route_vrf_all_summary_prefix_cmd);
   install_element (VIEW_NODE, &show_ipv6_mroute_vrf_all_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_mroute_vrf_all_cmd);
 
 }
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 98908fb67e..a0f46aadc9 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -52,6 +52,8 @@
 #include "zebra/interface.h"
 #include "zebra/zebra_ptm.h"
 #include "zebra/rtadv.h"
+#include "zebra/zebra_mpls.h"
+#include "zebra/zebra_fpm.h"
 
 /* Event list of zebra. */
 enum event { ZEBRA_SERV, ZEBRA_READ, ZEBRA_WRITE };
@@ -627,7 +629,7 @@ zsend_redistribute_route (int cmd, struct zserv *client, struct prefix *p,
   /* Put type and nexthop. */
   stream_putc (s, rib->type);
   stream_putw (s, rib->instance);
-  stream_putc (s, rib->flags);
+  stream_putl (s, rib->flags);
 
   /* marker for message flags field */
   messmark = stream_get_endp (s);
@@ -641,7 +643,7 @@ zsend_redistribute_route (int cmd, struct zserv *client, struct prefix *p,
   for (nexthop = rib->nexthop; nexthop; nexthop = nexthop->next)
     {
       /* We don't send any nexthops when there's a multipath */
-      if (rib->nexthop_active_num > 1)
+      if (rib->nexthop_active_num > 1 && client->proto != ZEBRA_ROUTE_LDP)
         {
           SET_FLAG (zapi_flags, ZAPI_MESSAGE_NEXTHOP);
           SET_FLAG (zapi_flags, ZAPI_MESSAGE_IFINDEX);
@@ -667,8 +669,7 @@ zsend_redistribute_route (int cmd, struct zserv *client, struct prefix *p,
           break;
         }
 
-      if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)
-          || nexthop_has_fib_child(nexthop))
+      if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
         {
           SET_FLAG (zapi_flags, ZAPI_MESSAGE_NEXTHOP);
           SET_FLAG (zapi_flags, ZAPI_MESSAGE_IFINDEX);
@@ -713,27 +714,30 @@ zsend_redistribute_route (int cmd, struct zserv *client, struct prefix *p,
               stream_putc (s, 1);
               stream_putl (s, nexthop->ifindex);
 
+              /* ldpd needs all nexthops */
+              if (client->proto != ZEBRA_ROUTE_LDP)
               break;
             }
         }
 
-  /* Metric */
-  if (cmd == ZEBRA_REDISTRIBUTE_IPV4_ADD || cmd == ZEBRA_REDISTRIBUTE_IPV6_ADD)
-    {
+  /* Distance */
       SET_FLAG (zapi_flags, ZAPI_MESSAGE_DISTANCE);
       stream_putc (s, rib->distance);
+
+  /* Metric */
       SET_FLAG (zapi_flags, ZAPI_MESSAGE_METRIC);
       stream_putl (s, rib->metric);
 
-      /* tag */
+  /* Tag */
       if (rib->tag)
         {
           SET_FLAG(zapi_flags, ZAPI_MESSAGE_TAG);
-          stream_putw(s, rib->tag);
+          stream_putl(s, rib->tag);
         }
+
+  /* MTU */
       SET_FLAG (zapi_flags, ZAPI_MESSAGE_MTU);
       stream_putl (s, rib->mtu);
-    }
 
   /* write real message flags value */
   stream_putc_at (s, messmark, zapi_flags);
@@ -923,7 +927,7 @@ zsend_ipv4_nexthop_lookup_mrib (struct zserv *client, struct in_addr addr, struc
    * are looking up. Therefore, we will just iterate over the top
    * chain of nexthops. */
   for (nexthop = rib->nexthop; nexthop; nexthop = nexthop->next)
-    if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB))
+    if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE))
       num += zsend_write_nexthop (s, nexthop);
 
   stream_putc_at (s, nump, num); /* store nexthop_num */
@@ -1051,7 +1055,7 @@ zread_ipv4_add (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* Type, flags, message. */
   rib->type = stream_getc (s);
   rib->instance = stream_getw (s);
-  rib->flags = stream_getc (s);
+  rib->flags = stream_getl (s);
   message = stream_getc (s);
   safi = stream_getw (s);
   rib->uptime = time (NULL);
@@ -1110,7 +1114,7 @@ zread_ipv4_add (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* Tag */
   if (CHECK_FLAG (message, ZAPI_MESSAGE_TAG))
-    rib->tag = stream_getw (s);
+    rib->tag = stream_getl (s);
   else
     rib->tag = 0;
 
@@ -1155,7 +1159,7 @@ zread_ipv4_delete (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* Type, flags, message. */
   api.type = stream_getc (s);
   api.instance = stream_getw (s);
-  api.flags = stream_getc (s);
+  api.flags = stream_getl (s);
   api.message = stream_getc (s);
   api.safi = stream_getw (s);
 
@@ -1209,7 +1213,7 @@ zread_ipv4_delete (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* tag */
   if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG))
-    api.tag = stream_getw (s);
+    api.tag = stream_getl (s);
   else
     api.tag = 0;
 
@@ -1261,7 +1265,7 @@ zread_ipv4_route_ipv6_nexthop_add (struct zserv *client, u_short length, struct
   /* Type, flags, message. */
   rib->type = stream_getc (s);
   rib->instance = stream_getw (s);
-  rib->flags = stream_getc (s);
+  rib->flags = stream_getl (s);
   message = stream_getc (s);
   safi = stream_getw (s);
   rib->uptime = time (NULL);
@@ -1339,7 +1343,7 @@ zread_ipv4_route_ipv6_nexthop_add (struct zserv *client, u_short length, struct
   /* Tag */
   if (CHECK_FLAG (message, ZAPI_MESSAGE_TAG))
-    rib->tag = stream_getw (s);
+    rib->tag = stream_getl (s);
   else
     rib->tag = 0;
 
@@ -1388,7 +1392,7 @@ zread_ipv6_add (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* Type, flags, message. */
   rib->type = stream_getc (s);
   rib->instance = stream_getw (s);
-  rib->flags = stream_getc (s);
+  rib->flags = stream_getl (s);
   message = stream_getc (s);
   safi = stream_getw (s);
   rib->uptime = time (NULL);
@@ -1460,7 +1464,7 @@ zread_ipv6_add (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* Tag */
   if (CHECK_FLAG (message, ZAPI_MESSAGE_TAG))
-    rib->tag = stream_getw (s);
+    rib->tag = stream_getl (s);
   else
     rib->tag = 0;
 
@@ -1502,7 +1506,7 @@ zread_ipv6_delete (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* Type, flags, message. */
   api.type = stream_getc (s);
   api.instance = stream_getw (s);
-  api.flags = stream_getc (s);
+  api.flags = stream_getl (s);
   api.message = stream_getc (s);
   api.safi = stream_getw (s);
 
@@ -1549,7 +1553,7 @@ zread_ipv6_delete (struct zserv *client, u_short length, struct zebra_vrf *zvrf)
   /* tag */
   if (CHECK_FLAG (api.message, ZAPI_MESSAGE_TAG))
-    api.tag = stream_getw (s);
+    api.tag = stream_getl (s);
   else
     api.tag = 0;
 
@@ -1628,6 +1632,68 @@ zread_vrf_unregister (struct zserv *client, u_short length, struct zebra_vrf *zv
   return 0;
 }
 
+static void
+zread_mpls_labels (int command, struct zserv *client, u_short length,
+                   vrf_id_t vrf_id)
+{
+  struct stream *s;
+  enum lsp_types_t type;
+  struct prefix prefix;
+  enum nexthop_types_t gtype;
+  union g_addr gate;
+  mpls_label_t in_label, out_label;
+  u_int8_t distance;
+  struct zebra_vrf *zvrf;
+
+  zvrf = vrf_info_lookup (vrf_id);
+  if (!zvrf)
+    return;
+
+  /* Get input stream. */
+  s = client->ibuf;
+
+  /* Get data. */
+  type = stream_getc (s);
+  prefix.family = stream_getl (s);
+  switch (prefix.family)
+    {
+    case AF_INET:
+      prefix.u.prefix4.s_addr = stream_get_ipv4 (s);
+      prefix.prefixlen = stream_getc (s);
+      gtype = NEXTHOP_TYPE_IPV4;
+      gate.ipv4.s_addr = stream_get_ipv4 (s);
+      break;
+    case AF_INET6:
+      stream_get (&prefix.u.prefix6, s, 16);
+      prefix.prefixlen = stream_getc (s);
+      gtype = NEXTHOP_TYPE_IPV6;
+      stream_get (&gate.ipv6, s, 16);
+      break;
+    default:
+      return;
+    }
+  distance = stream_getc (s);
+  in_label = stream_getl (s);
+  out_label = stream_getl (s);
+
+  if (! mpls_enabled)
+    return;
+
+  if (command == ZEBRA_MPLS_LABELS_ADD)
+    {
+      mpls_lsp_install (zvrf, type, in_label, out_label, gtype, &gate,
+                        NULL, 0);
+      if (out_label != MPLS_IMP_NULL_LABEL)
+        mpls_ftn_update (1, zvrf, type, &prefix, &gate, distance, out_label);
+    }
+  else if (command == ZEBRA_MPLS_LABELS_DELETE)
+    {
+      mpls_lsp_uninstall (zvrf, type, in_label, gtype, &gate, NULL, 0);
+      if (out_label != MPLS_IMP_NULL_LABEL)
+        mpls_ftn_update (0, zvrf, type, &prefix, &gate, distance, out_label);
+    }
+}
+
 /* Cleanup registered nexthops (across VRFs) upon client disconnect. */
 static void
 zebra_client_close_cleanup_rnh (struct zserv *client)
@@ -1643,6 +1709,13 @@ zebra_client_close_cleanup_rnh (struct zserv *client)
           zebra_cleanup_rnh_client(zvrf->vrf_id, AF_INET6, client, RNH_NEXTHOP_TYPE);
           zebra_cleanup_rnh_client(zvrf->vrf_id, AF_INET, client, RNH_IMPORT_CHECK_TYPE);
           zebra_cleanup_rnh_client(zvrf->vrf_id, AF_INET6, client, RNH_IMPORT_CHECK_TYPE);
+          if (client->proto == ZEBRA_ROUTE_LDP)
+            {
+              hash_iterate(zvrf->lsp_table, mpls_ldp_lsp_uninstall_all,
+                           zvrf->lsp_table);
+              mpls_ldp_ftn_uninstall_all (zvrf, AFI_IP);
+              mpls_ldp_ftn_uninstall_all (zvrf, AFI_IP6);
+            }
         }
     }
 }
@@ -1869,6 +1942,12 @@ zebra_client_read (struct thread *thread)
     case ZEBRA_IPV4_ROUTE_IPV6_NEXTHOP_ADD:
       zread_ipv4_route_ipv6_nexthop_add (client, length, zvrf);
       break;
+    case ZEBRA_IPV4_NEXTHOP_ADD:
+      zread_ipv4_add(client, length, zvrf); /* LB: r1.0 merge - id was 1 */
+      break;
+    case ZEBRA_IPV4_NEXTHOP_DELETE:
+      zread_ipv4_delete(client, length, zvrf); /* LB: r1.0 merge - id was 1 */
+      break;
     case ZEBRA_IPV6_ROUTE_ADD:
       zread_ipv6_add (client, length, zvrf);
       break;
@@ -1924,6 +2003,10 @@ zebra_client_read (struct thread *thread)
     case ZEBRA_INTERFACE_DISABLE_RADV:
       zebra_interface_radv_set (client, sock, length, zvrf, 0);
       break;
+    case ZEBRA_MPLS_LABELS_ADD:
+    case ZEBRA_MPLS_LABELS_DELETE:
+      zread_mpls_labels (command, client, length, vrf_id);
+      break;
     default:
       zlog_info ("Zebra received unknown command %d", command);
       break;
@@ -2499,6 +2582,24 @@ static struct cmd_node forwarding_node =
   1
 };
 
+#ifdef HAVE_FPM
+/* function to write the fpm config info */
+static int
+config_write_fpm (struct vty *vty)
+{
+  return
+     fpm_remote_srv_write (vty);
+}
+
+/* Zebra node */
+static struct cmd_node zebra_node =
+{
+  ZEBRA_NODE,
+  "",
+  1
+};
+#endif
+
 /* Initialisation of zebra and installation of commands. */
 void
@@ -2510,9 +2611,11 @@ zebra_init (void)
   /* Install configuration write function. */
   install_node (&table_node, config_write_table);
   install_node (&forwarding_node, config_write_forwarding);
+#ifdef HAVE_FPM
+  install_node (&zebra_node, config_write_fpm);
+#endif
 
   install_element (VIEW_NODE, &show_ip_forwarding_cmd);
-  install_element (ENABLE_NODE, &show_ip_forwarding_cmd);
   install_element (CONFIG_NODE, &ip_forwarding_cmd);
   install_element (CONFIG_NODE, &no_ip_forwarding_cmd);
   install_element (ENABLE_NODE, &show_zebra_client_cmd);
@@ -2520,14 +2623,12 @@ zebra_init (void)
 
 #ifdef HAVE_NETLINK
   install_element (VIEW_NODE, &show_table_cmd);
-  install_element (ENABLE_NODE, &show_table_cmd);
   install_element (CONFIG_NODE, &config_table_cmd);
   install_element (CONFIG_NODE, &no_config_table_cmd);
 #endif /* HAVE_NETLINK */
 
 #ifdef HAVE_IPV6
   install_element (VIEW_NODE, &show_ipv6_forwarding_cmd);
-  install_element (ENABLE_NODE, &show_ipv6_forwarding_cmd);
   install_element (CONFIG_NODE, &ipv6_forwarding_cmd);
   install_element (CONFIG_NODE, &no_ipv6_forwarding_cmd);
 #endif /* HAVE_IPV6 */
diff --git a/zebra/zserv.h b/zebra/zserv.h
index 4487957a0c..a0434d299b 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -130,6 +130,9 @@ struct zebra_t
   /* rib work queue */
   struct work_queue *ribq;
   struct meta_queue *mq;
+
+  /* LSP work queue */
+  struct work_queue *lsp_process_q;
 };
 
 extern struct zebra_t zebrad;
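The zread_mpls_labels() handler added above defines the payload that LDP-style clients send with ZEBRA_MPLS_LABELS_ADD/DELETE: LSP type (1 byte), address family (4 bytes), the FEC prefix plus prefix length, the gateway address, distance (1 byte), and the incoming and outgoing labels (4 bytes each). The following standalone sketch encodes that layout for an IPv4 FEC so the field order is easy to see. It is illustrative only: put_u8()/put_u32() stand in for Quagga's stream_putc()/stream_putl(), the addresses and label values are invented, and this is not the actual zclient encoder used by ldpd.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Illustrative stand-ins for stream_putc()/stream_putl(): append one byte
 * or a 4-byte value in network byte order to a plain buffer. */
static size_t
put_u8 (uint8_t *buf, size_t off, uint8_t v)
{
  buf[off] = v;
  return off + 1;
}

static size_t
put_u32 (uint8_t *buf, size_t off, uint32_t v)
{
  uint32_t n = htonl (v);
  memcpy (buf + off, &n, sizeof (n));
  return off + sizeof (n);
}

int
main (void)
{
  uint8_t buf[64];
  size_t off = 0;
  struct in_addr prefix, gate;

  inet_pton (AF_INET, "10.0.1.0", &prefix);    /* example FEC prefix   */
  inet_pton (AF_INET, "192.0.2.1", &gate);     /* example nexthop      */

  off = put_u8  (buf, off, 1);                 /* LSP type (illustrative) */
  off = put_u32 (buf, off, AF_INET);           /* address family       */
  memcpy (buf + off, &prefix, 4); off += 4;    /* FEC prefix (IPv4)    */
  off = put_u8  (buf, off, 24);                /* prefix length        */
  memcpy (buf + off, &gate, 4); off += 4;      /* gateway (IPv4)       */
  off = put_u8  (buf, off, 150);               /* distance             */
  off = put_u32 (buf, off, 100);               /* incoming label       */
  off = put_u32 (buf, off, 3);                 /* outgoing label       */

  printf ("encoded %zu payload bytes\n", off); /* 23 bytes for an IPv4 FEC */
  return 0;
}

Decoding this buffer in the order used by zread_mpls_labels() recovers the same fields. An outgoing label of 3 (implicit null) is the MPLS_IMP_NULL_LABEL case in the handler, where only the LSP entry is installed or removed and no FTN update is issued.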