Merge pull request #16937 from opensourcerouting/embedded-rp

pim6d: support embedded-rp
This commit is contained in:
Jafar Al-Gharaibeh 2024-11-16 19:30:20 -06:00 committed by GitHub
commit f018a92c3b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
22 changed files with 1038 additions and 4 deletions

View File

@ -103,6 +103,39 @@ PIMv6 Router
interface will be selected. By default, the highest loopback address is
selected, which can also be configured via ``loopback``.
.. clicmd:: embedded-rp
Learn the RP via embedded RP address in multicast group.
.. note::
The embedded RP address range is: FF70::/12 (with exception of FFF0::/12).
Example: FF75:0130:2001:DB8:FFFF::100
- First byte is always 0xFF
- Second byte high nibble is always 7 (signifies RPT bits set)
- Second byte low nibble is address scope
- Third byte high nibble is zero (reserved)
- Third byte low nibble is the RP interface ID (RIID)
- Fourth byte is the RP prefix length (must be between 1 and 64)
- Fifth byte + RP prefix length is the RP address prefix
- Last four bytes are the group ID.
The RP in the above multicast address sample is:
2001:DB8:FFFF::1
.. clicmd:: embedded-rp group-list PREFIX_LIST_NAME
Restrict the embedded RP prefix range using only the permitted groups
provided by the prefix-list.
This is useful to restrict what RP addresses can be used.
.. clicmd:: embedded-rp limit (1-4294967295)
Restrict the maximum number of embedded RPs learned at the same time.
.. clicmd:: spt-switchover infinity-and-beyond [prefix-list PLIST] .. clicmd:: spt-switchover infinity-and-beyond [prefix-list PLIST]
On the last hop router if it is desired to not switch over to the SPT tree On the last hop router if it is desired to not switch over to the SPT tree

View File

@ -1217,6 +1217,52 @@ DEFPY_ATTR(no_ipv6_pim_rp_prefix_list,
return ret; return ret;
} }
/*
 * "[no] embedded-rp": toggle learning RPs embedded in IPv6 multicast group
 * addresses (RFC 3956) for this PIM instance.
 */
DEFPY_YANG(pim6_embedded_rp,
	   pim6_embedded_rp_cmd,
	   "[no] embedded-rp",
	   NO_STR
	   PIM_EMBEDDED_RP)
{
	/* The xpath is a compile-time constant with no format arguments, so
	 * pass it directly instead of copying it through snprintf(). */
	nb_cli_enqueue_change(vty, FRR_PIM_EMBEDDED_RP_XPATH,
			      no ? NB_OP_DESTROY : NB_OP_MODIFY, "true");

	return nb_cli_apply_changes(vty, NULL);
}
/*
 * "[no] embedded-rp group-list WORD": restrict which multicast groups may
 * contribute embedded RPs, using the named prefix-list.
 */
DEFPY_YANG(pim6_embedded_rp_group_list,
	   pim6_embedded_rp_group_list_cmd,
	   "[no] embedded-rp group-list ![WORD$prefix_list]",
	   NO_STR
	   PIM_EMBEDDED_RP
	   "Configure embedded RP permitted groups\n"
	   "Embedded RP permitted groups\n")
{
	/* Constant xpath (no format arguments): use it directly rather than
	 * round-tripping it through a stack buffer with snprintf(). */
	nb_cli_enqueue_change(vty, FRR_PIM_EMBEDDED_RP_GROUP_LIST_XPATH,
			      no ? NB_OP_DESTROY : NB_OP_MODIFY, prefix_list);

	return nb_cli_apply_changes(vty, NULL);
}
/*
 * "[no] embedded-rp limit (1-4294967295)": cap the number of embedded RPs
 * that may be learned at the same time.
 */
DEFPY_YANG(pim6_embedded_rp_limit,
	   pim6_embedded_rp_limit_cmd,
	   "[no] embedded-rp limit ![(1-4294967295)$limit]",
	   NO_STR
	   PIM_EMBEDDED_RP
	   "Limit the amount of embedded RPs to learn\n"
	   "Maximum amount of embedded RPs to learn\n")
{
	/* Constant xpath (no format arguments): pass it directly instead of
	 * copying it through snprintf(). */
	nb_cli_enqueue_change(vty, FRR_PIM_EMBEDDED_RP_MAXIMUM_RPS_XPATH,
			      no ? NB_OP_DESTROY : NB_OP_MODIFY, limit_str);

	return nb_cli_apply_changes(vty, NULL);
}
DEFPY (ipv6_pim_bsm, DEFPY (ipv6_pim_bsm,
ipv6_pim_bsm_cmd, ipv6_pim_bsm_cmd,
"ipv6 pim bsm", "ipv6 pim bsm",
@ -2788,6 +2834,11 @@ void pim_cmd_init(void)
install_element(PIM6_NODE, &no_pim6_rp_cmd); install_element(PIM6_NODE, &no_pim6_rp_cmd);
install_element(PIM6_NODE, &pim6_rp_prefix_list_cmd); install_element(PIM6_NODE, &pim6_rp_prefix_list_cmd);
install_element(PIM6_NODE, &no_pim6_rp_prefix_list_cmd); install_element(PIM6_NODE, &no_pim6_rp_prefix_list_cmd);
install_element(PIM6_NODE, &pim6_embedded_rp_cmd);
install_element(PIM6_NODE, &pim6_embedded_rp_group_list_cmd);
install_element(PIM6_NODE, &pim6_embedded_rp_limit_cmd);
install_element(PIM6_NODE, &pim6_ssmpingd_cmd); install_element(PIM6_NODE, &pim6_ssmpingd_cmd);
install_element(PIM6_NODE, &no_pim6_ssmpingd_cmd); install_element(PIM6_NODE, &no_pim6_ssmpingd_cmd);
install_element(PIM6_NODE, &pim6_bsr_candidate_rp_cmd); install_element(PIM6_NODE, &pim6_bsr_candidate_rp_cmd);

View File

@ -46,6 +46,7 @@
#define DEBUG_PIMV6_ZEBRA_STR "ZEBRA protocol activity\n" #define DEBUG_PIMV6_ZEBRA_STR "ZEBRA protocol activity\n"
#define DEBUG_MROUTE6_STR "PIMv6 interaction with kernel MFC cache\n" #define DEBUG_MROUTE6_STR "PIMv6 interaction with kernel MFC cache\n"
#define DEBUG_PIMV6_BSM_STR "BSR message processing activity\n" #define DEBUG_PIMV6_BSM_STR "BSR message processing activity\n"
#define PIM_EMBEDDED_RP "Embedded Rendezvous Point\n"
void pim_cmd_init(void); void pim_cmd_init(void);

View File

@ -319,6 +319,9 @@ static void gm_expiry_calc(struct gm_query_timers *timers)
static void gm_sg_free(struct gm_sg *sg) static void gm_sg_free(struct gm_sg *sg)
{ {
if (pim_embedded_rp_is_embedded(&sg->sgaddr.grp))
pim_embedded_rp_delete(sg->iface->pim, &sg->sgaddr.grp);
/* t_sg_expiry is handled before this is reached */ /* t_sg_expiry is handled before this is reached */
EVENT_OFF(sg->t_sg_query); EVENT_OFF(sg->t_sg_query);
gm_packet_sg_subs_fini(sg->subs_negative); gm_packet_sg_subs_fini(sg->subs_negative);
@ -415,6 +418,13 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
new_join = gm_sg_state_want_join(desired); new_join = gm_sg_state_want_join(desired);
if (new_join && !sg->tib_joined) { if (new_join && !sg->tib_joined) {
pim_addr embedded_rp;
if (sg->iface->pim->embedded_rp.enable &&
pim_embedded_rp_extract(&sg->sgaddr.grp, &embedded_rp) &&
!pim_embedded_rp_filter_match(sg->iface->pim, &sg->sgaddr.grp))
pim_embedded_rp_new(sg->iface->pim, &sg->sgaddr.grp, &embedded_rp);
/* this will retry if join previously failed */ /* this will retry if join previously failed */
sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr, sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
gm_ifp->ifp, &sg->oil); gm_ifp->ifp, &sg->oil);
@ -434,6 +444,13 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
} }
if (desired == GM_SG_NOINFO) { if (desired == GM_SG_NOINFO) {
/*
* If oil is still present then get rid of it or we will leak
* this data structure.
*/
if (sg->oil)
pim_channel_oil_del(sg->oil, __func__);
/* multiple paths can lead to the last state going away; /* multiple paths can lead to the last state going away;
* t_sg_expire can still be running if we're arriving from * t_sg_expire can still be running if we're arriving from
* another path. * another path.

View File

@ -208,6 +208,12 @@ void pim_ifchannel_delete(struct pim_ifchannel *ch)
zlog_debug("%s: ifchannel entry %s(%s) is deleted ", __func__, zlog_debug("%s: ifchannel entry %s(%s) is deleted ", __func__,
ch->sg_str, ch->interface->name); ch->sg_str, ch->interface->name);
#if PIM_IPV == 6
/* Embedded RPs learned via PIM join/connected source are freed here */
if (pim_embedded_rp_is_embedded(&ch->sg.grp))
pim_embedded_rp_delete(pim_ifp->pim, &ch->sg.grp);
#endif /* PIM_IPV == 6 */
XFREE(MTYPE_PIM_IFCHANNEL, ch); XFREE(MTYPE_PIM_IFCHANNEL, ch);
if (up) if (up)

View File

@ -26,6 +26,8 @@
static void pim_instance_terminate(struct pim_instance *pim) static void pim_instance_terminate(struct pim_instance *pim)
{ {
pim->stopping = true;
pim_vxlan_exit(pim); pim_vxlan_exit(pim);
if (pim->ssm_info) { if (pim->ssm_info) {

View File

@ -191,6 +191,24 @@ struct pim_instance {
int64_t last_route_change_time; int64_t last_route_change_time;
uint64_t gm_rx_drop_sys; uint64_t gm_rx_drop_sys;
bool stopping;
#if PIM_IPV == 6
struct {
/** Embedded RP enable state. */
bool enable;
/** Embedded RP group prefix list. */
char *group_list;
/** Maximum allowed number of embedded RPs at a time. */
uint32_t maximum_rps;
/** Embedded RP routing table */
struct route_table *table;
/** Embedded RPs count */
size_t rp_count;
} embedded_rp;
#endif /* PIM_IPV == 6 */
}; };
void pim_vrf_init(void); void pim_vrf_init(void);

View File

@ -42,6 +42,9 @@ static void recv_join(struct interface *ifp, struct pim_neighbor *neigh,
uint8_t source_flags) uint8_t source_flags)
{ {
struct pim_interface *pim_ifp = NULL; struct pim_interface *pim_ifp = NULL;
#if PIM_IPV == 6
pim_addr embedded_rp;
#endif /* PIM_IPV == 6 */
if (PIM_DEBUG_PIM_J_P) if (PIM_DEBUG_PIM_J_P)
zlog_debug( zlog_debug(
@ -53,6 +56,12 @@ static void recv_join(struct interface *ifp, struct pim_neighbor *neigh,
pim_ifp = ifp->info; pim_ifp = ifp->info;
assert(pim_ifp); assert(pim_ifp);
#if PIM_IPV == 6
if (pim_ifp->pim->embedded_rp.enable && pim_embedded_rp_extract(&sg->grp, &embedded_rp) &&
!pim_embedded_rp_filter_match(pim_ifp->pim, &sg->grp))
pim_embedded_rp_new(pim_ifp->pim, &sg->grp, &embedded_rp);
#endif /* PIM_IPV == 6 */
++pim_ifp->pim_ifstat_join_recv; ++pim_ifp->pim_ifstat_join_recv;
/* /*

View File

@ -182,6 +182,14 @@ int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg)
* so the kernel doesn't keep nagging us. * so the kernel doesn't keep nagging us.
*/ */
struct pim_rpf *rpg; struct pim_rpf *rpg;
#if PIM_IPV == 6
pim_addr embedded_rp;
if (pim_ifp->pim->embedded_rp.enable &&
pim_embedded_rp_extract(&sg.grp, &embedded_rp) &&
!pim_embedded_rp_filter_match(pim_ifp->pim, &sg.grp))
pim_embedded_rp_new(pim_ifp->pim, &sg.grp, &embedded_rp);
#endif /* PIM_IPV == 6 */
rpg = RP(pim_ifp->pim, msg->msg_im_dst); rpg = RP(pim_ifp->pim, msg->msg_im_dst);
if (!rpg) { if (!rpg) {

View File

@ -379,6 +379,25 @@ const struct frr_yang_module_info frr_pim_rp_info = {
.destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy, .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy,
} }
}, },
{
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/embedded-rp/enable",
.cbs = {
.modify = pim_embedded_rp_enable_modify,
}
},
{
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/embedded-rp/group-list",
.cbs = {
.modify = pim_embedded_rp_group_list_modify,
.destroy = pim_embedded_rp_group_list_destroy,
}
},
{
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/embedded-rp/maximum-rps",
.cbs = {
.modify = pim_embedded_rp_maximum_rps_modify,
}
},
{ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/discovery-enabled", .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/discovery-enabled",
.cbs = { .cbs = {

View File

@ -159,6 +159,10 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
struct nb_cb_modify_args *args); struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy( int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy(
struct nb_cb_destroy_args *args); struct nb_cb_destroy_args *args);
int pim_embedded_rp_enable_modify(struct nb_cb_modify_args *args);
int pim_embedded_rp_group_list_modify(struct nb_cb_modify_args *args);
int pim_embedded_rp_group_list_destroy(struct nb_cb_destroy_args *args);
int pim_embedded_rp_maximum_rps_modify(struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_modify( int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_modify(
struct nb_cb_modify_args *args); struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_destroy( int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_destroy(
@ -286,6 +290,9 @@ int routing_control_plane_protocols_name_validate(
"mroute[source-addr='%s'][group-addr='%s']" "mroute[source-addr='%s'][group-addr='%s']"
#define FRR_PIM_STATIC_RP_XPATH \ #define FRR_PIM_STATIC_RP_XPATH \
"frr-pim-rp:rp/static-rp/rp-list[rp-address='%s']" "frr-pim-rp:rp/static-rp/rp-list[rp-address='%s']"
#define FRR_PIM_EMBEDDED_RP_XPATH "./frr-pim-rp:rp/embedded-rp/enable"
#define FRR_PIM_EMBEDDED_RP_GROUP_LIST_XPATH "./frr-pim-rp:rp/embedded-rp/group-list"
#define FRR_PIM_EMBEDDED_RP_MAXIMUM_RPS_XPATH "./frr-pim-rp:rp/embedded-rp/maximum-rps"
#define FRR_PIM_AUTORP_XPATH "./frr-pim-rp:rp/auto-rp" #define FRR_PIM_AUTORP_XPATH "./frr-pim-rp:rp/auto-rp"
#define FRR_GMP_INTERFACE_XPATH \ #define FRR_GMP_INTERFACE_XPATH \
"./frr-gmp:gmp/address-family[address-family='%s']" "./frr-gmp:gmp/address-family[address-family='%s']"

View File

@ -17,6 +17,7 @@
#include "pim_mlag.h" #include "pim_mlag.h"
#include "pim_bfd.h" #include "pim_bfd.h"
#include "pim_msdp_socket.h" #include "pim_msdp_socket.h"
#include "pimd/pim_rp.h"
#include "pim_static.h" #include "pim_static.h"
#include "pim_ssm.h" #include "pim_ssm.h"
#include "pim_ssmpingd.h" #include "pim_ssmpingd.h"
@ -2684,6 +2685,115 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
return NB_OK; return NB_OK;
} }
/*
 * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/embedded-rp/enable
 *
 * Apply the embedded RP enable/disable leaf to the owning VRF's PIM
 * instance. The feature is only implemented for the IPv6 daemon.
 */
int pim_embedded_rp_enable_modify(struct nb_cb_modify_args *args)
{
#if PIM_IPV == 6
	struct vrf *vrf;
#endif /* PIM_IPV == 6 */

	/* Only the apply phase does any work; validate/prepare/abort (and
	 * anything else) always succeed. */
	if (args->event != NB_EV_APPLY)
		return NB_OK;

#if PIM_IPV == 6
	vrf = nb_running_get_entry(args->dnode, NULL, true);
	pim_embedded_rp_enable(vrf->info, yang_dnode_get_bool(args->dnode, NULL));
	return NB_OK;
#else
	snprintf(args->errmsg, args->errmsg_len, "embedded RP is IPv6 only");
	return NB_ERR;
#endif /* PIM_IPV == 6 */
}
/*
 * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/embedded-rp/group-list
 *
 * Apply a new embedded RP group prefix-list name to the VRF's PIM instance
 * (IPv6 only).
 */
int pim_embedded_rp_group_list_modify(struct nb_cb_modify_args *args)
{
#if PIM_IPV == 6
	struct vrf *vrf;
#endif /* PIM_IPV == 6 */

	/* Work happens exclusively in the apply phase. */
	if (args->event != NB_EV_APPLY)
		return NB_OK;

#if PIM_IPV == 6
	vrf = nb_running_get_entry(args->dnode, NULL, true);
	pim_embedded_rp_set_group_list(vrf->info, yang_dnode_get_string(args->dnode, NULL));
	return NB_OK;
#else
	snprintf(args->errmsg, args->errmsg_len, "embedded RP is IPv6 only");
	return NB_ERR;
#endif /* PIM_IPV == 6 */
}
/*
 * Clear the embedded RP group prefix-list configuration from the VRF's PIM
 * instance (IPv6 only).
 */
int pim_embedded_rp_group_list_destroy(struct nb_cb_destroy_args *args)
{
#if PIM_IPV == 6
	struct vrf *vrf;
#endif /* PIM_IPV == 6 */

	/* Work happens exclusively in the apply phase. */
	if (args->event != NB_EV_APPLY)
		return NB_OK;

#if PIM_IPV == 6
	vrf = nb_running_get_entry(args->dnode, NULL, true);
	pim_embedded_rp_set_group_list(vrf->info, NULL);
	return NB_OK;
#else
	snprintf(args->errmsg, args->errmsg_len, "embedded RP is IPv6 only");
	return NB_ERR;
#endif /* PIM_IPV == 6 */
}
/*
 * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/embedded-rp/maximum-rps
 *
 * Apply the configured embedded RP learning limit to the VRF's PIM
 * instance (IPv6 only).
 */
int pim_embedded_rp_maximum_rps_modify(struct nb_cb_modify_args *args)
{
#if PIM_IPV == 6
	struct vrf *vrf;
#endif /* PIM_IPV == 6 */

	/* Work happens exclusively in the apply phase. */
	if (args->event != NB_EV_APPLY)
		return NB_OK;

#if PIM_IPV == 6
	vrf = nb_running_get_entry(args->dnode, NULL, true);
	pim_embedded_rp_set_maximum_rps(vrf->info, yang_dnode_get_uint32(args->dnode, NULL));
	return NB_OK;
#else
	snprintf(args->errmsg, args->errmsg_len, "embedded RP is IPv6 only");
	return NB_ERR;
#endif /* PIM_IPV == 6 */
}
/* /*
* XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/discovery-enabled * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/discovery-enabled
*/ */

View File

@ -115,10 +115,38 @@ void pim_rp_init(struct pim_instance *pim)
zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn, zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
rp_info, &rp_info->group, rp_info, &rp_info->group,
route_node_get_lock_count(rn)); route_node_get_lock_count(rn));
#if PIM_IPV == 6
/*
* Embedded RP defaults
*/
pim->embedded_rp.enable = false;
pim->embedded_rp.group_list = NULL;
pim->embedded_rp.maximum_rps = PIM_EMBEDDED_RP_MAXIMUM;
pim->embedded_rp.table = route_table_init();
#endif /* PIM_IPV == 6 */
} }
void pim_rp_free(struct pim_instance *pim) void pim_rp_free(struct pim_instance *pim)
{ {
#if PIM_IPV == 6
struct route_node *rn;
pim_embedded_rp_set_group_list(pim, NULL);
for (rn = route_top(pim->embedded_rp.table); rn; rn = route_next(rn)) {
if (rn->info == NULL)
continue;
pim_embedded_rp_free(pim, rn->info);
rn->info = NULL;
}
route_table_finish(pim->embedded_rp.table);
pim->embedded_rp.table = NULL;
#endif /* PIM_IPV == 6 */
if (pim->rp_table) if (pim->rp_table)
route_table_finish(pim->rp_table); route_table_finish(pim->rp_table);
pim->rp_table = NULL; pim->rp_table = NULL;
@ -216,6 +244,24 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
const struct prefix_list_entry *entry; const struct prefix_list_entry *entry;
struct route_node *rn; struct route_node *rn;
#if PIM_IPV == 6
/*
* Embedded RP search. Always try to match against embedded RP first.
*/
rn = route_node_match(pim->embedded_rp.table, group);
if (rn != NULL) {
rp_info = rn->info ? rn->info : NULL;
if (rp_info && PIM_DEBUG_PIM_TRACE_DETAIL) {
zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group, rn, &rp_info->group);
}
route_unlock_node(rn);
if (rp_info)
return rp_info;
}
#endif /* PIM_IPV == 6 */
bp = NULL; bp = NULL;
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (rp_info->plist) { if (rp_info->plist) {
@ -330,9 +376,8 @@ static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr)) if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
return 1; return 1;
if (!pim_ifp->sec_addr_list) { if (!pim_ifp->sec_addr_list)
return 0; return 0;
}
for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) { for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
sec_paddr = pim_addr_from_prefix(&sec_addr->addr); sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
@ -1203,6 +1248,10 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
strlcpy(source, "BSR", sizeof(source)); strlcpy(source, "BSR", sizeof(source));
else if (rp_info->rp_src == RP_SRC_AUTORP) else if (rp_info->rp_src == RP_SRC_AUTORP)
strlcpy(source, "AutoRP", sizeof(source)); strlcpy(source, "AutoRP", sizeof(source));
#if PIM_IPV == 6
else if (rp_info->rp_src == RP_SRC_EMBEDDED_RP)
strlcpy(source, "Embedded-RP", sizeof(source));
#endif /* PIM_IPV == 6 */
else else
strlcpy(source, "None", sizeof(source)); strlcpy(source, "None", sizeof(source));
if (json) { if (json) {
@ -1329,3 +1378,208 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
} }
} }
} }
#if PIM_IPV == 6
DEFINE_MTYPE_STATIC(PIMD, PIM_EMBEDDED_RP_GROUP_LIST, "PIM embedded RP group list");
DEFINE_MTYPE_STATIC(PIMD, PIM_EMBEDDED_RP_ENTRY, "PIM embedded RP configuration");
/*
 * Enable or disable embedded RP (RFC 3956) handling for this PIM instance.
 *
 * On disable all dynamically learned embedded RPs are flushed and the
 * lookup table is reallocated empty.
 */
void pim_embedded_rp_enable(struct pim_instance *pim, bool enable)
{
	struct route_node *rn;

	pim->embedded_rp.enable = enable;
	if (enable)
		return;

	/* Remove all learned embedded RPs and reallocate data structure. */
	for (rn = route_top(pim->embedded_rp.table); rn; rn = route_next(rn)) {
		/* Skip internal radix nodes carrying no RP entry: passing
		 * NULL to pim_embedded_rp_free() would dereference it (the
		 * same guard exists in the pim_rp_free() table walk). */
		if (rn->info == NULL)
			continue;

		pim_embedded_rp_free(pim, rn->info);
		rn->info = NULL;
	}
	route_table_finish(pim->embedded_rp.table);
	pim->embedded_rp.table = route_table_init();
}
/*
 * Replace the embedded RP group filter prefix-list name.
 *
 * A NULL `group_list` clears the configuration; otherwise the name is
 * duplicated and owned by the PIM instance.
 */
void pim_embedded_rp_set_group_list(struct pim_instance *pim, const char *group_list)
{
	/* Release any previously configured name first. */
	if (pim->embedded_rp.group_list != NULL)
		XFREE(MTYPE_PIM_EMBEDDED_RP_GROUP_LIST, pim->embedded_rp.group_list);

	if (group_list != NULL)
		pim->embedded_rp.group_list =
			XSTRDUP(MTYPE_PIM_EMBEDDED_RP_GROUP_LIST, group_list);
}
/*
 * Set the maximum number of embedded RPs this instance may hold at once.
 * The new limit only applies to future learns; existing entries are kept
 * (enforced in pim_embedded_rp_new()).
 */
void pim_embedded_rp_set_maximum_rps(struct pim_instance *pim, uint32_t maximum)
{
	pim->embedded_rp.maximum_rps = maximum;
}
bool pim_embedded_rp_filter_match(const struct pim_instance *pim, const pim_addr *group)
{
struct prefix_list *list;
struct prefix group_prefix = {
.family = PIM_AF,
.prefixlen = PIM_MAX_BITLEN,
.u.prefix6 = *group,
};
list = prefix_list_lookup(PIM_AFI, pim->embedded_rp.group_list);
if (list == NULL)
return false;
if (prefix_list_apply_ext(list, NULL, &group_prefix, true) == PREFIX_DENY) {
if (PIM_DEBUG_PIM_TRACE)
zlog_debug("filtering embedded-rp group %pPA", group);
return true;
}
return false;
}
/*
 * Check whether a multicast group address carries a valid embedded RP.
 *
 * Embedded RP basic format (RFC 3956):
 * - First byte: 0xFF
 * - Third nibble: 0x7 (binary 0111, all RPT flags set)
 * - Fourth nibble: scope
 * - Fifth nibble: reserved (zero)
 * - Sixth nibble: RIID (RP interface ID, non-zero)
 * - Fourth byte: prefix length (1..64)
 * - Fifth byte and on: RP address prefix
 * - Last four bytes: multicast group ID
 */
bool pim_embedded_rp_is_embedded(const pim_addr *group)
{
	const uint8_t *octet = group->s6_addr;

	return octet[0] == 0xFF		   /* multicast address */
	       && (octet[1] & 0xF0) == 0x70 /* all embedded RP flags set */
	       && (octet[2] & 0xF0) == 0x00 /* reserved nibble is zero */
	       && (octet[2] & 0x0F) != 0x00 /* RIID must not be zero */
	       && octet[3] != 0 && octet[3] <= 64; /* prefix length 1..64 */
}
/*
 * Extract the RP address embedded in a multicast group address (RFC 3956).
 *
 * \param group multicast group address to parse.
 * \param rp output: the assembled RP address (only written on success).
 * \returns true when `group` had a valid embedded RP, otherwise false.
 */
bool pim_embedded_rp_extract(const pim_addr *group, pim_addr *rp)
{
	struct prefix prefix;

	/* Validate format first: guarantees prefixlen is in 1..64 below. */
	if (!pim_embedded_rp_is_embedded(group))
		return false;

	/* Copy at most the prefix bytes length to RP prefix. */
	prefix = (struct prefix){
		.family = PIM_AF,
		.prefixlen = group->s6_addr[3],
	};
	/* Round the bit length up to whole bytes (max 8 bytes for /64). */
	memcpy(&prefix.u.prefix6, &group->s6_addr[4],
	       (prefix.prefixlen % 8) == 0 ? (prefix.prefixlen / 8) : (prefix.prefixlen / 8) + 1);
	/* Zero unused address bits. */
	apply_mask(&prefix);

	/* Return assembled RP address: prefix plus the RIID nibble as the
	 * low byte (RP address is <prefix>::<RIID>). */
	*rp = prefix.u.prefix6;
	rp->s6_addr[15] = group->s6_addr[2] & 0x0F;
	return true;
}
/*
 * Learn a new embedded RP for `group` with RP address `rp`.
 *
 * Idempotent per group: if an entry already exists, or the configured
 * maximum number of embedded RPs has been reached, nothing changes.
 * Otherwise a new rp_info is created, inserted in both the embedded RP
 * route table and the instance RP list, and the usual RP maintenance
 * (interface update, NHT registration, group-to-RP refresh) is run.
 */
void pim_embedded_rp_new(struct pim_instance *pim, const pim_addr *group, const pim_addr *rp)
{
	struct route_node *rnode;
	struct rp_info *rp_info;
	struct prefix group_prefix = {
		.family = PIM_AF,
		.prefixlen = PIM_MAX_BITLEN,
		.u.prefix6 = *group,
	};

	rnode = route_node_get(pim->embedded_rp.table, &group_prefix);
	/* Entry already learned: drop the extra lock taken by
	 * route_node_get() and keep the existing RP. */
	if (rnode->info != NULL) {
		route_unlock_node(rnode);
		return;
	}

	/* Enforce the configured learning limit. */
	if (pim->embedded_rp.rp_count >= pim->embedded_rp.maximum_rps) {
		zlog_info("Embedded RP maximum (%u) has been reached. Disregarding new RP %pPA",
			  pim->embedded_rp.maximum_rps, rp);
		route_unlock_node(rnode);
		return;
	}

	pim->embedded_rp.rp_count++;

	/* Keep the route_node_get() lock for as long as `info` is set; it is
	 * released by the double unlock in pim_embedded_rp_delete(). */
	rnode->info = rp_info = XCALLOC(MTYPE_PIM_EMBEDDED_RP_ENTRY, sizeof(struct rp_info));
	rp_info->rp.rpf_addr = *rp;
	prefix_copy(&rp_info->group, &group_prefix);
	rp_info->rp_src = RP_SRC_EMBEDDED_RP;
	listnode_add_sort(pim->rp_list, rp_info);

	if (PIM_DEBUG_TRACE)
		zlog_debug("add embedded RP %pPA for group %pPA", rp, group);

	/*
	 * PIM RP regular maintenance
	 */
	pim_zebra_update_all_interfaces(pim);
	pim_rp_check_interfaces(pim, rp_info);
	if (rp_info->i_am_rp && PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("new RP %pPA for %pFX is ourselves", &rp_info->rp.rpf_addr,
			   &rp_info->group);
	pim_rp_refresh_group_to_rp_mapping(pim);

	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra", __func__,
			   &rp_info->rp.rpf_addr, &rp_info->group);
	pim_find_or_track_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info, NULL);
	pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
				&rp_info->group, 1);
}
/*
 * Forget a previously learned embedded RP for `group`.
 *
 * Safe to call when no entry exists (lookup miss) or during shutdown when
 * the table has already been torn down.
 */
void pim_embedded_rp_delete(struct pim_instance *pim, const pim_addr *group)
{
	struct route_node *rnode;
	struct prefix group_prefix = {
		.family = PIM_AF,
		.prefixlen = PIM_MAX_BITLEN,
		.u.prefix6 = *group,
	};

	/* Avoid NULL accesses during shutdown */
	if (pim->embedded_rp.table == NULL)
		return;

	rnode = route_node_lookup(pim->embedded_rp.table, &group_prefix);
	if (rnode == NULL)
		return;

	pim_embedded_rp_free(pim, rnode->info);
	rnode->info = NULL;

	/* Unlock twice to remove the node: once for the lookup above and
	 * once for the lock held since pim_embedded_rp_new(). */
	route_unlock_node(rnode);
	route_unlock_node(rnode);
}
/*
 * Release all state attached to an embedded RP entry: NHT tracking, the
 * instance RP list membership and the rp_info memory itself.
 *
 * `rp_info` must be non-NULL (declared nonnull in pim_rp.h); the caller is
 * responsible for clearing its route-table pointer and node locks.
 */
void pim_embedded_rp_free(struct pim_instance *pim, struct rp_info *rp_info)
{
	/* Guard against underflow in case counting ever gets out of sync. */
	if (pim->embedded_rp.rp_count > 0)
		pim->embedded_rp.rp_count--;

	if (PIM_DEBUG_TRACE)
		zlog_debug("delete embedded RP %pPA", &rp_info->rp.rpf_addr);

	pim_delete_tracked_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info);
	listnode_delete(pim->rp_list, rp_info);
	XFREE(MTYPE_PIM_EMBEDDED_RP_ENTRY, rp_info);
}
#endif /* PIM_IPV == 6 */

View File

@ -16,7 +16,15 @@
struct pim_interface; struct pim_interface;
enum rp_source { RP_SRC_NONE = 0, RP_SRC_STATIC, RP_SRC_BSR, RP_SRC_AUTORP }; enum rp_source {
RP_SRC_NONE = 0,
RP_SRC_STATIC,
RP_SRC_BSR,
RP_SRC_AUTORP,
#if PIM_IPV == 6
RP_SRC_EMBEDDED_RP,
#endif /* PIM_IPV == 6*/
};
struct rp_info { struct rp_info {
struct prefix group; struct prefix group;
@ -26,6 +34,11 @@ struct rp_info {
char *plist; char *plist;
}; };
#if PIM_IPV == 6
/** Default maximum simultaneous embedded RPs at one time. */
#define PIM_EMBEDDED_RP_MAXIMUM 25
#endif /* PIM_IPV == 6 */
void pim_rp_init(struct pim_instance *pim); void pim_rp_init(struct pim_instance *pim);
void pim_rp_free(struct pim_instance *pim); void pim_rp_free(struct pim_instance *pim);
@ -69,4 +82,45 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
const struct prefix *group); const struct prefix *group);
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up); void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up);
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim); void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim);
#if PIM_IPV == 6
/** Check if address has valid embedded RP value. */
bool pim_embedded_rp_is_embedded(const pim_addr *group) __attribute__((nonnull(1)));
/** Test address against embedded RP group list filter. */
bool pim_embedded_rp_filter_match(const struct pim_instance *pim, const pim_addr *group)
__attribute__((nonnull(1, 2)));
/**
* Extract embedded RP address from multicast group.
*
* Returns true if successful otherwise false.
*/
bool pim_embedded_rp_extract(const pim_addr *group, pim_addr *rp) __attribute__((nonnull(1, 2)));
/** Allocate new embedded RP. */
void pim_embedded_rp_new(struct pim_instance *pim, const pim_addr *group, const pim_addr *rp)
__attribute__((nonnull(1, 2, 3)));
/** Remove and free allocated embedded RP. */
void pim_embedded_rp_delete(struct pim_instance *pim, const pim_addr *group)
__attribute__((nonnull(1, 2)));
/** Free memory allocated by embedded RP information. */
extern void pim_embedded_rp_free(struct pim_instance *pim, struct rp_info *rp_info)
__attribute__((nonnull(1, 2)));
/** Toggle embedded RP state. */
extern void pim_embedded_rp_enable(struct pim_instance *pim, bool enable)
__attribute__((nonnull(1)));
/** Configure embedded RP group prefix list. */
extern void pim_embedded_rp_set_group_list(struct pim_instance *pim, const char *group_list)
__attribute__((nonnull(1)));
/** Configure maximum number of embedded RPs to learn. */
extern void pim_embedded_rp_set_maximum_rps(struct pim_instance *pim, uint32_t maximum)
__attribute__((nonnull(1)));
#endif /* PIM_IPV == 6 */
#endif #endif

View File

@ -115,8 +115,13 @@ bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,
return false; return false;
} }
if (!*oilp) if (!*oilp) {
*oilp = tib_sg_oil_setup(pim, sg, oif); *oilp = tib_sg_oil_setup(pim, sg, oif);
#if PIM_IPV == 6
if (pim_embedded_rp_is_embedded(&sg.grp))
(*oilp)->oil_ref_count--;
#endif /* PIM_IPV == 6 */
}
if (!*oilp) if (!*oilp)
return false; return false;
@ -176,7 +181,14 @@ void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,
Making the call to pim_channel_del_oif and ignoring the return code Making the call to pim_channel_del_oif and ignoring the return code
fixes the issue without ill effect, similar to fixes the issue without ill effect, similar to
pim_forward_stop below. pim_forward_stop below.
Also on shutdown when the PIM upstream is removed the channel removal
may have already happened, so just return here instead of trying to
access an invalid pointer.
*/ */
if (pim->stopping)
return;
result = pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM, result = pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM,
__func__); __func__);
if (result) { if (result) {

View File

@ -186,6 +186,23 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
++writes; ++writes;
} }
#if PIM_IPV == 6
if (pim->embedded_rp.enable) {
vty_out(vty, " embedded-rp\n");
writes++;
}
if (pim->embedded_rp.maximum_rps != PIM_EMBEDDED_RP_MAXIMUM) {
vty_out(vty, " embedded-rp limit %u\n", pim->embedded_rp.maximum_rps);
writes++;
}
if (pim->embedded_rp.group_list) {
vty_out(vty, " embedded-rp group-list %s\n", pim->embedded_rp.group_list);
writes++;
}
#endif /* PIM_IPV == 6 */
writes += pim_rp_config_write(pim, vty); writes += pim_rp_config_write(pim, vty);
#if PIM_IPV == 4 #if PIM_IPV == 4
writes += pim_autorp_config_write(pim, vty); writes += pim_autorp_config_write(pim, vty);

View File

@ -0,0 +1,28 @@
log commands
!
interface r1-eth0
ipv6 address 2001:db8:10::1/64
ipv6 ospf6 area 0
ipv6 ospf6 dead-interval 8
ipv6 ospf6 hello-interval 4
ipv6 pim
!
interface r1-eth1
ipv6 address 2001:db8:20::1/64
ipv6 ospf6 area 0
ipv6 ospf6 dead-interval 8
ipv6 ospf6 hello-interval 4
ipv6 pim
!
interface lo
ip address 10.254.254.1/32
ipv6 address 2001:db8:ffff::1/128
ipv6 ospf6 area 0
ipv6 pim
!
router ospf6
redistribute connected
!
router pim6
rp 2001:db8:ffff::1
!

View File

@ -0,0 +1,39 @@
log commands
!
interface r2-eth0
ipv6 address 2001:db8:10::2/64
ipv6 ospf6 area 0
ipv6 ospf6 dead-interval 8
ipv6 ospf6 hello-interval 4
ipv6 pim
!
interface r2-eth1
ipv6 address 2001:db8:30::20/64
ipv6 ospf6 area 0
ipv6 ospf6 dead-interval 8
ipv6 ospf6 hello-interval 4
ipv6 pim
!
interface r2-eth2
ipv6 address 2001:db8:100::1/64
ipv6 ospf6 area 0
ipv6 mld
ipv6 pim
!
interface lo
ip address 10.254.254.2/32
ipv6 address 2001:db8:ffff::2/128
ipv6 ospf6 area 0
ipv6 pim
!
router ospf6
redistribute connected
!
router pim6
embedded-rp
embedded-rp limit 3
embedded-rp group-list embedded-groups
!
! Only permit embedded RPs pointing to r1 or r2 (myself)
ipv6 prefix-list embedded-groups permit FF75:0130:2001:db8:ffff::/80
ipv6 prefix-list embedded-groups permit FF75:0230:2001:db8:ffff::/80

View File

@ -0,0 +1,34 @@
log commands
!
interface r3-eth0
ipv6 address 2001:db8:20::2/64
ipv6 ospf6 area 0
ipv6 ospf6 dead-interval 8
ipv6 ospf6 hello-interval 4
ipv6 pim
!
interface r3-eth1
ipv6 address 2001:db8:30::30/64
ipv6 ospf6 area 0
ipv6 ospf6 dead-interval 8
ipv6 ospf6 hello-interval 4
ipv6 pim
!
interface r3-eth2
ipv6 address 2001:db8:200::1/64
ipv6 ospf6 area 0
ipv6 mld
ipv6 pim
!
interface lo
ip address 10.254.254.3/32
ipv6 address 2001:db8:ffff::3/128
ipv6 ospf6 area 0
ipv6 pim
!
router ospf6
redistribute connected
!
router pim6
embedded-rp
!

View File

@ -0,0 +1,264 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: ISC
# Copyright (c) 2024 Network Education Foundation, Inc. ("NetDEF")
# Rafael Zalamena
import os
import sys
import pytest
from functools import partial
from lib import topotest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from lib.topolog import logger
from lib.pim import McastTesterHelper
"""
pim_embedded_rp.py: Test PIM embedded RP functionality.
"""
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# Required to instantiate the topology builder class.
pytestmark = [pytest.mark.pim6d]
def build_topo(tgen):
    "Build the three-router full mesh plus one multicast host per edge router."
    for router_name in ("r1", "r2", "r3"):
        tgen.add_router(router_name)

    # Router-to-router links: s1 (r1-r2), s2 (r1-r3), s3 (r2-r3).
    for switch_name, left, right in (
        ("s1", "r1", "r2"),
        ("s2", "r1", "r3"),
        ("s3", "r2", "r3"),
    ):
        sw = tgen.add_switch(switch_name)
        sw.add_link(tgen.gears[left])
        sw.add_link(tgen.gears[right])

    tgen.add_host("h1", "2001:DB8:100::100", "via 2001:DB8:100::1")
    tgen.add_host("h2", "2001:DB8:200::100", "via 2001:DB8:200::1")

    # Host access links: s10 (r2-h1) and s20 (r3-h2).
    for switch_name, router_name, host_name in (
        ("s10", "r2", "h1"),
        ("s20", "r3", "h2"),
    ):
        sw = tgen.add_switch(switch_name)
        sw.add_link(tgen.gears[router_name])
        sw.add_link(tgen.gears[host_name])
# Shared multicast traffic helper (sender/receiver on the hosts); it is
# bound to the topology in setup_module() and reused by every test below.
app_helper = McastTesterHelper()
def setup_module(mod):
    "Start the topology, load each router's unified frr.conf and boot FRR."
    tgen = Topogen(build_topo, mod.__name__)
    tgen.start_topology()

    for router in tgen.routers().values():
        router.load_frr_config(os.path.join(CWD, f"{router.name}/frr.conf"))

    tgen.start_router()
    app_helper.init(tgen)
def teardown_module(mod):
    "Stop all routers and tear the topology down."
    get_topogen().stop_topology()
def test_ospfv3_convergence():
    "Wait for OSPFv3 protocol convergence"
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("waiting for protocols to converge")

    def expect_loopback_route(router, iptype, route, proto):
        "Wait until route is present on RIB for protocol."
        logger.info(f"waiting route {route} in {router}")
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            f"show {iptype} route json",
            {route: [{"protocol": proto}]},
        )
        _, result = topotest.run_and_expect(test_func, None, count=10, wait=8)
        assert result is None, f'"{router}" convergence failure'

    # Every router must learn the loopback /128 of both of its peers.
    loopbacks = {
        "r1": "2001:db8:ffff::1/128",
        "r2": "2001:db8:ffff::2/128",
        "r3": "2001:db8:ffff::3/128",
    }
    for router in loopbacks:
        for peer, route in loopbacks.items():
            if peer != router:
                expect_loopback_route(router, "ipv6", route, "ospf6")
def expect_pim_rp(router, rp, group, interface=None, missing=False):
    """
    Wait until the RP for `group` is present (or absent) on `router`.

    Polls `show ipv6 pim rp-info json` once per second for up to 15 seconds.

    :param router: name of the router to query.
    :param rp: expected RP address for the multicast group.
    :param group: multicast group address (the /128 suffix is appended here).
    :param interface: when set, also require this outbound interface in the
        RP entry.
    :param missing: when True, assert the RP entry does NOT appear within the
        timeout (used for filtered / over-limit groups).
    """
    tgen = get_topogen()
    maximum_wait = 15

    log_message = f"waiting RP {rp} for {group} in {router}"
    if missing:
        # Use implicit string concatenation instead of a backslash continuation.
        log_message += f" to be missing ({maximum_wait} seconds maximum)"
    logger.info(log_message)

    expected = {rp: [{"group": f"{group}/128"}]}
    if interface is not None:
        expected[rp][0]["outboundInterface"] = interface

    test_func = partial(
        topotest.router_json_cmp,
        tgen.gears[router],
        # Plain string: the original used an f-string with no placeholders.
        "show ipv6 pim rp-info json",
        expected,
    )
    _, result = topotest.run_and_expect(test_func, None, count=maximum_wait, wait=1)
    if missing:
        assert result is not None, f'"{router}" convergence failure'
    else:
        assert result is None, f'"{router}" convergence failure'
def test_embedded_rp_mld_join():
    "Learn embedded RPs from MLD joins: permitted, filtered and over-limit cases."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    permitted_group = "ff75:130:2001:db8:ffff::100"
    denied_group = "ff75:330:2001:db8:ffff::200"
    for mcast_group in (permitted_group, denied_group):
        app_helper.run("h1", [mcast_group, "h1-eth0"])

    # Expect first valid join request
    expect_pim_rp("r2", "2001:db8:ffff::1", permitted_group, interface="r2-eth0")
    # Expect filtered join request
    expect_pim_rp("r2", "2001:db8:ffff::2", denied_group, missing=True)

    # Send over the limit join request
    extra_groups = [
        "ff75:130:2001:db8:ffff::300",
        "ff75:130:2001:db8:ffff::301",
        "ff75:130:2001:db8:ffff::302",
    ]
    for mcast_group in extra_groups:
        app_helper.run("h1", [mcast_group, "h1-eth0"])

    topotest.sleep(2, "Waiting MLD join to be sent")

    expect_pim_rp("r2", "2001:db8:ffff::1", extra_groups[0], interface="r2-eth0")
    expect_pim_rp("r2", "2001:db8:ffff::1", extra_groups[1], interface="r2-eth0")
    # Over the limit entry
    expect_pim_rp("r2", "2001:db8:ffff::1", extra_groups[2], missing=True)

    app_helper.stop_all_hosts()
    # Clean up the embedded RPs so we don't cross the limit next phase
    tgen.gears["r2"].vtysh_cmd("clear ipv6 mroute")
def test_embedded_rp_pim_join():
    "Check that a PIM join carrying embedded RP information propagates upstream."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    #
    # Test sending PIM join with embedded RP information to router R2
    #
    mcast_group = "ff75:230:2001:db8:ffff::400"
    embedded_rp = "2001:db8:ffff::2"
    app_helper.run("h2", [mcast_group, "h2-eth0"])

    expect_pim_rp("r3", embedded_rp, mcast_group, interface="r3-eth1")
    expect_pim_rp("r2", embedded_rp, mcast_group, interface="lo")

    app_helper.stop_all_hosts()
def test_embedded_rp_spt_switch():
    "Verify the multicast forwarding state after SPT switchover for an embedded-RP group."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    def expect_pim_state(router, expected):
        "Wait until `router` reports `expected` in `show ipv6 pim state json`."
        logger.info(f"Waiting {router} multicast route installation")
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            # Plain string: the original used f-strings with no placeholders.
            "show ipv6 pim state json",
            expected,
        )
        _, result = topotest.run_and_expect(test_func, None, count=10, wait=8)
        assert result is None, f'"{router}" convergence failure'

    # Clean up the embedded RPs so we don't cross the limit next phase
    for router in ("r1", "r2", "r3"):
        tgen.gears[router].vtysh_cmd("clear ipv6 mroute")

    group = "ff75:130:2001:db8:ffff::500"
    rp = "2001:db8:ffff::1"
    source = "2001:db8:100::100"

    # Join from r3 (host h2)
    app_helper.run("h2", [group, "h2-eth0"])
    # Wait for embedded RP to show up
    expect_pim_rp("r3", rp, group, interface="r3-eth0")
    # Send stream from r2 (host h1)
    app_helper.run("h1", ["--send=0.7", group, "h1-eth0"])

    # R1 (the embedded RP) must hold both (*,G) and (S,G) state.
    expect_pim_state("r1", {group: {"*": {}, source: {}}})
    # R2 forwards the source stream from r2-eth2 towards r3 via r2-eth1.
    expect_pim_state("r2", {group: {source: {"r2-eth2": {"r2-eth1": {}}}}})
    # R3 receives on r3-eth1 and delivers to the host on r3-eth2.
    expect_pim_state("r3", {group: {source: {"r3-eth1": {"r3-eth2": {}}}}})
def test_memory_leak():
    "Report any memory leaks detected during the test run."
    tgen = get_topogen()
    if tgen.is_memleak_enabled():
        tgen.report_memory_leaks()
    else:
        pytest.skip("Memory leak test/report is disabled")
if __name__ == "__main__":
    # Allow running this file directly; -s disables pytest output capture.
    sys.exit(pytest.main(["-s", *sys.argv[1:]]))

View File

@ -64,6 +64,14 @@ module frr-pim-rp {
"RFC XXXX: A YANG Data Model for PIM RP"; "RFC XXXX: A YANG Data Model for PIM RP";
} }
revision 2024-09-26 {
description
"Add support for embedded RP.";
reference
"RFC 3956: Embedding the Rendezvous Point (RP) Address in an IPv6
Multicast Address";
}
typedef plist-ref { typedef plist-ref {
type string; type string;
} }
@ -111,6 +119,42 @@ module frr-pim-rp {
} // static-rp } // static-rp
} // static-rp-container } // static-rp-container
grouping embedded-rp-group {
  container embedded-rp {
    description "Embedded RP (RFC 3956) configuration.";
    leaf enable {
      description
        "Toggle the embedded RP state:
         When enabled, the RP learned from the multicast group address
         will be preferred over any static or dynamic RP configuration.
         When disabled, the packet will be processed as usual.";
      type boolean;
      default "false";
    }
    leaf group-list {
      description
        "Restrict embedded RP prefix ranges.
         The default is to treat all multicast groups in the FF70::/12
         range as embedded RP. When a group prefix list is configured
         and the group does not match one of its permit entries, it will
         be treated as a regular multicast group.";
      type plist-ref;
    }
    leaf maximum-rps {
      description
        "Maximum allowed number of RPs to learn.";
      type uint32;
      default 25;
    }
  } // embedded-rp container
} // embedded-rp group
grouping auto-rp-container { grouping auto-rp-container {
description description
"Grouping of AutoRP container."; "Grouping of AutoRP container.";
@ -194,6 +238,13 @@ module frr-pim-rp {
"Only applicable to IPv4 address family."; "Only applicable to IPv4 address family.";
} }
} }
uses embedded-rp-group {
when "../frr-pim:address-family = 'frr-rt:ipv6'" {
description
"Only available for IPv6 addresses.";
}
}
} // rp } // rp
} // augment } // augment
} }