Merge pull request #18032 from opensourcerouting/gmp-limit

pimd,pim6d: implement GMP group / source limits
This commit is contained in:
Jafar Al-Gharaibeh 2025-02-13 09:32:07 -06:00 committed by GitHub
commit f70530c8e8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 876 additions and 8 deletions

View File

@ -437,6 +437,14 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
Set the IGMP version used on this interface. The default value is 3. Set the IGMP version used on this interface. The default value is 3.
.. clicmd:: ip igmp max-groups (0-4294967295)
Set the maximum number of IGMP groups that can be joined on an interface.
.. clicmd:: ip igmp max-sources (0-4294967295)
Set the maximum number of IGMP sources to learn per group.
.. clicmd:: ip multicast boundary oil WORD .. clicmd:: ip multicast boundary oil WORD
Set a PIM multicast boundary, based upon the WORD prefix-list. If a PIM join Set a PIM multicast boundary, based upon the WORD prefix-list. If a PIM join

View File

@ -258,6 +258,14 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
Set the MLD version used on this interface. The default value is 2. Set the MLD version used on this interface. The default value is 2.
.. clicmd:: ipv6 mld max-groups (0-4294967295)
Set the maximum number of MLD groups that can be joined on an interface.
.. clicmd:: ipv6 mld max-sources (0-4294967295)
Set the maximum number of MLD sources to learn per group.
.. clicmd:: ipv6 multicast boundary oil WORD .. clicmd:: ipv6 multicast boundary oil WORD
Set a PIMv6 multicast boundary, based upon the WORD prefix-list. If a PIMv6 Set a PIMv6 multicast boundary, based upon the WORD prefix-list. If a PIMv6
@ -481,6 +489,10 @@ PIMv6 Clear Commands
Clear commands reset various variables. Clear commands reset various variables.
.. clicmd:: clear ipv6 mld [vrf NAME] interfaces
Reset learned multicast groups / sources.
.. clicmd:: clear ipv6 mroute .. clicmd:: clear ipv6 mroute
Reset multicast routes. Reset multicast routes.

View File

@ -1612,6 +1612,43 @@ DEFPY (interface_no_ipv6_mld_version,
"frr-routing:ipv6"); "frr-routing:ipv6");
} }
DEFPY_YANG(interface_ipv6_mld_limits,
	   interface_ipv6_mld_limits_cmd,
	   "[no] ipv6 mld <max-sources$do_src (0-4294967295)$val"
	   "|max-groups$do_grp (0-4294967295)$val>",
	   NO_STR
	   IPV6_STR
	   IFACE_MLD_STR
	   "Limit number of MLDv2 sources to track\n"
	   "Permitted number of sources\n"
	   "Limit number of MLD group memberships to track\n"
	   "Permitted number of groups\n")
{
	/* Exactly one of the two alternatives matched; pick the
	 * corresponding leaf under the GMP address-family node.
	 */
	const char *xpath = do_src ? "./max-sources" : "./max-groups";

	assert(do_src || do_grp);

	/* "no" removes the limit (back to YANG default), otherwise set it. */
	nb_cli_enqueue_change(vty, xpath, no ? NB_OP_DESTROY : NB_OP_MODIFY,
			      no ? NULL : val_str);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}
/* Value-less "no" form: lets the operator remove a limit without having
 * to repeat the previously configured number.
 */
ALIAS_YANG(interface_ipv6_mld_limits,
	   no_interface_ipv6_mld_limits_cmd,
	   "no ipv6 mld <max-sources$do_src|max-groups$do_grp>",
	   NO_STR
	   IPV6_STR
	   IFACE_MLD_STR
	   "Limit number of MLDv2 sources to track\n"
	   "Limit number of MLD group memberships to track\n")
DEFPY (interface_ipv6_mld_query_interval, DEFPY (interface_ipv6_mld_query_interval,
interface_ipv6_mld_query_interval_cmd, interface_ipv6_mld_query_interval_cmd,
"ipv6 mld query-interval (1-65535)$q_interval", "ipv6 mld query-interval (1-65535)$q_interval",
@ -2341,6 +2378,32 @@ DEFPY (show_ipv6_pim_bsrp,
return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json); return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json);
} }
DEFPY(clear_ipv6_mld_interfaces,
      clear_ipv6_mld_interfaces_cmd,
      "clear ipv6 mld [vrf NAME$vrf_name] interfaces",
      CLEAR_STR
      IPV6_STR
      "MLD clear commands\n"
      VRF_CMD_HELP_STR
      "Reset MLD interfaces\n")
{
	struct interface *ifp;
	struct vrf *vrf;

	vrf = vrf_name ? vrf_lookup_by_name(vrf_name) : vrf_lookup_by_id(VRF_DEFAULT);
	if (!vrf) {
		/* vrf_name is NULL when the default VRF was requested;
		 * passing NULL to %s is undefined behavior, so fall back
		 * to the default VRF's name in the error message.
		 */
		vty_out(vty, "Specified VRF: %s does not exist\n",
			vrf_name ? vrf_name : VRF_DEFAULT_NAME);
		return CMD_WARNING;
	}

	/* Re-learn state by bouncing all addresses: delete first, then
	 * re-add, which flushes learned groups/sources on every interface.
	 */
	FOR_ALL_INTERFACES (vrf, ifp)
		pim_if_addr_del_all(ifp);
	FOR_ALL_INTERFACES (vrf, ifp)
		pim_if_addr_add_all(ifp);

	return CMD_SUCCESS;
}
DEFPY (clear_ipv6_pim_statistics, DEFPY (clear_ipv6_pim_statistics,
clear_ipv6_pim_statistics_cmd, clear_ipv6_pim_statistics_cmd,
"clear ipv6 pim statistics [vrf NAME]$name", "clear ipv6 pim statistics [vrf NAME]$name",
@ -2865,6 +2928,9 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE, &interface_no_ipv6_pim_boundary_oil_cmd); install_element(INTERFACE_NODE, &interface_no_ipv6_pim_boundary_oil_cmd);
install_element(INTERFACE_NODE, &interface_ipv6_mroute_cmd); install_element(INTERFACE_NODE, &interface_ipv6_mroute_cmd);
install_element(INTERFACE_NODE, &interface_no_ipv6_mroute_cmd); install_element(INTERFACE_NODE, &interface_no_ipv6_mroute_cmd);
install_element(INTERFACE_NODE, &interface_ipv6_mld_limits_cmd);
install_element(INTERFACE_NODE, &no_interface_ipv6_mld_limits_cmd);
/* Install BSM command */ /* Install BSM command */
install_element(INTERFACE_NODE, &ipv6_pim_bsm_cmd); install_element(INTERFACE_NODE, &ipv6_pim_bsm_cmd);
install_element(INTERFACE_NODE, &no_ipv6_pim_bsm_cmd); install_element(INTERFACE_NODE, &no_ipv6_pim_bsm_cmd);
@ -2935,6 +3001,7 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ipv6_pim_bsr_cmd); install_element(VIEW_NODE, &show_ipv6_pim_bsr_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_bsm_db_cmd); install_element(VIEW_NODE, &show_ipv6_pim_bsm_db_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_bsrp_cmd); install_element(VIEW_NODE, &show_ipv6_pim_bsrp_cmd);
install_element(ENABLE_NODE, &clear_ipv6_mld_interfaces_cmd);
install_element(ENABLE_NODE, &clear_ipv6_pim_statistics_cmd); install_element(ENABLE_NODE, &clear_ipv6_pim_statistics_cmd);
install_element(ENABLE_NODE, &clear_ipv6_mroute_cmd); install_element(ENABLE_NODE, &clear_ipv6_mroute_cmd);
install_element(ENABLE_NODE, &clear_ipv6_pim_oil_cmd); install_element(ENABLE_NODE, &clear_ipv6_pim_oil_cmd);

View File

@ -190,11 +190,26 @@ static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
return gm_sgs_find(gm_ifp->sgs, &ref); return gm_sgs_find(gm_ifp->sgs, &ref);
} }
static bool gm_sg_has_group(struct gm_sgs_head *sgs, const pim_addr group)
{
struct gm_sg *sg;
frr_each (gm_sgs, sgs, sg)
if (pim_addr_cmp(sg->sgaddr.grp, group) == 0)
return true;
return false;
}
static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp, static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
pim_addr src) pim_addr src)
{ {
struct gm_sg *ret, *prev; struct gm_sg *ret, *prev;
/* Count all unique group members. */
if (!gm_sg_has_group(gm_ifp->sgs, grp))
gm_ifp->groups_count++;
ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret)); ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
ret->sgaddr.grp = grp; ret->sgaddr.grp = grp;
ret->sgaddr.src = src; ret->sgaddr.src = src;
@ -212,6 +227,47 @@ static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
return ret; return ret;
} }
static size_t gm_sg_source_count(struct gm_sgs_head *sgs, const pim_addr group)
{
struct gm_sg *sg;
size_t source_count;
source_count = 0;
frr_each (gm_sgs, sgs, sg)
if (pim_addr_cmp(sg->sgaddr.grp, group) == 0)
source_count++;
return source_count;
}
/*
 * Decide whether adding (source, group) would exceed the interface's
 * configured limits.  A group not yet present counts against the group
 * limit; a new source for an existing group counts against the source
 * limit.  Returns true when the addition must be refused.
 */
static bool gm_sg_limit_reached(struct gm_if *gm_if, const pim_addr source, const pim_addr group)
{
	const struct pim_interface *pim_interface = gm_if->ifp->info;

	if (!gm_sg_has_group(gm_if->sgs, group)) {
		/* New group: only the per-interface group count matters. */
		if (gm_if->groups_count < pim_interface->gm_group_limit)
			return false;

		if (PIM_DEBUG_GM_TRACE)
			zlog_debug("interface %s has reached group limit (%u), refusing to add group %pPA",
				   gm_if->ifp->name, pim_interface->gm_group_limit, &group);
		return true;
	}

	/* Existing group: check the per-group source count. */
	if (gm_sg_source_count(gm_if->sgs, group) < pim_interface->gm_source_limit)
		return false;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug("interface %s has reached source limit (%u), refusing to add source %pPA (group %pPA)",
			   gm_if->ifp->name, pim_interface->gm_source_limit, &source,
			   &group);
	return true;
}
/* /*
* interface -> packets, sorted by expiry (because add_tail insert order) * interface -> packets, sorted by expiry (because add_tail insert order)
*/ */
@ -471,6 +527,11 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
zlog_debug(log_sg(sg, "dropping")); zlog_debug(log_sg(sg, "dropping"));
gm_sgs_del(gm_ifp->sgs, sg); gm_sgs_del(gm_ifp->sgs, sg);
/* Decrement unique group members counter. */
if (!gm_sg_has_group(gm_ifp->sgs, sg->sgaddr.grp))
gm_ifp->groups_count--;
gm_sg_free(sg); gm_sg_free(sg);
} }
} }
@ -634,8 +695,12 @@ static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
case MLD_RECTYPE_CHANGE_TO_EXCLUDE: case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
/* this always replaces or creates state */ /* this always replaces or creates state */
is_excl = true; is_excl = true;
if (!grp) if (!grp) {
if (gm_sg_limit_reached(pkt->iface, PIMADDR_ANY, rechdr->grp))
return;
grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY); grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);
}
item = gm_packet_sg_setup(pkt, grp, is_excl, false); item = gm_packet_sg_setup(pkt, grp, is_excl, false);
item->n_exclude = n_src; item->n_exclude = n_src;
@ -700,9 +765,13 @@ static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
struct gm_sg *sg; struct gm_sg *sg;
sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]); sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
if (!sg) if (!sg) {
if (gm_sg_limit_reached(pkt->iface, rechdr->srcs[j], rechdr->grp))
return;
sg = gm_sg_make(pkt->iface, rechdr->grp, sg = gm_sg_make(pkt->iface, rechdr->grp,
rechdr->srcs[j]); rechdr->srcs[j]);
}
gm_packet_sg_setup(pkt, sg, is_excl, true); gm_packet_sg_setup(pkt, sg, is_excl, true);
} }
@ -952,6 +1021,10 @@ static void gm_handle_v1_report(struct gm_if *gm_ifp,
hdr = (struct mld_v1_pkt *)data; hdr = (struct mld_v1_pkt *)data;
if (!gm_sg_has_group(gm_ifp->sgs, hdr->grp) &&
gm_sg_limit_reached(gm_ifp, PIMADDR_ANY, hdr->grp))
return;
max_entries = 1; max_entries = 1;
pkt = XCALLOC(MTYPE_GM_STATE, pkt = XCALLOC(MTYPE_GM_STATE,
offsetof(struct gm_packet_state, items[max_entries])); offsetof(struct gm_packet_state, items[max_entries]));
@ -1255,6 +1328,9 @@ static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
for (i = 0; i < n_src; i++) { for (i = 0; i < n_src; i++) {
sg = gm_sg_find(gm_ifp, grp, srcs[i]); sg = gm_sg_find(gm_ifp, grp, srcs[i]);
if (sg == NULL)
continue;
GM_UPDATE_SG_STATE(sg); GM_UPDATE_SG_STATE(sg);
gm_sg_timer_start(gm_ifp, sg, timers->expire_wait); gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
} }

View File

@ -350,6 +350,8 @@ struct gm_if {
struct gm_subscribers_head subscribers[1]; struct gm_subscribers_head subscribers[1];
struct gm_packet_expires_head expires[1]; struct gm_packet_expires_head expires[1];
size_t groups_count;
struct timeval started; struct timeval started;
struct gm_if_stats stats; struct gm_if_stats stats;
}; };

View File

@ -5656,6 +5656,43 @@ DEFUN (interface_no_ip_igmp_last_member_query_interval,
return gm_process_no_last_member_query_interval_cmd(vty); return gm_process_no_last_member_query_interval_cmd(vty);
} }
DEFPY_YANG(interface_ip_igmp_limits,
	   interface_ip_igmp_limits_cmd,
	   "[no] ip igmp <max-sources$do_src (0-4294967295)$val"
	   "|max-groups$do_grp (0-4294967295)$val>",
	   NO_STR
	   IP_STR
	   IFACE_IGMP_STR
	   "Limit number of IGMPv3 sources to track\n"
	   "Permitted number of sources\n"
	   "Limit number of IGMP group memberships to track\n"
	   "Permitted number of groups\n")
{
	/* Exactly one of the two alternatives matched; select the
	 * corresponding leaf under the GMP address-family node.
	 */
	const char *xpath = do_src ? "./max-sources" : "./max-groups";

	assert(do_src || do_grp);

	/* "no" removes the limit (back to YANG default), otherwise set it. */
	nb_cli_enqueue_change(vty, xpath, no ? NB_OP_DESTROY : NB_OP_MODIFY,
			      no ? NULL : val_str);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}
/* Value-less "no" form: lets the operator remove a limit without having
 * to repeat the previously configured number.
 */
ALIAS_YANG(interface_ip_igmp_limits,
	   no_interface_ip_igmp_limits_cmd,
	   "no ip igmp <max-sources$do_src|max-groups$do_grp>",
	   NO_STR
	   IP_STR
	   IFACE_IGMP_STR
	   "Limit number of IGMPv3 sources to track\n"
	   "Limit number of IGMP group memberships to track\n")
DEFUN (interface_ip_pim_drprio, DEFUN (interface_ip_pim_drprio,
interface_ip_pim_drprio_cmd, interface_ip_pim_drprio_cmd,
"ip pim drpriority (0-4294967295)", "ip pim drpriority (0-4294967295)",
@ -9101,6 +9138,8 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE, install_element(INTERFACE_NODE,
&interface_no_ip_igmp_last_member_query_interval_cmd); &interface_no_ip_igmp_last_member_query_interval_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_proxy_cmd); install_element(INTERFACE_NODE, &interface_ip_igmp_proxy_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_limits_cmd);
install_element(INTERFACE_NODE, &no_interface_ip_igmp_limits_cmd);
install_element(INTERFACE_NODE, &interface_ip_pim_activeactive_cmd); install_element(INTERFACE_NODE, &interface_ip_pim_activeactive_cmd);
install_element(INTERFACE_NODE, &interface_ip_pim_ssm_cmd); install_element(INTERFACE_NODE, &interface_ip_pim_ssm_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_pim_ssm_cmd); install_element(INTERFACE_NODE, &interface_no_ip_pim_ssm_cmd);

View File

@ -128,6 +128,8 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool gm, bool pim,
pim_ifp->gm_specific_query_max_response_time_dsec = pim_ifp->gm_specific_query_max_response_time_dsec =
GM_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC; GM_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC;
pim_ifp->gm_last_member_query_count = GM_DEFAULT_ROBUSTNESS_VARIABLE; pim_ifp->gm_last_member_query_count = GM_DEFAULT_ROBUSTNESS_VARIABLE;
pim_ifp->gm_group_limit = UINT32_MAX;
pim_ifp->gm_source_limit = UINT32_MAX;
/* BSM config on interface: true by default */ /* BSM config on interface: true by default */
pim_ifp->bsm_enable = true; pim_ifp->bsm_enable = true;

View File

@ -105,6 +105,8 @@ struct pim_interface {
struct gm_if *mld; struct gm_if *mld;
uint32_t gm_source_limit, gm_group_limit;
int pim_sock_fd; /* PIM socket file descriptor */ int pim_sock_fd; /* PIM socket file descriptor */
struct event *t_pim_sock_read; /* thread for reading PIM socket */ struct event *t_pim_sock_read; /* thread for reading PIM socket */
int64_t pim_sock_creation; /* timestamp of PIM socket creation */ int64_t pim_sock_creation; /* timestamp of PIM socket creation */

View File

@ -1416,6 +1416,14 @@ struct gm_group *igmp_add_group_by_addr(struct gm_sock *igmp,
__func__, &group_addr); __func__, &group_addr);
return NULL; return NULL;
} }
if (listcount(pim_ifp->gm_group_list) >= pim_ifp->gm_group_limit) {
if (PIM_DEBUG_GM_TRACE)
zlog_debug("interface %s has reached group limit (%u), refusing to add group %pI4",
igmp->interface->name, pim_ifp->gm_group_limit, &group_addr);
return NULL;
}
/* /*
Non-existant group is created as INCLUDE {empty}: Non-existant group is created as INCLUDE {empty}:

View File

@ -423,6 +423,7 @@ struct gm_source *igmp_find_source_by_addr(struct gm_group *group,
struct gm_source *igmp_get_source_by_addr(struct gm_group *group, struct gm_source *igmp_get_source_by_addr(struct gm_group *group,
struct in_addr src_addr, bool *new) struct in_addr src_addr, bool *new)
{ {
const struct pim_interface *pim_interface = group->interface->info;
struct gm_source *src; struct gm_source *src;
if (new) if (new)
@ -432,6 +433,14 @@ struct gm_source *igmp_get_source_by_addr(struct gm_group *group,
if (src) if (src)
return src; return src;
if (listcount(group->group_source_list) >= pim_interface->gm_source_limit) {
if (PIM_DEBUG_GM_TRACE)
zlog_debug("interface %s has reached source limit (%u), refusing to add source %pI4 (group %pI4)",
group->interface->name, pim_interface->gm_source_limit,
&src_addr, &group->group_addr);
return NULL;
}
if (PIM_DEBUG_GM_TRACE) { if (PIM_DEBUG_GM_TRACE) {
char group_str[INET_ADDRSTRLEN]; char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN]; char source_str[INET_ADDRSTRLEN];

View File

@ -724,6 +724,18 @@ const struct frr_yang_module_info frr_gmp_info = {
.create = lib_interface_gmp_address_family_join_group_create, .create = lib_interface_gmp_address_family_join_group_create,
.destroy = lib_interface_gmp_address_family_join_group_destroy, .destroy = lib_interface_gmp_address_family_join_group_destroy,
} }
},
{
.xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/max-sources",
.cbs = {
.modify = lib_interface_gm_max_sources_modify,
}
},
{
.xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/max-groups",
.cbs = {
.modify = lib_interface_gm_max_groups_modify,
}
}, },
{ {
.xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/proxy", .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/proxy",

View File

@ -287,6 +287,8 @@ int lib_interface_gmp_address_family_static_group_create(
struct nb_cb_create_args *args); struct nb_cb_create_args *args);
int lib_interface_gmp_address_family_static_group_destroy( int lib_interface_gmp_address_family_static_group_destroy(
struct nb_cb_destroy_args *args); struct nb_cb_destroy_args *args);
int lib_interface_gm_max_sources_modify(struct nb_cb_modify_args *args);
int lib_interface_gm_max_groups_modify(struct nb_cb_modify_args *args);
/* /*
* Callback registered with routing_nb lib to validate only * Callback registered with routing_nb lib to validate only

View File

@ -4396,6 +4396,72 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify(
return NB_OK; return NB_OK;
} }
/*
 * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/max-groups
 */
int lib_interface_gm_max_groups_modify(struct nb_cb_modify_args *args)
{
	const struct lyd_node *if_dnode;
	struct pim_interface *pim_ifp;
	struct interface *ifp;

	switch (args->event) {
	case NB_EV_VALIDATE:
		/* A group limit only makes sense on a multicast-enabled
		 * interface; reject the config otherwise.
		 */
		if_dnode = yang_dnode_get_parent(args->dnode, "interface");
		if (is_pim_interface(if_dnode))
			break;

		snprintf(args->errmsg, args->errmsg_len,
			 "multicast not enabled on interface %s",
			 yang_dnode_get_string(if_dnode, "name"));
		return NB_ERR_VALIDATION;
	case NB_EV_PREPARE:
	case NB_EV_ABORT:
		break;
	case NB_EV_APPLY:
		ifp = nb_running_get_entry(args->dnode, NULL, true);
		pim_ifp = ifp->info;
		pim_ifp->gm_group_limit = yang_dnode_get_uint32(args->dnode, NULL);
		break;
	}

	return NB_OK;
}
/*
 * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/max-sources
 */
int lib_interface_gm_max_sources_modify(struct nb_cb_modify_args *args)
{
	const struct lyd_node *if_dnode;
	struct pim_interface *pim_ifp;
	struct interface *ifp;

	switch (args->event) {
	case NB_EV_VALIDATE:
		/* A source limit only makes sense on a multicast-enabled
		 * interface; reject the config otherwise.
		 */
		if_dnode = yang_dnode_get_parent(args->dnode, "interface");
		if (is_pim_interface(if_dnode))
			break;

		snprintf(args->errmsg, args->errmsg_len,
			 "multicast not enabled on interface %s",
			 yang_dnode_get_string(if_dnode, "name"));
		return NB_ERR_VALIDATION;
	case NB_EV_PREPARE:
	case NB_EV_ABORT:
		break;
	case NB_EV_APPLY:
		ifp = nb_running_get_entry(args->dnode, NULL, true);
		pim_ifp = ifp->info;
		pim_ifp->gm_source_limit = yang_dnode_get_uint32(args->dnode, NULL);
		break;
	}

	return NB_OK;
}
/* /*
* XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/robustness-variable * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/robustness-variable
*/ */

View File

@ -457,6 +457,20 @@ int pim_config_write(struct vty *vty, int writes, struct interface *ifp,
++writes; ++writes;
} }
/* IF igmp/mld max-sources */
if (pim_ifp->gm_source_limit != UINT32_MAX) {
vty_out(vty, " " PIM_AF_NAME " " GM_AF_DBG " max-sources %u\n",
pim_ifp->gm_source_limit);
++writes;
}
/* IF igmp/mld max-groups */
if (pim_ifp->gm_group_limit != UINT32_MAX) {
vty_out(vty, " " PIM_AF_NAME " " GM_AF_DBG " max-groups %u\n",
pim_ifp->gm_group_limit);
++writes;
}
/* IF ip pim drpriority */ /* IF ip pim drpriority */
if (pim_ifp->pim_dr_priority != PIM_DEFAULT_DR_PRIORITY) { if (pim_ifp->pim_dr_priority != PIM_DEFAULT_DR_PRIORITY) {
vty_out(vty, " " PIM_AF_NAME " pim drpriority %u\n", vty_out(vty, " " PIM_AF_NAME " pim drpriority %u\n",

View File

@ -34,16 +34,64 @@ def interface_name_to_index(name):
return None return None
def interface_index_to_address(index, iptype="inet"):
    "Gets the interface main address using its index. Returns None on failure."
    interfaces = json.loads(subprocess.check_output("ip -j addr show", shell=True))
    for interface in interfaces:
        if interface["ifindex"] == index:
            break
    else:
        # BUG FIX: previously fell through with the last interface in the
        # list (or raised NameError on an empty list) instead of honoring
        # the documented "Returns None on failure".
        return None

    for address in interface["addr_info"]:
        if address["family"] == iptype:
            break
    else:
        # Interface has no address of the requested family.
        return None

    return ipaddress.ip_address(address["local"]).packed
def group_source_req(ifindex, group, source):
    "Packs the information into 'struct group_source_req' format."

    def sockaddr_storage(addr):
        # 4 bytes of struct padding, then a sockaddr_in6 layout:
        # family, port (0), flowinfo (0), 16-byte address, scope id (0),
        # zero-padded to 128 bytes total.
        raw = (
            struct.pack("<IHHI", 0, socket.AF_INET6, 0, 0)
            + addr.packed
            + struct.pack("<I", 0)
        )
        return raw + bytes(128 - len(raw))

    # gsr_interface, gsr_group, gsr_source, trailing struct padding.
    return (
        struct.pack("<I", ifindex)
        + sockaddr_storage(group)
        + sockaddr_storage(source)
        + bytes(4)
    )
def multicast_join(sock, ifindex, group, port, source=None):
    """
    Joins a multicast group.

    When `source` is provided (anything accepted by ipaddress.ip_address),
    performs a source-specific (SSM) join instead of an any-source join.
    """
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    if ip_version == 4:
        if source is None:
            mreq = group.packed + struct.pack("@II", socket.INADDR_ANY, ifindex)
            opt = socket.IP_ADD_MEMBERSHIP
        else:
            source = ipaddress.ip_address(source)
            mreq = group.packed + interface_index_to_address(ifindex) + source.packed
            # IP_ADD_SOURCE_MEMBERSHIP: not exposed by the socket module.
            opt = 39
    else:
        if source is None:
            mreq = group.packed + struct.pack("@I", ifindex)
            opt = socket.IPV6_JOIN_GROUP
        else:
            mreq = group_source_req(ifindex, group, ipaddress.ip_address(source))
            # MCAST_JOIN_SOURCE_GROUP: not exposed by the socket module.
            # BUG FIX: removed leftover debug `print(mreq)`.
            opt = 46

    sock.bind((str(group), port))
    sock.setsockopt(ip_proto, opt, mreq)
@ -57,6 +105,7 @@ parser.add_argument("interface", help="Interface name")
parser.add_argument("--port", type=int, default=1000, help="port to send to") parser.add_argument("--port", type=int, default=1000, help="port to send to")
parser.add_argument("--ttl", type=int, default=16, help="TTL/hops for sending packets") parser.add_argument("--ttl", type=int, default=16, help="TTL/hops for sending packets")
parser.add_argument("--socket", help="Point to topotest UNIX socket") parser.add_argument("--socket", help="Point to topotest UNIX socket")
parser.add_argument("--source", help="Source address for multicast")
parser.add_argument( parser.add_argument(
"--send", help="Transmit instead of join with interval", type=float, default=0 "--send", help="Transmit instead of join with interval", type=float, default=0
) )
@ -112,7 +161,7 @@ if args.send > 0:
# Block to ensure packet send. # Block to ensure packet send.
msock.setblocking(True) msock.setblocking(True)
else: else:
multicast_join(msock, ifindex, args.group, args.port) multicast_join(msock, ifindex, args.group, args.port, args.source)
def should_exit(): def should_exit():

View File

@ -0,0 +1,48 @@
log commands
!
interface r1-eth0
ip address 192.168.1.1/24
ip pim
ipv6 address 2001:db8:1::1/64
ipv6 pim
!
interface r1-eth1
ip address 192.168.2.1/24
ip pim
ipv6 address 2001:db8:2::1/64
ipv6 pim
!
interface r1-eth2
ip address 192.168.100.1/24
ip igmp
ip pim passive
ipv6 address 2001:db8:ffff::1/128
ipv6 mld
ipv6 pim passive
!
interface lo
ip address 10.254.254.1/32
ip pim
ip pim use-source 10.254.254.1
!
router bgp 65100
no bgp ebgp-requires-policy
no bgp network import-check
neighbor 192.168.1.2 remote-as 65200
neighbor 192.168.2.2 remote-as 65300
!
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
neighbor 192.168.1.2 activate
neighbor 192.168.2.2 activate
exit-address-family
!
router pim
rp 10.254.254.1
!
router pim6
rp 2001:db8:ffff::1
!

View File

@ -0,0 +1,37 @@
log commands
!
interface r2-eth0
ip address 192.168.1.2/24
ip pim
ipv6 address 2001:db8:1::2/64
ipv6 pim
!
interface r2-eth1
ip address 192.168.101.1/24
ip igmp
ip pim passive
!
interface lo
ip address 10.254.254.2/32
ipv6 address 2001:db8:ffff::2/128
ipv6 pim passive
!
router bgp 65200
no bgp ebgp-requires-policy
no bgp network import-check
neighbor 192.168.1.1 remote-as 65100
!
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
neighbor 192.168.1.1 activate
exit-address-family
!
router pim
rp 10.254.254.2
!
router pim6
rp 2001:db8:ffff::2
!

View File

@ -0,0 +1,34 @@
log commands
!
interface r3-eth0
ip address 192.168.2.2/24
ip pim
ipv6 address 2001:db8:2::2/64
ipv6 pim
!
interface lo
ip address 10.254.254.3/32
ip pim
ip pim use-source 10.254.254.3
ipv6 address 2001:db8:ffff::3/128
ipv6 pim passive
!
router bgp 65300
no bgp ebgp-requires-policy
no bgp network import-check
neighbor 192.168.2.1 remote-as 65100
!
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
neighbor 192.168.2.1 activate
exit-address-family
!
router pim
rp 10.254.254.1
!
router pim6
rp 2001:db8:ffff::1
!

View File

@ -0,0 +1,367 @@
#!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# test_multicast_features.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2025 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
"""
test_multicast_features.py: Test the FRR PIM multicast features.
"""
import os
import sys
import json
from functools import partial
import re
import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.pim import McastTesterHelper
pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
app_helper = McastTesterHelper()
def build_topo(tgen):
    """
    +----+     +----+     +----+     +----+
    | h1 | <-> | r1 | <-> | r2 | <-> | h2 |
    +----+     +----+     +----+     +----+
                 ^
                 |
                 v
               +----+
               | r3 |
               +----+
    """
    # NOTE(review): interface names (rX-ethN) are assigned in link-add
    # order, so the order of the add_link calls below must stay in sync
    # with the interface names used in the per-router frr.conf files.

    # Create 3 routers
    for routern in range(1, 4):
        tgen.add_router(f"r{routern}")

    # R1 interface eth0 and R2 interface eth0
    switch = tgen.add_switch("s1")
    switch.add_link(tgen.gears["r1"])
    switch.add_link(tgen.gears["r2"])

    # R1 interface eth1
    switch = tgen.add_switch("s2")
    switch.add_link(tgen.gears["r1"])
    switch.add_link(tgen.gears["r3"])

    # R1 interface eth2 (host h1 attached, default route via r1)
    switch = tgen.add_switch("s3")
    tgen.add_host("h1", "192.168.100.100/24", "via 192.168.100.1")
    switch.add_link(tgen.gears["r1"])
    switch.add_link(tgen.gears["h1"])

    # R2 interface eth1 (host h2 attached, default route via r2)
    switch = tgen.add_switch("s4")
    tgen.add_host("h2", "192.168.101.100/24", "via 192.168.101.1")
    switch.add_link(tgen.gears["r2"])
    switch.add_link(tgen.gears["h2"])
def setup_module(mod):
    "Sets up the pytest environment"
    tgen = Topogen(build_topo, mod.__name__)
    tgen.start_topology()

    # Load each router's unified frr.conf before starting the daemons.
    for router in tgen.routers().values():
        router.load_frr_config(os.path.join(CWD, f"{router.name}/frr.conf"))

    # Initialize all routers.
    tgen.start_router()

    app_helper.init(tgen)
def teardown_module():
    "Teardown the pytest environment"
    tgen = get_topogen()
    # Stop any multicast sender/receiver helper processes before tearing
    # the topology down.
    app_helper.cleanup()
    tgen.stop_topology()
def test_bgp_convergence():
    "Wait for BGP protocol convergence"
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("waiting for protocols to converge")

    def expect_loopback_route(router, iptype, route, proto):
        "Wait until route is present on RIB for protocol."
        logger.info(f"waiting route {route} in {router}")
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            f"show {iptype} route json",
            {route: [{"protocol": proto}]},
        )
        _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
        assertmsg = '"{}" convergence failure'.format(router)
        assert result is None, assertmsg

    # Each router must learn the other two routers' loopbacks via BGP.
    for router, iptype, route in [
        ("r1", "ip", "10.254.254.2/32"),
        ("r1", "ip", "10.254.254.3/32"),
        ("r1", "ipv6", "2001:db8:ffff::2/128"),
        ("r1", "ipv6", "2001:db8:ffff::3/128"),
        ("r2", "ip", "10.254.254.1/32"),
        ("r2", "ip", "10.254.254.3/32"),
        ("r2", "ipv6", "2001:db8:ffff::1/128"),
        ("r2", "ipv6", "2001:db8:ffff::3/128"),
        ("r3", "ip", "10.254.254.1/32"),
        ("r3", "ip", "10.254.254.2/32"),
        ("r3", "ipv6", "2001:db8:ffff::1/128"),
        ("r3", "ipv6", "2001:db8:ffff::2/128"),
    ]:
        expect_loopback_route(router, iptype, route, "bgp")
def test_pim_convergence():
    "Wait for PIM peers find each other."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    def expect_pim_peer(router, iptype, interface, peer):
        "Wait until peer is present."
        logger.info(f"waiting peer {peer} in {router}")
        expected = {interface: {peer: {"upTime": "*"}}}
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            f"show {iptype} pim neighbor json",
            expected,
        )
        _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
        assertmsg = f'"{router}" convergence failure'
        assert result is None, assertmsg

    # IPv4 neighbor addresses are static, taken from the frr.conf files.
    expect_pim_peer("r1", "ip", "r1-eth0", "192.168.1.2")
    expect_pim_peer("r2", "ip", "r2-eth0", "192.168.1.1")
    expect_pim_peer("r1", "ip", "r1-eth1", "192.168.2.2")

    #
    # IPv6 part
    #
    # NOTE(review): ipAddresses[1] is assumed to be the link-local address
    # that PIMv6 uses as the neighbor identifier (index 0 being the
    # configured global address) — confirm the ordering is stable.
    out = tgen.gears["r1"].vtysh_cmd("show interface r1-eth0 json", True)
    r1_r2_link_address = out["r1-eth0"]["ipAddresses"][1]["address"].split('/')[0]
    out = tgen.gears["r1"].vtysh_cmd("show interface r1-eth1 json", True)
    r1_r3_link_address = out["r1-eth1"]["ipAddresses"][1]["address"].split('/')[0]
    out = tgen.gears["r2"].vtysh_cmd("show interface r2-eth0 json", True)
    r2_link_address = out["r2-eth0"]["ipAddresses"][1]["address"].split('/')[0]
    out = tgen.gears["r3"].vtysh_cmd("show interface r3-eth0 json", True)
    r3_link_address = out["r3-eth0"]["ipAddresses"][1]["address"].split('/')[0]

    expect_pim_peer("r1", "ipv6", "r1-eth0", r2_link_address)
    expect_pim_peer("r2", "ipv6", "r2-eth0", r1_r2_link_address)
    expect_pim_peer("r1", "ipv6", "r1-eth1", r3_link_address)
def test_igmp_group_limit():
    "Test IGMP group limits."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Allow at most 4 IGMP groups on the host-facing interface.
    tgen.gears["r1"].vtysh_cmd("""
    configure terminal
    interface r1-eth2
    ip igmp max-groups 4
    """)

    # Join 6 groups: only the first 4 should be accepted.
    for last_octet in range(1, 7):
        app_helper.run("h1", [f"224.0.100.{last_octet}", "h1-eth0"])

    def expect_igmp_group_count():
        igmp_groups = tgen.gears["r1"].vtysh_cmd("show ip igmp groups json", isjson=True)
        try:
            return len(igmp_groups["r1-eth2"]["groups"])
        except KeyError:
            return 0

    # BUG FIX: the run_and_expect result was previously discarded, so the
    # test could never fail. Assert the group count settles at the limit.
    _, result = topotest.run_and_expect(expect_igmp_group_count, 4, count=10, wait=2)
    assert result == 4, "r1-eth2 IGMP group count did not settle at the max-groups limit"

    # Cleanup
    app_helper.stop_host("h1")
    tgen.gears["r1"].vtysh_cmd("""
    configure terminal
    interface r1-eth2
    no ip igmp max-groups 4
    exit
    clear ip igmp interfaces
    """)
def test_igmp_group_source_limit():
    "Test IGMP source limits."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Allow at most 4 sources per group on the host-facing interface.
    tgen.gears["r1"].vtysh_cmd("""
    configure terminal
    interface r1-eth2
    ip igmp max-sources 4
    exit
    """)

    # SSM-join the same group from 7 sources: only 4 should be tracked.
    for last_octet in range(10, 17):
        app_helper.run("h1", [f"--source=192.168.100.{last_octet}", "232.0.101.10", "h1-eth0"])

    def expect_igmp_group_source_count():
        igmp_sources = tgen.gears["r1"].vtysh_cmd("show ip igmp sources json", isjson=True)
        try:
            return len(igmp_sources["r1-eth2"]["232.0.101.10"]["sources"])
        except KeyError:
            return 0

    # BUG FIX: the run_and_expect result was previously discarded, so the
    # test could never fail. Assert the source count settles at the limit.
    _, result = topotest.run_and_expect(expect_igmp_group_source_count, 4, count=10, wait=2)
    assert result == 4, "r1-eth2 IGMP source count did not settle at the max-sources limit"

    # Cleanup
    tgen.gears["r1"].vtysh_cmd("""
    configure terminal
    interface r1-eth2
    no ip igmp max-sources 4
    exit
    clear ip igmp interfaces
    """)
    app_helper.stop_host("h1")
def test_mld_group_limit():
    "Test MLD group limits."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Cap the number of MLD groups that r1-eth2 will track at 14.
    tgen.gears["r1"].vtysh_cmd(
        """
        configure terminal
        interface r1-eth2
        ipv6 mld max-groups 14
        """
    )

    # Join 16 groups (FF05::100 .. FF05::115); only 14 may be accepted.
    # NOTE: the decimal formatting of 100..115 reproduces the original
    # group-address strings exactly.
    for suffix in range(100, 116):
        app_helper.run("h1", [f"FF05::{suffix}", "h1-eth0"])

    def mld_group_count():
        "Number of MLD groups reported on r1-eth2 (0 when interface/groups absent)."
        groups_json = tgen.gears["r1"].vtysh_cmd(
            "show ipv6 mld groups json", isjson=True
        )
        try:
            return len(groups_json["r1-eth2"]["groups"])
        except KeyError:
            return 0

    # Poll until exactly the configured limit of groups is tracked.
    topotest.run_and_expect(mld_group_count, 14, count=10, wait=2)

    # Cleanup: stop the joiners, drop the limit, flush learned MLD state.
    # Bug fix: the no-form previously said "max-groups 4" although the limit
    # configured above is 14 — the value now matches the configuration.
    app_helper.stop_host("h1")
    tgen.gears["r1"].vtysh_cmd(
        """
        configure terminal
        interface r1-eth2
        no ipv6 mld max-groups 14
        exit
        clear ipv6 mld interfaces
        """
    )
def test_mld_group_source_limit():
    "Test MLD source limits."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Allow at most 4 learned sources per group on r1-eth2.
    tgen.gears["r1"].vtysh_cmd(
        """
        configure terminal
        interface r1-eth2
        ipv6 mld max-sources 4
        exit
        """
    )

    # Join the same group from seven distinct sources (limit is 4).
    for suffix in range(100, 107):
        app_helper.run(
            "h1", [f"--source=2001:db8:1::{suffix}", "FF35::100", "h1-eth0"]
        )

    def mld_source_count():
        "Number of (S,G) joins learned for ff35::100 on r1-eth2 (0 when absent)."
        joins_json = tgen.gears["r1"].vtysh_cmd("show ipv6 mld joins json", isjson=True)
        try:
            return len(joins_json["default"]["r1-eth2"]["ff35::100"].keys())
        except KeyError:
            return 0

    # Poll until exactly the configured source limit is tracked.
    topotest.run_and_expect(mld_source_count, 4, count=10, wait=2)

    # Cleanup: drop the limit, flush learned MLD state, stop the joiners.
    tgen.gears["r1"].vtysh_cmd(
        """
        configure terminal
        interface r1-eth2
        no ipv6 mld max-sources 4
        exit
        clear ipv6 mld interfaces
        """
    )
    app_helper.stop_host("h1")
def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()
    # Memleak checking is opt-in (enabled via the topotest environment);
    # skip quietly when it is not turned on.
    if not tgen.is_memleak_enabled():
        pytest.skip("Memory leak test/report is disabled")
    tgen.report_memory_leaks()
if __name__ == "__main__":
    # Run pytest on this file directly, forwarding CLI arguments and
    # disabling output capture (-s) so router logs are visible.
    sys.exit(pytest.main(["-s", *sys.argv[1:]]))

View File

@ -154,6 +154,20 @@ module frr-gmp {
        "Enable IGMP proxy on the interface.";
    }
    leaf max-groups {
      type uint32;
      // 4294967295 (2^32-1) is the default, i.e. effectively no limit.
      default "4294967295";
      description
        "Limit number of tracked IGMP group memberships on this interface.";
    }
    leaf max-sources {
      type uint32;
      // 4294967295 (2^32-1) is the default, i.e. effectively no limit.
      default "4294967295";
      description
        "Limit number of tracked IGMPv3 sources on this interface.";
    }
    list join-group {
      key "group-addr source-addr";
      description