Merge pull request #12550 from AbhishekNR/mld_join

pim6d: Implementing "ipv6 mld join"
This commit is contained in:
Donatas Abraitis 2023-04-17 11:01:21 +03:00 committed by GitHub
commit e7fd314f06
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 1396 additions and 159 deletions

View File

@ -598,6 +598,14 @@ static inline bool ipv6_mcast_ssm(const struct in6_addr *addr)
return (bits & 0xfff0ffff) == 0xff300000;
}
/* Return true if ADDR falls inside the reserved IPv6 multicast range
 * ffx2::/16 (link-local scope, any flags nibble).  These groups must not
 * be joinable via "ipv6 mld join".
 *
 * Only the first 16 bits matter for a /16 match: byte 0 == 0xff and the
 * low (scope) nibble of byte 1 == 2.  The previous mask (0xff0fffff) also
 * forced bits 16-31 to zero, i.e. it really tested ffx2:0000::/32 and let
 * reserved groups such as ff02:1::1 slip through validation.
 *
 * The 32-bit word is assembled byte-by-byte from s6_addr instead of
 * ntohl(s6_addr32[0]); s6_addr32 is a glibc extension while s6_addr is
 * portable, and the result is identical on any endianness.
 */
static inline bool ipv6_mcast_reserved(const struct in6_addr *addr)
{
	uint32_t bits = ((uint32_t)addr->s6_addr[0] << 24) |
			((uint32_t)addr->s6_addr[1] << 16) |
			((uint32_t)addr->s6_addr[2] << 8) |
			(uint32_t)addr->s6_addr[3];

	/* ffx2::/16 */
	return (bits & 0xff0f0000) == 0xff020000;
}
static inline uint8_t ipv4_mcast_scope(const struct in_addr *addr)
{
uint32_t bits = ntohl(addr->s_addr);

View File

@ -539,11 +539,8 @@ DEFPY (interface_ipv6_mld_join,
"Source address\n")
{
char xpath[XPATH_MAXLEN];
struct ipaddr group_addr = {0};
(void)str2ipaddr(group_str, &group_addr);
if (!IN6_IS_ADDR_MULTICAST(&group_addr)) {
if (!IN6_IS_ADDR_MULTICAST(&group)) {
vty_out(vty, "Invalid Multicast Address\n");
return CMD_WARNING_CONFIG_FAILED;
}

View File

@ -28,6 +28,7 @@ typedef struct in_addr pim_addr;
#define PIM_MROUTE_DBG "mroute"
#define PIMREG "pimreg"
#define GM "IGMP"
#define IPPROTO_GM IPPROTO_IGMP
#define PIM_ADDR_FUNCNAME(name) ipv4_##name
@ -57,6 +58,7 @@ typedef struct in6_addr pim_addr;
#define PIM_MROUTE_DBG "mroute6"
#define PIMREG "pim6reg"
#define GM "MLD"
#define IPPROTO_GM IPPROTO_ICMPV6
#define PIM_ADDR_FUNCNAME(name) ipv6_##name

View File

@ -42,10 +42,10 @@
#if PIM_IPV == 4
static void pim_if_igmp_join_del_all(struct interface *ifp);
static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
struct in_addr group_addr, struct in_addr source_addr,
struct pim_interface *pim_ifp);
#endif
static int gm_join_sock(const char *ifname, ifindex_t ifindex,
pim_addr group_addr, pim_addr source_addr,
struct pim_interface *pim_ifp);
void pim_if_init(struct pim_instance *pim)
{
@ -560,7 +560,7 @@ void pim_if_addr_add(struct connected *ifc)
/* Close socket and reopen with Source and Group
*/
close(ij->sock_fd);
join_fd = igmp_join_sock(
join_fd = gm_join_sock(
ifp->name, ifp->ifindex, ij->group_addr,
ij->source_addr, pim_ifp);
if (join_fd < 0) {
@ -573,7 +573,7 @@ void pim_if_addr_add(struct connected *ifc)
"<src?>", ij->source_addr,
source_str, sizeof(source_str));
zlog_warn(
"%s: igmp_join_sock() failure for IGMP group %s source %s on interface %s",
"%s: gm_join_sock() failure for IGMP group %s source %s on interface %s",
__func__, group_str, source_str,
ifp->name);
/* warning only */
@ -1214,15 +1214,13 @@ long pim_if_t_suppressed_msec(struct interface *ifp)
return t_suppressed_msec;
}
#if PIM_IPV == 4
static void igmp_join_free(struct gm_join *ij)
/* Free a configured IGMP/MLD static-join entry. */
static void gm_join_free(struct gm_join *ij)
{
	XFREE(MTYPE_PIM_IGMP_JOIN, ij);
}
static struct gm_join *igmp_join_find(struct list *join_list,
struct in_addr group_addr,
struct in_addr source_addr)
static struct gm_join *gm_join_find(struct list *join_list, pim_addr group_addr,
pim_addr source_addr)
{
struct listnode *node;
struct gm_join *ij;
@ -1230,38 +1228,33 @@ static struct gm_join *igmp_join_find(struct list *join_list,
assert(join_list);
for (ALL_LIST_ELEMENTS_RO(join_list, node, ij)) {
if ((group_addr.s_addr == ij->group_addr.s_addr)
&& (source_addr.s_addr == ij->source_addr.s_addr))
if ((!pim_addr_cmp(group_addr, ij->group_addr)) &&
(!pim_addr_cmp(source_addr, ij->source_addr)))
return ij;
}
return 0;
}
static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
struct in_addr group_addr, struct in_addr source_addr,
struct pim_interface *pim_ifp)
static int gm_join_sock(const char *ifname, ifindex_t ifindex,
pim_addr group_addr, pim_addr source_addr,
struct pim_interface *pim_ifp)
{
int join_fd;
pim_ifp->igmp_ifstat_joins_sent++;
join_fd = pim_socket_raw(IPPROTO_IGMP);
join_fd = pim_socket_raw(IPPROTO_GM);
if (join_fd < 0) {
pim_ifp->igmp_ifstat_joins_failed++;
return -1;
}
if (pim_igmp_join_source(join_fd, ifindex, group_addr, source_addr)) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<grp?>", group_addr, group_str,
sizeof(group_str));
pim_inet4_dump("<src?>", source_addr, source_str,
sizeof(source_str));
if (pim_gm_join_source(join_fd, ifindex, group_addr, source_addr)) {
zlog_warn(
"%s: setsockopt(fd=%d) failure for IGMP group %s source %s ifindex %d on interface %s: errno=%d: %s",
__func__, join_fd, group_str, source_str, ifindex,
"%s: setsockopt(fd=%d) failure for " GM
" group %pPAs source %pPAs ifindex %d on interface %s: errno=%d: %s",
__func__, join_fd, &group_addr, &source_addr, ifindex,
ifname, errno, safe_strerror(errno));
pim_ifp->igmp_ifstat_joins_failed++;
@ -1273,10 +1266,8 @@ static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
return join_fd;
}
#if PIM_IPV == 4
static struct gm_join *igmp_join_new(struct interface *ifp,
struct in_addr group_addr,
struct in_addr source_addr)
static struct gm_join *gm_join_new(struct interface *ifp, pim_addr group_addr,
pim_addr source_addr)
{
struct pim_interface *pim_ifp;
struct gm_join *ij;
@ -1285,19 +1276,12 @@ static struct gm_join *igmp_join_new(struct interface *ifp,
pim_ifp = ifp->info;
assert(pim_ifp);
join_fd = igmp_join_sock(ifp->name, ifp->ifindex, group_addr,
source_addr, pim_ifp);
join_fd = gm_join_sock(ifp->name, ifp->ifindex, group_addr, source_addr,
pim_ifp);
if (join_fd < 0) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<grp?>", group_addr, group_str,
sizeof(group_str));
pim_inet4_dump("<src?>", source_addr, source_str,
sizeof(source_str));
zlog_warn(
"%s: igmp_join_sock() failure for IGMP group %s source %s on interface %s",
__func__, group_str, source_str, ifp->name);
zlog_warn("%s: gm_join_sock() failure for " GM
" group %pPAs source %pPAs on interface %s",
__func__, &group_addr, &source_addr, ifp->name);
return 0;
}
@ -1312,11 +1296,9 @@ static struct gm_join *igmp_join_new(struct interface *ifp,
return ij;
}
#endif /* PIM_IPV == 4 */
#if PIM_IPV == 4
ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr)
ferr_r pim_if_gm_join_add(struct interface *ifp, pim_addr group_addr,
pim_addr source_addr)
{
struct pim_interface *pim_ifp;
struct gm_join *ij;
@ -1329,37 +1311,32 @@ ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr,
if (!pim_ifp->gm_join_list) {
pim_ifp->gm_join_list = list_new();
pim_ifp->gm_join_list->del = (void (*)(void *))igmp_join_free;
pim_ifp->gm_join_list->del = (void (*)(void *))gm_join_free;
}
ij = igmp_join_find(pim_ifp->gm_join_list, group_addr, source_addr);
ij = gm_join_find(pim_ifp->gm_join_list, group_addr, source_addr);
/* This interface has already been configured to join this IGMP group
/* This interface has already been configured to join this IGMP/MLD
* group
*/
if (ij) {
return ferr_ok();
}
(void)igmp_join_new(ifp, group_addr, source_addr);
(void)gm_join_new(ifp, group_addr, source_addr);
if (PIM_DEBUG_GM_EVENTS) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<grp?>", group_addr, group_str,
sizeof(group_str));
pim_inet4_dump("<src?>", source_addr, source_str,
sizeof(source_str));
zlog_debug(
"%s: issued static igmp join for channel (S,G)=(%s,%s) on interface %s",
__func__, source_str, group_str, ifp->name);
"%s: issued static " GM
" join for channel (S,G)=(%pPA,%pPA) on interface %s",
__func__, &source_addr, &group_addr, ifp->name);
}
return ferr_ok();
}
#endif /* PIM_IPV == 4 */
int pim_if_igmp_join_del(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr)
int pim_if_gm_join_del(struct interface *ifp, pim_addr group_addr,
pim_addr source_addr)
{
struct pim_interface *pim_ifp;
struct gm_join *ij;
@ -1372,40 +1349,29 @@ int pim_if_igmp_join_del(struct interface *ifp, struct in_addr group_addr,
}
if (!pim_ifp->gm_join_list) {
zlog_warn("%s: no IGMP join on interface %s", __func__,
zlog_warn("%s: no " GM " join on interface %s", __func__,
ifp->name);
return -2;
}
ij = igmp_join_find(pim_ifp->gm_join_list, group_addr, source_addr);
ij = gm_join_find(pim_ifp->gm_join_list, group_addr, source_addr);
if (!ij) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<grp?>", group_addr, group_str,
sizeof(group_str));
pim_inet4_dump("<src?>", source_addr, source_str,
sizeof(source_str));
zlog_warn(
"%s: could not find IGMP group %s source %s on interface %s",
__func__, group_str, source_str, ifp->name);
zlog_warn("%s: could not find " GM
" group %pPAs source %pPAs on interface %s",
__func__, &group_addr, &source_addr, ifp->name);
return -3;
}
if (close(ij->sock_fd)) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<grp?>", group_addr, group_str,
sizeof(group_str));
pim_inet4_dump("<src?>", source_addr, source_str,
sizeof(source_str));
zlog_warn(
"%s: failure closing sock_fd=%d for IGMP group %s source %s on interface %s: errno=%d: %s",
__func__, ij->sock_fd, group_str, source_str, ifp->name,
errno, safe_strerror(errno));
"%s: failure closing sock_fd=%d for " GM
" group %pPAs source %pPAs on interface %s: errno=%d: %s",
__func__, ij->sock_fd, &group_addr, &source_addr,
ifp->name, errno, safe_strerror(errno));
/* warning only */
}
listnode_delete(pim_ifp->gm_join_list, ij);
igmp_join_free(ij);
gm_join_free(ij);
if (listcount(pim_ifp->gm_join_list) < 1) {
list_delete(&pim_ifp->gm_join_list);
pim_ifp->gm_join_list = 0;
@ -1414,6 +1380,7 @@ int pim_if_igmp_join_del(struct interface *ifp, struct in_addr group_addr,
return 0;
}
#if PIM_IPV == 4
__attribute__((unused))
static void pim_if_igmp_join_del_all(struct interface *ifp)
{
@ -1433,21 +1400,9 @@ static void pim_if_igmp_join_del_all(struct interface *ifp)
return;
for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node, nextnode, ij))
pim_if_igmp_join_del(ifp, ij->group_addr, ij->source_addr);
pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr);
}
#else /* PIM_IPV != 4 */
ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr)
{
return ferr_ok();
}
int pim_if_igmp_join_del(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr)
{
return 0;
}
#endif /* PIM_IPV != 4 */
#endif /* PIM_IPV == 4 */
/*
RFC 4601

View File

@ -217,10 +217,10 @@ int pim_if_t_override_msec(struct interface *ifp);
pim_addr pim_find_primary_addr(struct interface *ifp);
ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr);
int pim_if_igmp_join_del(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr);
ferr_r pim_if_gm_join_add(struct interface *ifp, pim_addr group_addr,
pim_addr source_addr);
int pim_if_gm_join_del(struct interface *ifp, pim_addr group_addr,
pim_addr source_addr);
void pim_if_update_could_assert(struct interface *ifp);

View File

@ -7,6 +7,8 @@
#ifndef PIM_IGMP_JOIN_H
#define PIM_IGMP_JOIN_H
#include "pim_addr.h"
/* required headers #include'd by caller */
#ifndef SOL_IP
@ -26,35 +28,64 @@ struct group_source_req {
};
#endif
static inline int pim_igmp_join_source(int fd, ifindex_t ifindex,
struct in_addr group_addr,
struct in_addr source_addr)
#if PIM_IPV == 4
static inline int pim_gm_join_source(int fd, ifindex_t ifindex,
pim_addr group_addr, pim_addr source_addr)
{
struct group_source_req req;
struct sockaddr_in group;
struct sockaddr_in source;
struct sockaddr_in group = {};
struct sockaddr_in source = {};
memset(&req, 0, sizeof(req));
memset(&group, 0, sizeof(group));
group.sin_family = AF_INET;
group.sin_family = PIM_AF;
group.sin_addr = group_addr;
group.sin_port = htons(0);
memcpy(&req.gsr_group, &group, sizeof(struct sockaddr_in));
memcpy(&req.gsr_group, &group, sizeof(group));
memset(&source, 0, sizeof(source));
source.sin_family = AF_INET;
source.sin_family = PIM_AF;
source.sin_addr = source_addr;
source.sin_port = htons(0);
memcpy(&req.gsr_source, &source, sizeof(struct sockaddr_in));
memcpy(&req.gsr_source, &source, sizeof(source));
req.gsr_interface = ifindex;
if (source_addr.s_addr == INADDR_ANY)
if (pim_addr_is_any(source_addr))
return setsockopt(fd, SOL_IP, MCAST_JOIN_GROUP, &req,
sizeof(req));
else
return setsockopt(fd, SOL_IP, MCAST_JOIN_SOURCE_GROUP, &req,
sizeof(req));
}
#else /* PIM_IPV != 4*/
/* Issue the kernel join for an IPv6 (MLD) group on the given socket.
 * With an unspecified source this is an ASM (*,G) join; otherwise an
 * SSM (S,G) join.  Returns the setsockopt() result (0 on success).
 */
static inline int pim_gm_join_source(int fd, ifindex_t ifindex,
				     pim_addr group_addr, pim_addr source_addr)
{
	struct group_source_req req = {};
	struct sockaddr_in6 sa = {};
	int optname;

	/* Common sockaddr template; only sin6_addr differs between the
	 * group and source entries of the request.
	 */
	sa.sin6_family = PIM_AF;
	sa.sin6_port = htons(0);

	sa.sin6_addr = group_addr;
	memcpy(&req.gsr_group, &sa, sizeof(sa));

	sa.sin6_addr = source_addr;
	memcpy(&req.gsr_source, &sa, sizeof(sa));

	req.gsr_interface = ifindex;

	/* (*,G) when no source was given, (S,G) otherwise */
	optname = pim_addr_is_any(source_addr) ? MCAST_JOIN_GROUP
					       : MCAST_JOIN_SOURCE_GROUP;

	return setsockopt(fd, SOL_IPV6, optname, &req, sizeof(req));
}
#endif /* PIM_IPV != 4*/
#endif /* PIM_IGMP_JOIN_H */

View File

@ -2898,10 +2898,9 @@ int lib_interface_gmp_address_family_robustness_variable_modify(
int lib_interface_gmp_address_family_static_group_create(
struct nb_cb_create_args *args)
{
#if PIM_IPV == 4
struct interface *ifp;
struct ipaddr source_addr;
struct ipaddr group_addr;
pim_addr source_addr;
pim_addr group_addr;
int result;
const char *ifp_name;
const struct lyd_node *if_dnode;
@ -2917,33 +2916,40 @@ int lib_interface_gmp_address_family_static_group_create(
return NB_ERR_VALIDATION;
}
yang_dnode_get_ip(&group_addr, args->dnode, "./group-addr");
if (pim_is_group_224_0_0_0_24(group_addr.ip._v4_addr)) {
yang_dnode_get_pimaddr(&group_addr, args->dnode,
"./group-addr");
#if PIM_IPV == 4
if (pim_is_group_224_0_0_0_24(group_addr)) {
snprintf(
args->errmsg, args->errmsg_len,
"Groups within 224.0.0.0/24 are reserved and cannot be joined");
return NB_ERR_VALIDATION;
}
#else
if (ipv6_mcast_reserved(&group_addr)) {
snprintf(
args->errmsg, args->errmsg_len,
"Groups within ffx2::/16 are reserved and cannot be joined");
return NB_ERR_VALIDATION;
}
#endif
break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
yang_dnode_get_ip(&source_addr, args->dnode, "./source-addr");
yang_dnode_get_ip(&group_addr, args->dnode, "./group-addr");
result = pim_if_igmp_join_add(ifp, group_addr.ip._v4_addr,
source_addr.ip._v4_addr);
yang_dnode_get_pimaddr(&source_addr, args->dnode,
"./source-addr");
yang_dnode_get_pimaddr(&group_addr, args->dnode,
"./group-addr");
result = pim_if_gm_join_add(ifp, group_addr, source_addr);
if (result) {
snprintf(args->errmsg, args->errmsg_len,
"Failure joining IGMP group");
"Failure joining " GM " group");
return NB_ERR_INCONSISTENCY;
}
}
#else
/* TBD Depends on MLD data structure changes */
#endif /* PIM_IPV == 4 */
return NB_OK;
}
@ -2951,8 +2957,8 @@ int lib_interface_gmp_address_family_static_group_destroy(
struct nb_cb_destroy_args *args)
{
struct interface *ifp;
struct ipaddr source_addr;
struct ipaddr group_addr;
pim_addr source_addr;
pim_addr group_addr;
int result;
switch (args->event) {
@ -2962,22 +2968,17 @@ int lib_interface_gmp_address_family_static_group_destroy(
break;
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
yang_dnode_get_ip(&source_addr, args->dnode, "./source-addr");
yang_dnode_get_ip(&group_addr, args->dnode, "./group-addr");
result = pim_if_igmp_join_del(ifp, group_addr.ip._v4_addr,
source_addr.ip._v4_addr);
yang_dnode_get_pimaddr(&source_addr, args->dnode,
"./source-addr");
yang_dnode_get_pimaddr(&group_addr, args->dnode,
"./group-addr");
result = pim_if_gm_join_del(ifp, group_addr, source_addr);
if (result) {
char src_str[INET_ADDRSTRLEN];
char grp_str[INET_ADDRSTRLEN];
ipaddr2str(&source_addr, src_str, sizeof(src_str));
ipaddr2str(&group_addr, grp_str, sizeof(grp_str));
snprintf(args->errmsg, args->errmsg_len,
"%% Failure leaving IGMP group %s %s on interface %s: %d",
src_str, grp_str, ifp->name, result);
"%% Failure leaving " GM
" group %pPAs %pPAs on interface %s: %d",
&source_addr, &group_addr, ifp->name, result);
return NB_ERR_INCONSISTENCY;
}

View File

@ -335,18 +335,12 @@ static int gm_config_write(struct vty *vty, int writes,
struct listnode *node;
struct gm_join *ij;
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
pim_inet4_dump("<grp?>", ij->group_addr, group_str,
sizeof(group_str));
if (ij->source_addr.s_addr == INADDR_ANY) {
vty_out(vty, " ip igmp join %s\n", group_str);
} else {
inet_ntop(AF_INET, &ij->source_addr, source_str,
sizeof(source_str));
vty_out(vty, " ip igmp join %s %s\n", group_str,
source_str);
}
if (pim_addr_is_any(ij->source_addr))
vty_out(vty, " ip igmp join %pPAs\n",
&ij->group_addr);
else
vty_out(vty, " ip igmp join %pPAs %pPAs\n",
&ij->group_addr, &ij->source_addr);
++writes;
}
}
@ -388,6 +382,21 @@ static int gm_config_write(struct vty *vty, int writes,
vty_out(vty, " ipv6 mld last-member-query-interval %d\n",
pim_ifp->gm_specific_query_max_response_time_dsec);
/* IF ipv6 mld join */
if (pim_ifp->gm_join_list) {
struct listnode *node;
struct gm_join *ij;
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
if (pim_addr_is_any(ij->source_addr))
vty_out(vty, " ipv6 mld join %pPAs\n",
&ij->group_addr);
else
vty_out(vty, " ipv6 mld join %pPAs %pPAs\n",
&ij->group_addr, &ij->source_addr);
++writes;
}
}
return writes;
}
#endif

View File

@ -169,6 +169,7 @@ pimd_pim6d_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=6
pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP)
endif
pimd_test_igmpv3_join_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
pimd_test_igmpv3_join_LDADD = lib/libfrr.la
pimd_test_igmpv3_join_SOURCES = pimd/test_igmpv3_join.c

View File

@ -54,8 +54,8 @@ static int iface_solve_index(const char *ifname)
int main(int argc, const char *argv[])
{
struct in_addr group_addr;
struct in_addr source_addr;
pim_addr group_addr;
pim_addr source_addr;
const char *ifname;
const char *group;
const char *source;
@ -106,7 +106,7 @@ int main(int argc, const char *argv[])
exit(1);
}
result = pim_igmp_join_source(fd, ifindex, group_addr, source_addr);
result = pim_gm_join_source(fd, ifindex, group_addr, source_addr);
if (result) {
fprintf(stderr,
"%s: setsockopt(fd=%d) failure for IGMP group %s source %s ifindex %d on interface %s: errno=%d: %s\n",

View File

@ -5120,6 +5120,75 @@ def verify_pim6_config(tgen, input_dict, expected=True):
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@retry(retry_timeout=62)
def verify_local_mld_groups(tgen, dut, interface, group_addresses):
    """
    Verify local MLD groups are received from an intended interface
    by running "show ipv6 mld join json" command

    Parameters
    ----------
    * `tgen`: topogen object
    * `dut`: device under test
    * `interface`: interface, from which MLD groups are configured
    * `group_addresses`: MLD group address, a single string or a list

    Usage
    -----
    dut = "r1"
    interface = "r1-r0-eth0"
    group_address = "ffaa::1"
    result = verify_local_mld_groups(tgen, dut, interface, group_address)

    Returns
    -------
    errormsg(str) or True
    """
    logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))

    if dut not in tgen.routers():
        return False

    rnode = tgen.routers()[dut]
    logger.info("[DUT: %s]: Verifying local MLD groups received:", dut)
    show_ipv6_local_mld_json = run_frr_cmd(
        rnode, "show ipv6 mld join json", isjson=True
    )

    if type(group_addresses) is not list:
        group_addresses = [group_addresses]

    if interface not in show_ipv6_local_mld_json["default"]:
        errormsg = (
            "[DUT %s]: Verifying local MLD group received"
            " from interface %s [FAILED]!! " % (dut, interface)
        )
        return errormsg

    # Every requested group must be present.  The previous implementation
    # broke out of the loop after the first group it found, so groups past
    # the first were never actually verified.
    for grp_addr in group_addresses:
        if grp_addr not in show_ipv6_local_mld_json["default"][interface]:
            errormsg = (
                "[DUT %s]: Verifying local MLD group received"
                " from interface %s [FAILED]!! "
                " Expected: %s " % (dut, interface, grp_addr)
            )
            return errormsg

        logger.info(
            "[DUT %s]: Verifying local MLD group %s received "
            "from interface %s [PASSED]!! ",
            dut,
            grp_addr,
            interface,
        )

    logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
    return True
# def cleanup(self):
# super(McastTesterHelper, self).cleanup()

View File

@ -0,0 +1,249 @@
{
"address_types": ["ipv6"],
"ipv6base": "fd00::",
"ipv6mask": 64,
"link_ip_start": {
"ipv6": "fd00::",
"v6mask": 64
},
"lo_prefix": {
"ipv6": "2001:db8:f::",
"v6mask": 128
},
"routers": {
"r1": {
"links": {
"r4": {"ipv6": "auto", "pim6": "enable"},
"r2": {"ipv6": "auto", "pim6": "enable"},
"r3": {"ipv6": "auto", "pim6": "enable"},
"i1": {"ipv6": "auto", "pim6": "enable"},
"i2": {"ipv6": "auto", "pim6": "enable"}
},
"bgp": {
"local_as": "100",
"router_id": "192.168.1.1",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r4": {
"dest_link": {
"r1": {}
}
},
"r2": {
"dest_link": {
"r1": {}
}
},
"r3": {
"dest_link": {
"r1": {}
}
}
}
}
},
"ipv6": {
"unicast": {
"redistribute": [
{"redist_type": "static"},
{"redist_type": "connected"}
],
"neighbor": {
"r4": {
"dest_link": {
"r1": {}
}
},
"r2": {
"dest_link": {
"r1": {}
}
},
"r3": {
"dest_link": {
"r1": {}
}
}
}
}
}
}
}
},
"r2": {
"links": {
"lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable"},
"r1": {"ipv6": "auto", "pim6": "enable"},
"r4": {"ipv6": "auto", "pim6": "enable"}
},
"bgp": {
"local_as": "100",
"router_id": "192.168.1.2",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2": {}
}
},
"r4": {
"dest_link": {
"r2": {}
}
}
}
}
},
"ipv6": {
"unicast": {
"redistribute": [
{"redist_type": "static"},
{"redist_type": "connected"}
],
"neighbor": {
"r1": {
"dest_link": {
"r2": {}
}
},
"r4": {
"dest_link": {
"r2": {}
}
}
}
}
}
}
}
},
"r3": {
"links": {
"r1": {"ipv6": "auto", "pim6": "enable"},
"r4": {"ipv6": "auto", "pim6": "enable"}
},
"bgp": {
"local_as": "100",
"router_id": "192.168.1.3",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r3": {}
}
},
"r4": {
"dest_link": {
"r3": {}
}
}
}
}
},
"ipv6": {
"unicast": {
"redistribute": [
{"redist_type": "static"},
{"redist_type": "connected"}
],
"neighbor": {
"r1": {
"dest_link": {
"r3": {}
}
},
"r4": {
"dest_link": {
"r3": {}
}
}
}
}
}
}
}
},
"r4": {
"links": {
"r2": {"ipv6": "auto", "pim6": "enable"},
"r3": {"ipv6": "auto", "pim6": "enable"},
"i4": {"ipv6": "auto", "pim6": "enable"},
"r1": {"ipv6": "auto", "pim6": "enable"}
},
"bgp": {
"local_as": "100",
"router_id": "192.168.1.4",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r4": {}
}
},
"r2": {
"dest_link": {
"r4": {}
}
},
"r3": {
"dest_link": {
"r4": {}
}
}
}
}
},
"ipv6": {
"unicast": {
"redistribute": [
{"redist_type": "static"},
{"redist_type": "connected"}
],
"neighbor": {
"r1": {
"dest_link": {
"r4": {}
}
},
"r2": {
"dest_link": {
"r4": {}
}
},
"r3": {
"dest_link": {
"r4": {}
}
}
}
}
}
}
}
},
"i1": {
"links": {
"r1": {"ipv6": "auto"}
}
},
"i2": {
"links": {
"r1": {"ipv6": "auto"}
}
},
"i4": {
"links": {
"r4": {"ipv6": "auto"}
}
}
}
}

View File

@ -0,0 +1,915 @@
#!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# Copyright (c) 2023 by VMware, Inc. ("VMware")
#
"""
Following tests are covered to test_multicast_pim_mld_local_tier_1:
Test steps
- Create topology (setup module)
- Bring up topology
Following tests are covered:
1. Verify static MLD group populated when static "ipv6 mld join <grp>" is configured
2. Verify mroute and upstream populated with correct OIL/IIF with static mld join
3. Verify local MLD join not allowed for non multicast group
4. Verify static MLD group removed from DUT while removing "ipv6 mld join" CLI
5. Verify static MLD groups after removing and adding MLD config
"""
import os
import sys
import time
import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
sys.path.append(os.path.join(CWD, "../lib/"))
# Required to instantiate the topology builder class.
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from re import search as re_search
from re import findall as findall
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
step,
kill_router_daemons,
start_router_daemons,
reset_config_on_routers,
do_countdown,
apply_raw_config,
socat_send_pim6_traffic,
)
from lib.pim import (
create_pim_config,
verify_mroutes,
verify_upstream_iif,
verify_mld_groups,
clear_pim6_mroute,
McastTesterHelper,
verify_pim_neighbors,
create_mld_config,
verify_mld_groups,
verify_local_mld_groups,
verify_pim_rp_info,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
r1_r2_links = []
r1_r3_links = []
r2_r1_links = []
r2_r4_links = []
r3_r1_links = []
r3_r4_links = []
r4_r2_links = []
r4_r3_links = []
pytestmark = [pytest.mark.pim6d, pytest.mark.staticd]
TOPOLOGY = """
+-------------------+
| |
i1--- R1-------R2----------R4---i2
| |
+-------R3----------+
Description:
i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send MLD
join and traffic
R1 - DUT (LHR)
R2 - RP
R3 - Transit
R4 - (FHR)
"""
# Global variables
GROUP_RANGE = "ffaa::/16"
RP_RANGE = "ff00::/8"
GROUP_RANGE_1 = [
"ffaa::1/128",
"ffaa::2/128",
"ffaa::3/128",
"ffaa::4/128",
"ffaa::5/128",
]
MLD_JOIN_RANGE_1 = ["ffaa::1", "ffaa::2", "ffaa::3", "ffaa::4", "ffaa::5"]
MLD_JOIN_RANGE_2 = [
"ff02::1:ff00:0",
"ff02::d",
"fe80::250:56ff:feb7:d8d5",
"2001::4",
"2002::5",
]
def setup_module(mod):
    """
    Sets up the pytest environment

    * `mod`: module name
    """
    testsuite_run_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite start time: {}".format(testsuite_run_time))
    logger.info("=" * 40)
    logger.info("Master Topology: \n {}".format(TOPOLOGY))
    logger.info("Running setup_module to create topology")
    # This function initiates the topology build with Topogen...
    json_file = "{}/multicast_mld_local_join.json".format(CWD)
    tgen = Topogen(json_file, mod.__name__)
    # Publish the parsed topology module-wide; the testcases below read it.
    global topo
    topo = tgen.json_topo
    # ... and here it calls Mininet initialization functions.
    # Starting topology, create tmp files which are loaded to routers
    # to start daemons and then start routers
    start_topology(tgen)
    # Don"t run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    # Creating configuration from JSON
    build_config_from_json(tgen, topo)
    # Baseline sanity: all PIM neighborships must be up before any testcase runs.
    # Verify PIM neighbors
    result = verify_pim_neighbors(tgen, topo)
    assert result is True, " Verify PIM neighbor: Failed Error: {}".format(result)
    logger.info("Running setup_module() done")
def teardown_module():
    """Teardown the pytest environment"""
    logger.info("Running teardown_module to delete topology")
    tgen = get_topogen()
    # Stop topology and remove tmp files
    tgen.stop_topology()
    logger.info(
        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
    )
    logger.info("=" * 40)
#####################################################
#
# Testcases
#
#####################################################
def test_mld_local_joins_p0(request):
    """
    Verify static MLD group populated when static
    "ipv6 mld join <grp>" is configured
    """
    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)
    # Don"t run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    reset_config_on_routers(tgen)
    step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
    step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
    intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
    # Configure "ipv6 mld join" for all five groups on both host-facing
    # interfaces of the DUT (r1).
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {
                    intf_r1_i1: {"mld": {"version": "1", "join": MLD_JOIN_RANGE_1}},
                    intf_r1_i2: {"mld": {"version": "1", "join": MLD_JOIN_RANGE_1}},
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
    step("Configure static RP for (ffaa::1-5) as R2")
    # r2's loopback address becomes the static RP for the whole GROUP_RANGE.
    input_dict = {
        "r2": {
            "pim6": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv6"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE,
                    }
                ]
            }
        }
    }
    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
    step("verify static mld join using show ipv6 mld join")
    dut = "r1"
    interfaces = [intf_r1_i1, intf_r1_i2]
    for interface in interfaces:
        result = verify_local_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
        assert result is True, "Testcase {} :Failed \n Error: {}".format(
            tc_name, result
        )
    step("verify mld groups using show ipv6 mld groups")
    # The statically joined groups must also show up as learned MLD groups.
    interfaces = [intf_r1_i1, intf_r1_i2]
    for interface in interfaces:
        result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
        assert result is True, "Testcase {} :Failed \n Error: {}".format(
            tc_name, result
        )
    write_test_footer(tc_name)
def test_mroute_with_mld_local_joins_p0(request):
    """
    Verify mroute and upstream populated with correct OIL/IIF with
    static mld join
    """
    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)
    # Don"t run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    reset_config_on_routers(tgen)
    step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
    step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
    step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
    intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
    # Static "ipv6 mld join" for all five groups on both host-facing
    # interfaces of the DUT (r1).
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {
                    intf_r1_i1: {"mld": {"version": "1", "join": MLD_JOIN_RANGE_1}},
                    intf_r1_i2: {"mld": {"version": "1", "join": MLD_JOIN_RANGE_1}},
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
    step("Configure static RP for (ffaa::1-5) as R2")
    input_dict = {
        "r2": {
            "pim6": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv6"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE,
                    }
                ]
            }
        }
    }
    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
    step("verify static mld join using show ipv6 mld join")
    dut = "r1"
    interfaces = [intf_r1_i1, intf_r1_i2]
    for interface in interfaces:
        result = verify_local_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
        assert result is True, "Testcase {} :Failed \n Error: {}".format(
            tc_name, result
        )
    step("verify mld groups using show ipv6 mld groups")
    interfaces = [intf_r1_i1, intf_r1_i2]
    for interface in interfaces:
        result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
        assert result is True, "Testcase {} :Failed \n Error: {}".format(
            tc_name, result
        )
    step("verify RP-info populated in DUT")
    dut = "r1"
    rp_address = topo["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
    SOURCE = "Static"
    oif = topo["routers"]["r1"]["links"]["r2"]["interface"]
    result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
    step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
    intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
    intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
    result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
    step(
        "'show ipv6 mroute' showing correct RPF and OIF interface for (*,G)"
        " and (S,G) entries on all the nodes"
    )
    source_i6 = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
    intf_r1_r2 = topo["routers"]["r1"]["links"]["r2"]["interface"]
    # (*,G) entries: IIF toward the RP (r2), OIL on the locally joined
    # host-facing interfaces.
    input_dict_starg = [
        {
            "dut": "r1",
            "src_address": "*",
            "iif": intf_r1_r2,
            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r1",
            "src_address": "*",
            "iif": intf_r1_r2,
            "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
        },
    ]
    # (S,G) entries: IIF toward the traffic source behind r4.
    input_dict_sg = [
        {
            "dut": "r1",
            "src_address": source_i6,
            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r1",
            "src_address": source_i6,
            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
            "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
        },
    ]
    step("Verify mroutes and iff upstream for local mld groups")
    for input_dict in [input_dict_starg, input_dict_sg]:
        for data in input_dict:
            result = verify_mroutes(
                tgen,
                data["dut"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                data["iif"],
                data["oil"],
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )
            result = verify_upstream_iif(
                tgen, data["dut"], data["iif"], data["src_address"], MLD_JOIN_RANGE_1
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )
    step("Verify mroutes not created with local interface ip ")
    # Negative check: no (S,G) state should appear with the DUT's own
    # interface names as source; expected=False inverts the verification.
    input_dict_local_sg = [
        {
            "dut": "r1",
            "src_address": intf_r1_i1,
            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
        },
        {
            "dut": "r1",
            "src_address": intf_r1_i2,
            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
            "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
        },
    ]
    for data in input_dict_local_sg:
        result = verify_mroutes(
            tgen,
            data["dut"],
            data["src_address"],
            MLD_JOIN_RANGE_1,
            data["iif"],
            data["oil"],
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed Error: {}"
            "sg created with local interface ip".format(tc_name, result)
        )
        result = verify_upstream_iif(
            tgen,
            data["dut"],
            data["iif"],
            data["src_address"],
            MLD_JOIN_RANGE_1,
            expected=False,
        )
        assert result is not True, (
            "Testcase {} : Failed Error: {}"
            "upstream created with local interface ip".format(tc_name, result)
        )
    write_test_footer(tc_name)
def test_remove_add_mld_local_joins_p1(request):
    """
    Verify that the static MLD group is removed from the DUT when the
    "ipv6 mld join" CLI is deleted, and is re-created (along with the
    corresponding mroutes/upstream state) when the CLI is added back.
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    reset_config_on_routers(tgen)

    step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
    step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
    step("Enable the MLD on R11 interface of R1 and configure local mld groups")

    # Configure static MLD joins (ffaa::1-5) on R1's interface towards i1.
    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {
                    intf_r1_i1: {"mld": {"version": "1", "join": MLD_JOIN_RANGE_1}}
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Configure static RP for (ffaa::1-5) as R2")
    input_dict = {
        "r2": {
            "pim6": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv6"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE,
                    }
                ]
            }
        }
    }
    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("verify static mld join using show ipv6 mld join")
    dut = "r1"
    interface = intf_r1_i1
    result = verify_local_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("verify mld groups using show ipv6 mld groups")
    interface = intf_r1_i1
    result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("verify RP-info populated in DUT")
    dut = "r1"
    rp_address = topo["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
    SOURCE = "Static"
    oif = topo["routers"]["r1"]["links"]["r2"]["interface"]
    result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
    intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
    result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step(
        "'show ipv6 mroute' showing correct RPF and OIF interface for (*,G)"
        " and (S,G) entries on all the nodes"
    )
    source_i6 = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
    intf_r1_r2 = topo["routers"]["r1"]["links"]["r2"]["interface"]
    # Expected mroute state while the static joins are configured.
    input_dict_starg = [
        {
            "dut": "r1",
            "src_address": "*",
            "iif": intf_r1_r2,
            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
        }
    ]
    input_dict_sg = [
        {
            "dut": "r1",
            "src_address": source_i6,
            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
        }
    ]

    step("Verify mroutes and iff upstream for local mld groups")
    for input_dict in [input_dict_starg, input_dict_sg]:
        for data in input_dict:
            result = verify_mroutes(
                tgen,
                data["dut"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                data["iif"],
                data["oil"],
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

            result = verify_upstream_iif(
                tgen, data["dut"], data["iif"], data["src_address"], MLD_JOIN_RANGE_1
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

    step("Remove MLD join from DUT")
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {
                    intf_r1_i1: {
                        "mld": {
                            "join": MLD_JOIN_RANGE_1,
                            "delete_attr": True,
                        }
                    }
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("verify static mld join removed using show ipv6 mld join")
    dut = "r1"
    interface = intf_r1_i1
    result = verify_local_mld_groups(
        tgen, dut, interface, MLD_JOIN_RANGE_1, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} :Failed \n Error: {}" "MLD join still present".format(
        tc_name, result
    )

    step("verify mld groups removed using show ipv6 mld groups")
    interface = intf_r1_i1
    result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1, expected=False)
    assert (
        result is not True
    ), "Testcase {} :Failed \n Error: {}" "MLD groups still present".format(
        tc_name, result
    )

    # With the joins deleted, the previously-verified mroutes/upstream
    # entries must also be gone.
    step("Verify mroutes and iff upstream for local mld groups")
    for input_dict in [input_dict_starg, input_dict_sg]:
        for data in input_dict:
            result = verify_mroutes(
                tgen,
                data["dut"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                data["iif"],
                data["oil"],
                expected=False,
            )
            assert (
                result is not True
            ), "Testcase {} : Failed Error: {}" "mroutes still present".format(
                tc_name, result
            )

            result = verify_upstream_iif(
                tgen,
                data["dut"],
                data["iif"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                expected=False,
            )
            assert (
                result is not True
            ), "Testcase {} : Failed Error: {}" "mroutes still present".format(
                tc_name, result
            )

    step("Add MLD join on DUT again")
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {
                    intf_r1_i1: {
                        "mld": {
                            "join": MLD_JOIN_RANGE_1,
                        }
                    }
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("verify static mld join using show ipv6 mld join")
    dut = "r1"
    interface = intf_r1_i1
    result = verify_local_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("verify mld groups using show ipv6 mld groups")
    interface = intf_r1_i1
    result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    # Re-adding the joins must bring the mroutes/upstream state back.
    step("Verify mroutes and iff upstream for local mld groups")
    for input_dict in [input_dict_starg, input_dict_sg]:
        for data in input_dict:
            result = verify_mroutes(
                tgen,
                data["dut"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                data["iif"],
                data["oil"],
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

            result = verify_upstream_iif(
                tgen, data["dut"], data["iif"], data["src_address"], MLD_JOIN_RANGE_1
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

    write_test_footer(tc_name)
def test_remove_add_mld_config_with_local_joins_p1(request):
    """
    Verify static MLD groups after removing and re-adding the whole MLD
    interface configuration (not just the join statements).
    """

    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    reset_config_on_routers(tgen)

    step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
    step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
    step("Enable the MLD on R11 interface of R1 and configure local mld groups")

    # Configure static MLD joins (ffaa::1-5) on R1's interface towards i1.
    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {
                    intf_r1_i1: {"mld": {"version": "1", "join": MLD_JOIN_RANGE_1}}
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("Configure static RP for (ffaa::1-5) as R2")
    input_dict = {
        "r2": {
            "pim6": {
                "rp": [
                    {
                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv6"].split(
                            "/"
                        )[0],
                        "group_addr_range": GROUP_RANGE,
                    }
                ]
            }
        }
    }
    result = create_pim_config(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)

    step("verify static mld join using show ipv6 mld join")
    dut = "r1"
    interface = intf_r1_i1
    result = verify_local_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("verify mld groups using show ipv6 mld groups")
    interface = intf_r1_i1
    result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
    intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
    result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step(
        "'show ipv6 mroute' showing correct RPF and OIF interface for (*,G)"
        " and (S,G) entries on all the nodes"
    )
    source_i6 = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
    intf_r1_r2 = topo["routers"]["r1"]["links"]["r2"]["interface"]
    # Expected mroute state while the MLD config is present.
    input_dict_starg = [
        {
            "dut": "r1",
            "src_address": "*",
            "iif": intf_r1_r2,
            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
        }
    ]
    input_dict_sg = [
        {
            "dut": "r1",
            "src_address": source_i6,
            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
        }
    ]

    step("Verify mroutes and iff upstream for local mld groups")
    for input_dict in [input_dict_starg, input_dict_sg]:
        for data in input_dict:
            result = verify_mroutes(
                tgen,
                data["dut"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                data["iif"],
                data["oil"],
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

            result = verify_upstream_iif(
                tgen, data["dut"], data["iif"], data["src_address"], MLD_JOIN_RANGE_1
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

    # Delete the entire MLD interface config (version 1) rather than only
    # the join statements; local joins must disappear along with it.
    step("Remove mld and mld version 1 from DUT interface")
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {intf_r1_i1: {"mld": {"version": "1", "delete": True}}}
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("verify static mld join using show ipv6 mld join")
    dut = "r1"
    interface = intf_r1_i1
    result = verify_local_mld_groups(
        tgen, dut, interface, MLD_JOIN_RANGE_1, expected=False
    )
    assert result is not True, "Testcase {} :Failed \n Error: {}".format(
        tc_name, result
    )

    step("verify mld groups using show ipv6 mld groups")
    interface = intf_r1_i1
    result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1, expected=False)
    assert (
        result is not True
    ), "Testcase {} :Failed \n Error: {}" "MLD groups still present".format(
        tc_name, result
    )

    step("Verify mroutes and iff upstream for local mld groups")
    for input_dict in [input_dict_starg, input_dict_sg]:
        for data in input_dict:
            result = verify_mroutes(
                tgen,
                data["dut"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                data["iif"],
                data["oil"],
                expected=False,
            )
            assert (
                result is not True
            ), "Testcase {} : Failed Error: {}" "mroutes still present".format(
                tc_name, result
            )

    # Re-apply the full MLD config; joins, groups and mroutes must return.
    step("Add mld and mld version 1 from DUT interface")
    input_dict = {
        "r1": {
            "mld": {
                "interfaces": {
                    intf_r1_i1: {"mld": {"version": "1", "join": MLD_JOIN_RANGE_1}}
                }
            }
        }
    }
    result = create_mld_config(tgen, topo, input_dict)
    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)

    step("verify static mld join using show ipv6 mld join")
    dut = "r1"
    interface = intf_r1_i1
    result = verify_local_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("verify mld groups using show ipv6 mld groups")
    interface = intf_r1_i1
    result = verify_mld_groups(tgen, dut, interface, MLD_JOIN_RANGE_1)
    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)

    step("Verify mroutes and iff upstream for local mld groups")
    for input_dict in [input_dict_starg, input_dict_sg]:
        for data in input_dict:
            result = verify_mroutes(
                tgen,
                data["dut"],
                data["src_address"],
                MLD_JOIN_RANGE_1,
                data["iif"],
                data["oil"],
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

            result = verify_upstream_iif(
                tgen, data["dut"], data["iif"], data["src_address"], MLD_JOIN_RANGE_1
            )
            assert result is True, "Testcase {} : Failed Error: {}".format(
                tc_name, result
            )

    write_test_footer(tc_name)
if __name__ == "__main__":
    # Allow running this test file directly; forward extra CLI args to pytest.
    sys.exit(pytest.main(["-s", *sys.argv[1:]]))