author     Donald Sharp <donaldsharp72@gmail.com>       2024-08-22 11:32:56 -0400
committer  GitHub <noreply@github.com>                  2024-08-22 11:32:56 -0400
commit     05c17eff06536e750d43829ccded5c9e8f0f9ca4 (patch)
tree       5cafbf2d7a2d501cc51f998d941ef7c11ccf300a
parent     879460ce27d2ec8faee68e2c973b6fe1007a6f38 (diff)
parent     7ce2a1b8fb6a8d392eea3882d012f12403b39372 (diff)
Merge pull request #16450 from nabahr/static_joins
PIM: Implement static IGMP joins without an IGMP report
-rw-r--r--  doc/user/pim.rst                             |  12
-rw-r--r--  pimd/pim6_cmd.c                              |  82
-rw-r--r--  pimd/pim_cmd.c                               | 266
-rw-r--r--  pimd/pim_iface.c                             | 166
-rw-r--r--  pimd/pim_iface.h                             |   6
-rw-r--r--  pimd/pim_igmp.h                              |   6
-rw-r--r--  pimd/pim_memory.c                            |   1
-rw-r--r--  pimd/pim_memory.h                            |   1
-rw-r--r--  pimd/pim_nb.c                                |   8
-rw-r--r--  pimd/pim_nb.h                                |  11
-rw-r--r--  pimd/pim_nb_config.c                         |  97
-rw-r--r--  pimd/pim_vty.c                               |  49
-rw-r--r--  tests/topotests/lib/pim.py                   |  76
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo4/multicast_pim_uplink_topo4.json | 295
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo4/test_multicast_pim_uplink_topo4.py | 893
-rw-r--r--  yang/frr-gmp.yang                            |  21
16 files changed, 1871 insertions(+), 119 deletions(-)
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index 1740828f26..5701560bd6 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -255,9 +255,17 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
Tell pim to receive IGMP reports and Query on this interface. The default
version is v3. This command is useful on a LHR.
-.. clicmd:: ip igmp join A.B.C.D [A.B.C.D]
+.. clicmd:: ip igmp join-group A.B.C.D [A.B.C.D]
- Join multicast group or source-group on an interface.
+ Join multicast group or source-group on an interface. This will result in
+ an IGMP join happening through a local socket so that IGMP reports will be
+ sent on this interface. It may also have the side effect of the kernel
+   forwarding multicast traffic to the socket unnecessarily.
+
+.. clicmd:: ip igmp static-group A.B.C.D [A.B.C.D]
+
+   Add a static multicast group or source-group on an interface. This behaves
+   as if a receiver were present on this interface, without any IGMP reports.
.. clicmd:: ip igmp query-interval (1-65535)
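For illustration only, a minimal interface configuration exercising both of the commands documented above; the interface name and addresses are placeholders, not taken from this change:

    interface r1-eth0
     ip igmp
     ip igmp join-group 225.1.1.1
     ip igmp static-group 225.1.1.2 10.1.1.10
    exit

Here `join-group` sends real IGMP reports through a local socket, while `static-group` only installs forwarding state, as if a receiver were present but without generating any reports.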
diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
index 99f1474712..f7a4e0e481 100644
--- a/pimd/pim6_cmd.c
+++ b/pimd/pim6_cmd.c
@@ -1363,45 +1363,56 @@ DEFPY_ATTR(no_ipv6_ssmpingd,
return ret;
}
-DEFPY (interface_ipv6_mld_join,
- interface_ipv6_mld_join_cmd,
- "ipv6 mld join X:X::X:X$group [X:X::X:X$source]",
+DEFPY_YANG_HIDDEN (interface_ipv6_mld_join,
+ interface_ipv6_mld_join_cmd,
+ "[no] ipv6 mld join X:X::X:X$grp [X:X::X:X]$src",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ nb_cli_enqueue_change(vty, ".", (!no ? NB_OP_CREATE : NB_OP_DESTROY),
+ NULL);
+ return nb_cli_apply_changes(vty, FRR_GMP_JOIN_GROUP_XPATH,
+ "frr-routing:ipv6", grp_str,
+ (src_str ? src_str : "::"));
+}
+ALIAS (interface_ipv6_mld_join,
+ interface_ipv6_mld_join_group_cmd,
+ "[no] ipv6 mld join-group X:X::X:X$grp [X:X::X:X]$src",
+ NO_STR
IPV6_STR
IFACE_MLD_STR
"MLD join multicast group\n"
"Multicast group address\n"
- "Source address\n")
-{
- char xpath[XPATH_MAXLEN];
-
- if (!IN6_IS_ADDR_MULTICAST(&group)) {
- vty_out(vty, "Invalid Multicast Address\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- if (source_str) {
- if (IPV6_ADDR_SAME(&source, &in6addr_any)) {
- vty_out(vty, "Bad source address %s\n", source_str);
- return CMD_WARNING_CONFIG_FAILED;
- }
- } else
- source_str = "::";
-
- snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH, "frr-routing:ipv6",
- group_str, source_str);
-
- nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
-
- return nb_cli_apply_changes(vty, NULL);
-}
-
-DEFPY (interface_no_ipv6_mld_join,
- interface_no_ipv6_mld_join_cmd,
- "no ipv6 mld join X:X::X:X$group [X:X::X:X$source]",
+ "Source address\n");
+
+DEFPY_YANG (interface_ipv6_mld_static_group,
+ interface_ipv6_mld_static_group_cmd,
+ "[no] ipv6 mld static-group X:X::X:X$grp [X:X::X:X]$src",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ "Static multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ nb_cli_enqueue_change(vty, ".", (!no ? NB_OP_CREATE : NB_OP_DESTROY),
+ NULL);
+ return nb_cli_apply_changes(vty, FRR_GMP_STATIC_GROUP_XPATH,
+ "frr-routing:ipv6", grp_str,
+ (src_str ? src_str : "::"));
+}
+
+DEFPY (interface_no_ipv6_mld_static_group,
+ interface_no_ipv6_mld_static_group_cmd,
+ "no ipv6 mld static-group X:X::X:X$group [X:X::X:X$source]",
NO_STR
IPV6_STR
IFACE_MLD_STR
- "MLD join multicast group\n"
+ "Static multicast group\n"
"Multicast group address\n"
"Source address\n")
{
@@ -1415,8 +1426,8 @@ DEFPY (interface_no_ipv6_mld_join,
} else
source_str = "::";
- snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH, "frr-routing:ipv6",
- group_str, source_str);
+ snprintf(xpath, sizeof(xpath), FRR_GMP_STATIC_GROUP_XPATH,
+ "frr-routing:ipv6", group_str, source_str);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
@@ -2669,7 +2680,8 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE, &interface_ipv6_mld_cmd);
install_element(INTERFACE_NODE, &interface_no_ipv6_mld_cmd);
install_element(INTERFACE_NODE, &interface_ipv6_mld_join_cmd);
- install_element(INTERFACE_NODE, &interface_no_ipv6_mld_join_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_join_group_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_static_group_cmd);
install_element(INTERFACE_NODE, &interface_ipv6_mld_version_cmd);
install_element(INTERFACE_NODE, &interface_no_ipv6_mld_version_cmd);
install_element(INTERFACE_NODE, &interface_ipv6_mld_query_interval_cmd);
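The MLD side gets the same treatment: `ipv6 mld join-group` and `ipv6 mld static-group` are the visible commands, and the old `ipv6 mld join` form is kept as a hidden alias of `join-group`. A sketch with a placeholder interface and addresses (assumed for illustration, not part of this diff):

    interface r1-eth0
     ipv6 mld
     ipv6 mld join-group ff15::100
     ipv6 mld static-group ff15::100 2001:db8::10
    exit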
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index f57048c703..633c46966e 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -682,6 +682,91 @@ static void igmp_show_interface_join(struct pim_instance *pim, struct vty *vty,
vty_json(vty, json);
}
+static void igmp_show_interface_static_group(struct pim_instance *pim,
+ struct vty *vty, bool uj)
+{
+ struct interface *ifp;
+ json_object *json = NULL;
+ json_object *json_iface = NULL;
+ json_object *json_grp = NULL;
+ json_object *json_grp_arr = NULL;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_string_add(json, "vrf",
+ vrf_id_to_name(pim->vrf->vrf_id));
+ } else {
+ vty_out(vty,
+ "Interface Address Source Group\n");
+ }
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp;
+ struct listnode *node;
+ struct static_group *stgrp;
+ struct in_addr pri_addr;
+ char pri_addr_str[INET_ADDRSTRLEN];
+
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ if (!pim_ifp->static_group_list)
+ continue;
+
+ pri_addr = pim_find_primary_addr(ifp);
+ pim_inet4_dump("<pri?>", pri_addr, pri_addr_str,
+ sizeof(pri_addr_str));
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->static_group_list, node,
+ stgrp)) {
+ char group_str[INET_ADDRSTRLEN];
+ char source_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<grp?>", stgrp->group_addr, group_str,
+ sizeof(group_str));
+ pim_inet4_dump("<src?>", stgrp->source_addr, source_str,
+ sizeof(source_str));
+
+ if (uj) {
+ json_object_object_get_ex(json, ifp->name,
+ &json_iface);
+
+ if (!json_iface) {
+ json_iface = json_object_new_object();
+ json_object_string_add(json_iface,
+ "name",
+ ifp->name);
+ json_object_object_add(json, ifp->name,
+ json_iface);
+ json_grp_arr = json_object_new_array();
+ json_object_object_add(json_iface,
+ "groups",
+ json_grp_arr);
+ }
+
+ json_grp = json_object_new_object();
+ json_object_string_add(json_grp, "source",
+ source_str);
+ json_object_string_add(json_grp, "group",
+ group_str);
+ json_object_string_add(json_grp, "primaryAddr",
+ pri_addr_str);
+ json_object_array_add(json_grp_arr, json_grp);
+ } else {
+ vty_out(vty, "%-16s %-15s %-15s %-15s\n",
+ ifp->name, pri_addr_str, source_str,
+ group_str);
+ }
+ } /* for (pim_ifp->static_group_list) */
+
+ } /* for (iflist) */
+
+ if (uj)
+ vty_json(vty, json);
+}
+
static void igmp_show_statistics(struct pim_instance *pim, struct vty *vty,
const char *ifname, bool uj)
{
@@ -1724,6 +1809,15 @@ DEFUN (show_ip_igmp_join,
return CMD_SUCCESS;
}
+ALIAS (show_ip_igmp_join,
+ show_ip_igmp_join_group_cmd,
+ "show ip igmp [vrf NAME] join-group [json]",
+ SHOW_STR
+ IP_STR
+ IGMP_STR
+ VRF_CMD_HELP_STR
+ "IGMP static join information\n"
+ JSON_STR);
DEFUN (show_ip_igmp_join_vrf_all,
show_ip_igmp_join_vrf_all_cmd,
@@ -1756,6 +1850,69 @@ DEFUN (show_ip_igmp_join_vrf_all,
return CMD_SUCCESS;
}
+ALIAS (show_ip_igmp_join_vrf_all,
+ show_ip_igmp_join_group_vrf_all_cmd,
+ "show ip igmp vrf all join-group [json]",
+ SHOW_STR
+ IP_STR
+ IGMP_STR
+ VRF_CMD_HELP_STR
+ "IGMP static join information\n"
+ JSON_STR);
+
+DEFUN (show_ip_igmp_static_group,
+ show_ip_igmp_static_group_cmd,
+ "show ip igmp [vrf NAME] static-group [json]",
+ SHOW_STR
+ IP_STR
+ IGMP_STR
+ VRF_CMD_HELP_STR
+ "Static group information\n"
+ JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ igmp_show_interface_static_group(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_igmp_static_group_vrf_all,
+ show_ip_igmp_static_group_vrf_all_cmd,
+ "show ip igmp vrf all static-group [json]",
+ SHOW_STR
+ IP_STR
+ IGMP_STR
+ VRF_CMD_HELP_STR
+ "Static group information\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ bool first = true;
+
+ if (uj)
+ vty_out(vty, "{ ");
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ if (!first)
+ vty_out(vty, ", ");
+ vty_out(vty, " \"%s\": ", vrf->name);
+ first = false;
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ igmp_show_interface_static_group(vrf->info, vty, uj);
+ }
+ if (uj)
+ vty_out(vty, "}\n");
+
+ return CMD_SUCCESS;
+}
DEFPY(show_ip_igmp_groups,
show_ip_igmp_groups_cmd,
@@ -4924,71 +5081,47 @@ DEFUN (interface_no_ip_igmp,
"frr-routing:ipv4");
}
-DEFUN (interface_ip_igmp_join,
- interface_ip_igmp_join_cmd,
- "ip igmp join A.B.C.D [A.B.C.D]",
- IP_STR
- IFACE_IGMP_STR
- "IGMP join multicast group\n"
- "Multicast group address\n"
- "Source address\n")
+DEFPY_YANG_HIDDEN (interface_ip_igmp_join,
+ interface_ip_igmp_join_cmd,
+ "[no] ip igmp join A.B.C.D$grp [A.B.C.D]$src",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ "IGMP join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
{
- int idx_group = 3;
- int idx_source = 4;
- const char *source_str;
- char xpath[XPATH_MAXLEN];
-
- if (argc == 5) {
- source_str = argv[idx_source]->arg;
-
- if (strcmp(source_str, "0.0.0.0") == 0) {
- vty_out(vty, "Bad source address %s\n",
- argv[idx_source]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
- } else
- source_str = "0.0.0.0";
-
- snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH,
- "frr-routing:ipv4", argv[idx_group]->arg, source_str);
-
- nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
-
- return nb_cli_apply_changes(vty, NULL);
+ nb_cli_enqueue_change(vty, ".", (!no ? NB_OP_CREATE : NB_OP_DESTROY),
+ NULL);
+ return nb_cli_apply_changes(vty, FRR_GMP_JOIN_GROUP_XPATH,
+ "frr-routing:ipv4", grp_str,
+ (src_str ? src_str : "0.0.0.0"));
}
-
-DEFUN (interface_no_ip_igmp_join,
- interface_no_ip_igmp_join_cmd,
- "no ip igmp join A.B.C.D [A.B.C.D]",
- NO_STR
- IP_STR
- IFACE_IGMP_STR
- "IGMP join multicast group\n"
- "Multicast group address\n"
- "Source address\n")
-{
- int idx_group = 4;
- int idx_source = 5;
- const char *source_str;
- char xpath[XPATH_MAXLEN];
-
- if (argc == 6) {
- source_str = argv[idx_source]->arg;
-
- if (strcmp(source_str, "0.0.0.0") == 0) {
- vty_out(vty, "Bad source address %s\n",
- argv[idx_source]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
- } else
- source_str = "0.0.0.0";
-
- snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH,
- "frr-routing:ipv4", argv[idx_group]->arg, source_str);
-
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
-
- return nb_cli_apply_changes(vty, NULL);
+ALIAS(interface_ip_igmp_join,
+ interface_ip_igmp_join_group_cmd,
+ "[no] ip igmp join-group A.B.C.D$grp [A.B.C.D]$src",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ "IGMP join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n");
+
+DEFPY_YANG (interface_ip_igmp_static_group,
+ interface_ip_igmp_static_group_cmd,
+ "[no] ip igmp static-group A.B.C.D$grp [A.B.C.D]$src",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ "Static multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ nb_cli_enqueue_change(vty, ".", (!no ? NB_OP_CREATE : NB_OP_DESTROY),
+ NULL);
+ return nb_cli_apply_changes(vty, FRR_GMP_STATIC_GROUP_XPATH,
+ "frr-routing:ipv4", grp_str,
+ (src_str ? src_str : "0.0.0.0"));
}
DEFUN (interface_ip_igmp_query_interval,
@@ -8420,7 +8553,8 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd);
- install_element(INTERFACE_NODE, &interface_no_ip_igmp_join_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_igmp_join_group_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_igmp_static_group_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_version_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_igmp_version_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_query_interval_cmd);
@@ -8480,7 +8614,11 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ip_igmp_interface_cmd);
install_element(VIEW_NODE, &show_ip_igmp_interface_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_igmp_join_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_join_group_cmd);
install_element(VIEW_NODE, &show_ip_igmp_join_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_join_group_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_static_group_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_static_group_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_igmp_groups_cmd);
install_element(VIEW_NODE, &show_ip_igmp_groups_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_igmp_groups_retransmissions_cmd);
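For reference, a sketch of the table output produced by the new `show ip igmp static-group` command, inferred from the header string and the "%-16s %-15s %-15s %-15s" row format in igmp_show_interface_static_group(); the interface and addresses are placeholders:

    r1# show ip igmp static-group
    Interface        Address         Source          Group
    r1-eth0          10.0.0.1        10.1.1.10       225.1.1.2

`show ip igmp join-group` (plus its `vrf all` variant) is an alias of the existing `show ip igmp join` command, and all of these accept an optional `json` argument.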
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index cdcca0ebe6..45a2435ae5 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -37,10 +37,12 @@
#include "pim_jp_agg.h"
#include "pim_igmp_join.h"
#include "pim_vxlan.h"
+#include "pim_tib.h"
#include "pim6_mld.h"
static void pim_if_gm_join_del_all(struct interface *ifp);
+static void pim_if_static_group_del_all(struct interface *ifp);
static int gm_join_sock(const char *ifname, ifindex_t ifindex,
pim_addr group_addr, pim_addr source_addr,
@@ -144,6 +146,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool gm, bool pim,
pim_ifp->gm_enable = gm;
pim_ifp->gm_join_list = NULL;
+ pim_ifp->static_group_list = NULL;
pim_ifp->pim_neighbor_list = NULL;
pim_ifp->upstream_switch_list = NULL;
pim_ifp->pim_generation_id = 0;
@@ -188,9 +191,11 @@ void pim_if_delete(struct interface *ifp)
assert(pim_ifp);
pim_ifp->pim->mcast_if_count--;
- if (pim_ifp->gm_join_list) {
+ if (pim_ifp->gm_join_list)
pim_if_gm_join_del_all(ifp);
- }
+
+ if (pim_ifp->static_group_list)
+ pim_if_static_group_del_all(ifp);
pim_ifchannel_delete_all(ifp);
#if PIM_IPV == 4
@@ -1218,6 +1223,11 @@ static void gm_join_free(struct gm_join *ij)
XFREE(MTYPE_PIM_IGMP_JOIN, ij);
}
+static void static_group_free(struct static_group *stgrp)
+{
+ XFREE(MTYPE_PIM_STATIC_GROUP, stgrp);
+}
+
static struct gm_join *gm_join_find(struct list *join_list, pim_addr group_addr,
pim_addr source_addr)
{
@@ -1232,7 +1242,25 @@ static struct gm_join *gm_join_find(struct list *join_list, pim_addr group_addr,
return ij;
}
- return 0;
+ return NULL;
+}
+
+static struct static_group *static_group_find(struct list *static_group_list,
+ pim_addr group_addr,
+ pim_addr source_addr)
+{
+ struct listnode *node;
+ struct static_group *stgrp;
+
+ assert(static_group_list);
+
+ for (ALL_LIST_ELEMENTS_RO(static_group_list, node, stgrp)) {
+ if ((!pim_addr_cmp(group_addr, stgrp->group_addr)) &&
+ (!pim_addr_cmp(source_addr, stgrp->source_addr)))
+ return stgrp;
+ }
+
+ return NULL;
}
static int gm_join_sock(const char *ifname, ifindex_t ifindex,
@@ -1296,6 +1324,34 @@ static struct gm_join *gm_join_new(struct interface *ifp, pim_addr group_addr,
return ij;
}
+static struct static_group *static_group_new(struct interface *ifp,
+ pim_addr group_addr,
+ pim_addr source_addr)
+{
+ struct pim_interface *pim_ifp;
+ struct static_group *stgrp;
+ pim_sgaddr sg;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ stgrp = XCALLOC(MTYPE_PIM_STATIC_GROUP, sizeof(*stgrp));
+
+ stgrp->group_addr = group_addr;
+ stgrp->source_addr = source_addr;
+ stgrp->oilp = NULL;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = source_addr;
+ sg.grp = group_addr;
+
+ tib_sg_gm_join(pim_ifp->pim, sg, ifp, &(stgrp->oilp));
+
+ listnode_add(pim_ifp->static_group_list, stgrp);
+
+ return stgrp;
+}
+
ferr_r pim_if_gm_join_add(struct interface *ifp, pim_addr group_addr,
pim_addr source_addr)
{
@@ -1382,7 +1438,6 @@ int pim_if_gm_join_del(struct interface *ifp, pim_addr group_addr,
return 0;
}
-__attribute__((unused))
static void pim_if_gm_join_del_all(struct interface *ifp)
{
struct pim_interface *pim_ifp;
@@ -1404,6 +1459,109 @@ static void pim_if_gm_join_del_all(struct interface *ifp)
pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr);
}
+ferr_r pim_if_static_group_add(struct interface *ifp, pim_addr group_addr,
+ pim_addr source_addr)
+{
+ struct pim_interface *pim_ifp;
+ struct static_group *stgrp;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ return ferr_cfg_invalid("multicast not enabled on interface %s",
+ ifp->name);
+ }
+
+ if (!pim_ifp->static_group_list) {
+ pim_ifp->static_group_list = list_new();
+ pim_ifp->static_group_list->del =
+ (void (*)(void *))static_group_free;
+ }
+
+ stgrp = static_group_find(pim_ifp->static_group_list, group_addr,
+ source_addr);
+
+ /* This interface has already been configured with this static group
+ */
+ if (stgrp)
+ return ferr_ok();
+
+ (void)static_group_new(ifp, group_addr, source_addr);
+
+ if (PIM_DEBUG_GM_EVENTS) {
+ zlog_debug("%s: Added static group (S,G)=(%pPA,%pPA) on interface %s",
+ __func__, &source_addr, &group_addr, ifp->name);
+ }
+
+ return ferr_ok();
+}
+
+int pim_if_static_group_del(struct interface *ifp, pim_addr group_addr,
+ pim_addr source_addr)
+{
+ struct pim_interface *pim_ifp;
+ struct static_group *stgrp;
+ pim_sgaddr sg;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ zlog_warn("%s: multicast not enabled on interface %s", __func__,
+ ifp->name);
+ return -1;
+ }
+
+ if (!pim_ifp->static_group_list) {
+ zlog_warn("%s: no static groups on interface %s", __func__,
+ ifp->name);
+ return -2;
+ }
+
+ stgrp = static_group_find(pim_ifp->static_group_list, group_addr,
+ source_addr);
+ if (!stgrp) {
+ zlog_warn("%s: could not find static group %pPAs source %pPAs on interface %s",
+ __func__, &group_addr, &source_addr, ifp->name);
+ return -3;
+ }
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = source_addr;
+ sg.grp = group_addr;
+
+ tib_sg_gm_prune(pim_ifp->pim, sg, ifp, &(stgrp->oilp));
+
+ listnode_delete(pim_ifp->static_group_list, stgrp);
+ static_group_free(stgrp);
+ if (listcount(pim_ifp->static_group_list) < 1) {
+ list_delete(&pim_ifp->static_group_list);
+ pim_ifp->static_group_list = 0;
+ }
+
+ return 0;
+}
+
+static void pim_if_static_group_del_all(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+ struct listnode *node;
+ struct listnode *nextnode;
+ struct static_group *stgrp;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ zlog_warn("%s: multicast not enabled on interface %s", __func__,
+ ifp->name);
+ return;
+ }
+
+ if (!pim_ifp->static_group_list)
+ return;
+
+ for (ALL_LIST_ELEMENTS(pim_ifp->static_group_list, node, nextnode,
+ stgrp))
+ pim_if_static_group_del(ifp, stgrp->group_addr,
+ stgrp->source_addr);
+}
+
/*
RFC 4601
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 0312f719d3..4d20379665 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -98,6 +98,7 @@ struct pim_interface {
*/
struct list *gm_socket_list; /* list of struct IGMP or MLD sock */
struct list *gm_join_list; /* list of struct IGMP or MLD join */
+ struct list *static_group_list; /* list of struct static group */
struct list *gm_group_list; /* list of struct IGMP or MLD group */
struct hash *gm_group_hash;
@@ -222,6 +223,11 @@ ferr_r pim_if_gm_join_add(struct interface *ifp, pim_addr group_addr,
int pim_if_gm_join_del(struct interface *ifp, pim_addr group_addr,
pim_addr source_addr);
+ferr_r pim_if_static_group_add(struct interface *ifp, pim_addr group_addr,
+ pim_addr source_addr);
+int pim_if_static_group_del(struct interface *ifp, pim_addr group_addr,
+ pim_addr source_addr);
+
void pim_if_update_could_assert(struct interface *ifp);
void pim_if_assert_on_neighbor_down(struct interface *ifp, pim_addr neigh_addr);
diff --git a/pimd/pim_igmp.h b/pimd/pim_igmp.h
index a1f19b3c6e..de0ec01a65 100644
--- a/pimd/pim_igmp.h
+++ b/pimd/pim_igmp.h
@@ -58,6 +58,12 @@ struct gm_join {
time_t sock_creation;
};
+struct static_group {
+ pim_addr group_addr;
+ pim_addr source_addr;
+ struct channel_oil *oilp;
+};
+
struct gm_sock {
int fd;
struct interface *interface;
diff --git a/pimd/pim_memory.c b/pimd/pim_memory.c
index 604e24482d..2c35bc6473 100644
--- a/pimd/pim_memory.c
+++ b/pimd/pim_memory.c
@@ -14,6 +14,7 @@ DEFINE_MGROUP(PIMD, "pimd");
DEFINE_MTYPE(PIMD, PIM_CHANNEL_OIL, "PIM SSM (S,G) channel OIL");
DEFINE_MTYPE(PIMD, PIM_INTERFACE, "PIM interface");
DEFINE_MTYPE(PIMD, PIM_IGMP_JOIN, "PIM interface IGMP static join");
+DEFINE_MTYPE(PIMD, PIM_STATIC_GROUP, "PIM interface IGMP static group");
DEFINE_MTYPE(PIMD, PIM_IGMP_SOCKET, "PIM interface IGMP socket");
DEFINE_MTYPE(PIMD, PIM_IGMP_GROUP, "PIM interface IGMP group");
DEFINE_MTYPE(PIMD, PIM_IGMP_GROUP_SOURCE, "PIM interface IGMP source");
diff --git a/pimd/pim_memory.h b/pimd/pim_memory.h
index 353e09a71c..b44d3e191a 100644
--- a/pimd/pim_memory.h
+++ b/pimd/pim_memory.h
@@ -13,6 +13,7 @@ DECLARE_MGROUP(PIMD);
DECLARE_MTYPE(PIM_CHANNEL_OIL);
DECLARE_MTYPE(PIM_INTERFACE);
DECLARE_MTYPE(PIM_IGMP_JOIN);
+DECLARE_MTYPE(PIM_STATIC_GROUP);
DECLARE_MTYPE(PIM_IGMP_SOCKET);
DECLARE_MTYPE(PIM_IGMP_GROUP);
DECLARE_MTYPE(PIM_IGMP_GROUP_SOURCE);
diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c
index 72b5bdefc9..c154c18afa 100644
--- a/pimd/pim_nb.c
+++ b/pimd/pim_nb.c
@@ -441,6 +441,13 @@ const struct frr_yang_module_info frr_gmp_info = {
}
},
{
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/join-group",
+ .cbs = {
+ .create = lib_interface_gmp_address_family_join_group_create,
+ .destroy = lib_interface_gmp_address_family_join_group_destroy,
+ }
+ },
+ {
.xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/static-group",
.cbs = {
.create = lib_interface_gmp_address_family_static_group_create,
@@ -452,4 +459,3 @@ const struct frr_yang_module_info frr_gmp_info = {
},
}
};
-
diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h
index 3c7ab49ab3..fc4c11cea9 100644
--- a/pimd/pim_nb.h
+++ b/pimd/pim_nb.h
@@ -182,6 +182,10 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify(
struct nb_cb_modify_args *args);
int lib_interface_gmp_address_family_robustness_variable_modify(
struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_join_group_create(
+ struct nb_cb_create_args *args);
+int lib_interface_gmp_address_family_join_group_destroy(
+ struct nb_cb_destroy_args *args);
int lib_interface_gmp_address_family_static_group_create(
struct nb_cb_create_args *args);
int lib_interface_gmp_address_family_static_group_destroy(
@@ -219,8 +223,11 @@ int routing_control_plane_protocols_name_validate(
"./frr-gmp:gmp/address-family[address-family='%s']"
#define FRR_GMP_ENABLE_XPATH \
"%s/frr-gmp:gmp/address-family[address-family='%s']/enable"
-#define FRR_GMP_JOIN_XPATH \
- "./frr-gmp:gmp/address-family[address-family='%s']/" \
+#define FRR_GMP_JOIN_GROUP_XPATH \
+ "./frr-gmp:gmp/address-family[address-family='%s']/" \
+ "join-group[group-addr='%s'][source-addr='%s']"
+#define FRR_GMP_STATIC_GROUP_XPATH \
+ "./frr-gmp:gmp/address-family[address-family='%s']/" \
"static-group[group-addr='%s'][source-addr='%s']"
#endif /* _FRR_PIM_NB_H_ */
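For orientation: with the arguments passed from pim_cmd.c, a command such as `ip igmp static-group 225.1.1.2` (source defaulting to 0.0.0.0; the values here are placeholders) resolves FRR_GMP_STATIC_GROUP_XPATH to roughly the following path, relative to the interface node:

    ./frr-gmp:gmp/address-family[address-family='frr-routing:ipv4']/static-group[group-addr='225.1.1.2'][source-addr='0.0.0.0']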
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index bc7338ce18..037bfea786 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -2989,9 +2989,9 @@ int lib_interface_gmp_address_family_robustness_variable_modify(
}
/*
- * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/static-group
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/join-group
*/
-int lib_interface_gmp_address_family_static_group_create(
+int lib_interface_gmp_address_family_join_group_create(
struct nb_cb_create_args *args)
{
struct interface *ifp;
@@ -3049,7 +3049,7 @@ int lib_interface_gmp_address_family_static_group_create(
return NB_OK;
}
-int lib_interface_gmp_address_family_static_group_destroy(
+int lib_interface_gmp_address_family_join_group_destroy(
struct nb_cb_destroy_args *args)
{
struct interface *ifp;
@@ -3084,3 +3084,94 @@ int lib_interface_gmp_address_family_static_group_destroy(
return NB_OK;
}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/static-group
+ */
+int lib_interface_gmp_address_family_static_group_create(
+ struct nb_cb_create_args *args)
+{
+ struct interface *ifp;
+ pim_addr source_addr;
+ pim_addr group_addr;
+ int result;
+ const char *ifp_name;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ ifp_name = yang_dnode_get_string(if_dnode, "name");
+ snprintf(args->errmsg, args->errmsg_len,
+ "multicast not enabled on interface %s",
+ ifp_name);
+ return NB_ERR_VALIDATION;
+ }
+
+ yang_dnode_get_pimaddr(&group_addr, args->dnode, "./group-addr");
+#if PIM_IPV == 4
+ if (pim_is_group_224_0_0_0_24(group_addr)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Groups within 224.0.0.0/24 are reserved and cannot be joined");
+ return NB_ERR_VALIDATION;
+ }
+#else
+ if (ipv6_mcast_reserved(&group_addr)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Groups within ffx2::/16 are reserved and cannot be joined");
+ return NB_ERR_VALIDATION;
+ }
+#endif
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ yang_dnode_get_pimaddr(&source_addr, args->dnode,
+ "./source-addr");
+ yang_dnode_get_pimaddr(&group_addr, args->dnode, "./group-addr");
+ result = pim_if_static_group_add(ifp, group_addr, source_addr);
+ if (result) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Failure adding static group");
+ return NB_ERR_INCONSISTENCY;
+ }
+ }
+ return NB_OK;
+}
+
+int lib_interface_gmp_address_family_static_group_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ pim_addr source_addr;
+ pim_addr group_addr;
+ int result;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ yang_dnode_get_pimaddr(&source_addr, args->dnode,
+ "./source-addr");
+ yang_dnode_get_pimaddr(&group_addr, args->dnode, "./group-addr");
+ result = pim_if_static_group_del(ifp, group_addr, source_addr);
+
+ if (result) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Failure removing static group %pPAs %pPAs on interface %s: %d",
+ &source_addr, &group_addr, ifp->name, result);
+
+ return NB_ERR_INCONSISTENCY;
+ }
+
+ break;
+ }
+
+ return NB_OK;
+}
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index 1910a68495..9cf4bb3e87 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -306,21 +306,38 @@ static int gm_config_write(struct vty *vty, int writes,
++writes;
}
- /* IF ip igmp join */
+ /* IF ip igmp join-group */
if (pim_ifp->gm_join_list) {
struct listnode *node;
struct gm_join *ij;
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
if (pim_addr_is_any(ij->source_addr))
- vty_out(vty, " ip igmp join %pPAs\n",
+ vty_out(vty, " ip igmp join-group %pPAs\n",
&ij->group_addr);
else
- vty_out(vty, " ip igmp join %pPAs %pPAs\n",
+ vty_out(vty, " ip igmp join-group %pPAs %pPAs\n",
&ij->group_addr, &ij->source_addr);
++writes;
}
}
+ /* IF ip igmp static-group */
+ if (pim_ifp->static_group_list) {
+ struct listnode *node;
+ struct static_group *stgrp;
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->static_group_list, node,
+ stgrp)) {
+ if (pim_addr_is_any(stgrp->source_addr))
+ vty_out(vty, " ip igmp static-group %pPAs\n",
+ &stgrp->group_addr);
+ else
+ vty_out(vty,
+ " ip igmp static-group %pPAs %pPAs\n",
+ &stgrp->group_addr, &stgrp->source_addr);
+ ++writes;
+ }
+ }
+
return writes;
}
#else
@@ -358,21 +375,41 @@ static int gm_config_write(struct vty *vty, int writes,
vty_out(vty, " ipv6 mld last-member-query-interval %d\n",
pim_ifp->gm_specific_query_max_response_time_dsec);
- /* IF ipv6 mld join */
+ /* IF ipv6 mld join-group */
if (pim_ifp->gm_join_list) {
struct listnode *node;
struct gm_join *ij;
+
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
if (pim_addr_is_any(ij->source_addr))
- vty_out(vty, " ipv6 mld join %pPAs\n",
+ vty_out(vty, " ipv6 mld join-group %pPAs\n",
&ij->group_addr);
else
- vty_out(vty, " ipv6 mld join %pPAs %pPAs\n",
+ vty_out(vty,
+ " ipv6 mld join-group %pPAs %pPAs\n",
&ij->group_addr, &ij->source_addr);
++writes;
}
}
+ /* IF ipv6 mld static-group */
+ if (pim_ifp->static_group_list) {
+ struct listnode *node;
+ struct static_group *stgrp;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->static_group_list, node,
+ stgrp)) {
+ if (pim_addr_is_any(stgrp->source_addr))
+ vty_out(vty, " ipv6 mld static-group %pPAs\n",
+ &stgrp->group_addr);
+ else
+ vty_out(vty,
+ " ipv6 mld static-group %pPAs %pPAs\n",
+ &stgrp->group_addr, &stgrp->source_addr);
+ ++writes;
+ }
+ }
+
return writes;
}
#endif
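A side effect of the config-write changes: entries created through the hidden legacy commands are saved with the new keywords, so an interface configured with, for example, `ip igmp join 225.1.1.1` (a placeholder group) is written back as:

     ip igmp join-group 225.1.1.1

and likewise `ipv6 mld join` entries are saved as `ipv6 mld join-group`.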
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index cc56ffdd8c..71e36b6229 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -332,6 +332,13 @@ def create_igmp_config(tgen, topo, input_dict=None, build=False):
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ if attribute == "static-group":
+ for group in data:
+ cmd = "ip {} {} {}".format(protocol, attribute, group)
+ if del_attr:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
if attribute == "query":
for query, value in data.items():
if query != "delete":
@@ -4253,6 +4260,75 @@ def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
+@retry(retry_timeout=62)
+def verify_static_groups(tgen, dut, interface, group_addresses):
+ """
+ Verify static groups are received from an intended interface
+ by running "show ip igmp static-group json" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `interface`: interface, from which IGMP groups are configured
+ * `group_addresses`: IGMP group address
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+ result = verify_static_groups(tgen, dut, interface, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying static groups received:", dut)
+ show_static_group_json = run_frr_cmd(rnode, "show ip igmp static-group json", isjson=True)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface not in show_static_group_json:
+ errormsg = (
+ "[DUT %s]: Verifying static group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ for grp_addr in group_addresses:
+ found = False
+ for index in show_static_group_json[interface]["groups"]:
+ if index["group"] == grp_addr:
+ found = True
+ break
+ if not found:
+ errormsg = (
+ "[DUT %s]: Verifying static group received"
+ " from interface %s [FAILED]!! "
+ " Expected: %s " % (dut, interface, grp_addr)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying static group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type="ipv4"):
"""
diff --git a/tests/topotests/multicast_pim_uplink_topo4/multicast_pim_uplink_topo4.json b/tests/topotests/multicast_pim_uplink_topo4/multicast_pim_uplink_topo4.json
new file mode 100644
index 0000000000..dc9e1ac49b
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo4/multicast_pim_uplink_topo4.json
@@ -0,0 +1,295 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"},
+ "r5": {"ipv4": "auto", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"},
+ "i9": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r5": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r1": {
+ "dest_link": {
+ "r4": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"},
+ "i8": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "500",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r5": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "i1": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "r3": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i8": {
+ "links": {
+ "r5": {"ipv4": "auto"}
+ }
+ },
+ "i9": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ }
+
+ }
+}
diff --git a/tests/topotests/multicast_pim_uplink_topo4/test_multicast_pim_uplink_topo4.py b/tests/topotests/multicast_pim_uplink_topo4/test_multicast_pim_uplink_topo4.py
new file mode 100644
index 0000000000..d384baa452
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo4/test_multicast_pim_uplink_topo4.py
@@ -0,0 +1,893 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2024 by Architecture Technology Corp. (ATCorp)
+#
+
+"""
+Following tests are covered to test multicast pim sm:
+
+1. TC:1 Verify static group populated when "ip igmp static-group <grp>" is configured
+2. TC:2 Verify mroute and upstream populated with correct OIL/IIF with static group
+3. TC:3 Verify static group not allowed for "224.0.0.0/24" and non-multicast groups
+4. TC:4 Verify static group removed from DUT while removing "ip igmp static-group" CLI
+5. TC:5 Verify static groups after removing and adding static-group config
+"""
+
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ addKernelRoute,
+ reset_config_on_routers,
+ shutdown_bringup_interface,
+ required_linux_kernel_version,
+)
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_mroutes,
+ clear_pim_interface_traffic,
+ verify_upstream_iif,
+ clear_mroute,
+ verify_pim_rp_info,
+ verify_static_groups,
+ McastTesterHelper,
+)
+from lib.bgp import (
+ verify_bgp_convergence,
+)
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+# Global variables
+TOPOLOGY = """
+
+ i9 i3-+-i4 i6-+-i7
+ | | |
+ i1--- R1-------R2----------R4------R5---i8
+ | | |
+ i2 R3-------------------+
+ +
+ |
+ i5
+
+ Description:
+    i1, i2, i3, i4, i5, i6, i7, i8 - FRR running iperf to send IGMP
+ join and traffic
+ R1 - DUT (LHR/FHR)
+ R2 - RP
+ R3 - Transit
+ R4 - (LHR/FHR)
+ R5 - Transit
+"""
+# Global variables
+RP_RANGE1 = "226.0.0.1/32"
+RP_RANGE2 = "226.0.0.2/32"
+RP_RANGE3 = "226.0.0.3/32"
+RP_RANGE4 = "226.0.0.4/32"
+RP_RANGE5 = "226.0.0.5/32"
+RP_RANGE6 = "232.0.0.1/32"
+RP_RANGE7 = "232.0.0.2/32"
+RP_RANGE8 = "232.0.0.3/32"
+RP_RANGE9 = "232.0.0.4/32"
+RP_RANGE10 = "232.0.0.5/32"
+
+GROUP_RANGE = "224.0.0.0/4"
+IGMP_GROUP = "225.1.1.1/32"
+IGMP_JOIN = "225.1.1.1"
+GROUP_RANGE_1 = [
+ "225.1.1.1/32",
+ "225.1.1.2/32",
+ "225.1.1.3/32",
+ "225.1.1.4/32",
+ "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+IGMP_JOIN_RANGE_2 = ["224.0.0.1", "224.0.0.2", "224.0.0.3", "192.0.0.4", "192.0.0.5"]
+IGMP_JOIN_RANGE_3 = [
+ "226.0.0.1",
+ "226.0.0.2",
+ "226.0.0.3",
+ "226.0.0.4",
+ "226.0.0.5",
+ "232.0.0.1",
+ "232.0.0.2",
+ "232.0.0.3",
+ "232.0.0.4",
+ "232.0.0.5",
+]
+GROUP_RANGE_3 = [
+ "226.0.0.1/32",
+ "226.0.0.2/32",
+ "226.0.0.3/32",
+ "226.0.0.4/32",
+ "226.0.0.5/32",
+ "232.0.0.1/32",
+ "232.0.0.2/32",
+ "232.0.0.3/32",
+ "232.0.0.4/32",
+ "232.0.0.5/32",
+]
+
+r1_r2_links = []
+r1_r3_links = []
+r2_r1_links = []
+r2_r4_links = []
+r3_r1_links = []
+r3_r4_links = []
+r4_r2_links = []
+r4_r3_links = []
+
+pytestmark = [pytest.mark.pimd]
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.19")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ testdir = os.path.dirname(os.path.realpath(__file__))
+ json_file = "{}/multicast_pim_uplink_topo4.json".format(testdir)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, tgen.json_topo)
+
+ # Pre-requisite data
+ get_interfaces_names(topo)
+
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
+ # Verify BGP convergence
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
+def get_interfaces_names(topo):
+ """
+ API to fetch interfaces names and create list, which further would be used
+ for verification
+
+ Parameters
+ ----------
+    * `topo` : input JSON data
+ """
+
+ for link in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(link)]["interface"]
+ r1_r2_links.append(intf)
+
+ intf = topo["routers"]["r1"]["links"]["r3-link{}".format(link)]["interface"]
+ r1_r3_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r1-link{}".format(link)]["interface"]
+ r2_r1_links.append(intf)
+
+ intf = topo["routers"]["r3"]["links"]["r1-link{}".format(link)]["interface"]
+ r3_r1_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r4-link{}".format(link)]["interface"]
+ r2_r4_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(link)]["interface"]
+ r4_r2_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r3-link{}".format(link)]["interface"]
+ r4_r3_links.append(intf)
+
+
+def shutdown_interfaces(tgen):
+ """
+    API to shut down interfaces that are not
+    used by the testcases in this TDS
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+
+ """
+ logger.info("shutting down extra interfaces")
+ intf_r1_r4 = topo["routers"]["r1"]["links"]["r4"]["interface"]
+ intf_r1_r5 = topo["routers"]["r1"]["links"]["r5"]["interface"]
+ intf_r4_r1 = topo["routers"]["r4"]["links"]["r1"]["interface"]
+ intf_r5_r1 = topo["routers"]["r5"]["links"]["r1"]["interface"]
+ intf_r4_r5 = topo["routers"]["r4"]["links"]["r5"]["interface"]
+ intf_r5_r4 = topo["routers"]["r5"]["links"]["r4"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r4, False)
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r5, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r1, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r1, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r5, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r4, False)
+
+
+def config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
+):
+ """
+ API to do pre-configuration to send IGMP join and multicast
+ traffic
+
+ parameters:
+ -----------
+ * `tgen`: topogen object
+ * `topo`: input json data
+ * `tc_name`: caller test case name
+ * `iperf`: router running iperf
+    * `iperf_intf`: interface name on the router running iperf
+ * `GROUP_RANGE`: group range
+ * `join`: IGMP join, default False
+ * `traffic`: multicast traffic, default False
+ """
+
+ if join:
+        # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ if traffic:
+        # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ router_list = tgen.routers()
+ for router in router_list.keys():
+ if router == iperf:
+ continue
+
+ rnode = router_list[router]
+ rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
+
+ return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_ip_igmp_static_groups_p0(request):
+ """
+    TC_1 Verify static group populated when
+    "ip igmp static-group <grp>" is configured
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Verify BGP convergence
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+    step("Enable the IGMP on R11 interface of R1 and configure static groups")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {"igmp": {"static-group": IGMP_JOIN_RANGE_1}},
+ intf_r1_i2: {"igmp": {"static-group": IGMP_JOIN_RANGE_1}},
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify static group using show ip igmp static-group")
+ dut = "r1"
+ interfaces = [intf_r1_i1, intf_r1_i2]
+ for interface in interfaces:
+ result = verify_static_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_mroute_with_igmp_static_groups_p0(request):
+ """
+ TC_2 Verify mroute and upstream populated with correct OIL/IIF with
+ static groups
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Verify BGP convergence
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+    step("Enable the IGMP on R11 interface of R1 and configure static groups")
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {"igmp": {"version": "2", "static-group": IGMP_JOIN_RANGE_1}},
+ intf_r1_i2: {"igmp": {"version": "2", "static-group": IGMP_JOIN_RANGE_1}},
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify static group using show ip igmp static-group")
+ dut = "r1"
+ interfaces = [intf_r1_i1, intf_r1_i2]
+ for interface in interfaces:
+ result = verify_static_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify RP-info populated in DUT")
+ dut = "r1"
+ rp_address = topo["routers"]["r2"]["links"]["lo"]["ipv4"].split("/")[0]
+ SOURCE = "Static"
+ oif = r1_r2_links
+ result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Send traffic from R4 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+
+ r1_r2_r3 = r1_r2_links + r1_r3_links
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links,
+ "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ },
+ ]
+
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ },
+ ]
+
+    step("Verify mroutes and IIF upstream for static groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify mroutes not created with local interface ip ")
+
+ input_dict_local_sg = [
+ {
+ "dut": "r1",
+ "src_address": intf_r1_i1,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": intf_r1_i2,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_local_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed Error: {}"
+ "sg created with local interface ip".format(tc_name, result)
+ )
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed Error: {}"
+ "upstream created with local interface ip".format(tc_name, result)
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_igmp_static_group_with_reserved_address_p0(request):
+ """
+ TC_3 Verify static group not allowed for "224.0.0.0/24"
+    and non-multicast groups
+ """
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Verify BGP convergence
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+ step("Enable the IGMP on R11 interface of R1 and configure static groups")
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+                    intf_r1_i1: {"igmp": {"version": "2", "static-group": IGMP_JOIN_RANGE_2}}
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("verify static group using show ip igmp static-group")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_static_groups(
+ tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Error: {}" "static group still present".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_remove_add_igmp_static_groups_p1(request):
+ """
+ TC_4 Verify static group removed from DUT while
+ removing "ip igmp static-group" CLI
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Verify BGP convergence
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+    step("Enable the IGMP on R11 interface of R1 and configure static groups")
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {"igmp": {"version": "2", "static-group": IGMP_JOIN_RANGE_1}}
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify static group using show ip igmp static-group")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_static_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("verify RP-info populated in DUT")
+ dut = "r1"
+ rp_address = topo["routers"]["r2"]["links"]["lo"]["ipv4"].split("/")[0]
+ SOURCE = "Static"
+ oif = r1_r2_links
+ result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Send traffic from R4 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+
+ logger.info("waiting 30 sec for SPT switchover")
+
+ r1_r2_r3 = r1_r2_links + r1_r3_links
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+    step("Verify mroutes and IIF upstream for static groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove static group from DUT")
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {
+ "igmp": {
+ "static-group": IGMP_JOIN_RANGE_1,
+ "delete_attr": True,
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("verify static group removed using show ip igmp static-group")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_static_groups(
+ tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Error: {}" "static group still present".format(
+ tc_name, result
+ )
+
+    step("Verify mroutes and IIF upstream for static groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: {}" "mroutes still present".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: {}" "mroutes still present".format(
+ tc_name, result
+ )
+
+ step("Add static group on DUT again")
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {
+ "igmp": {
+ "static-group": IGMP_JOIN_RANGE_1,
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("verify static group using show ip igmp static-group")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_static_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify mroutes and IIF upstream for static groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/yang/frr-gmp.yang b/yang/frr-gmp.yang
index c8a05a2bdb..e6a1f7f640 100644
--- a/yang/frr-gmp.yang
+++ b/yang/frr-gmp.yang
@@ -147,10 +147,10 @@ module frr-gmp {
expected packet loss on a network.";
}
- list static-group {
+ list join-group {
key "group-addr source-addr";
description
- "A static multicast route, (*,G) or (S,G).
+ "A static GMP join, (*,G) or (S,G).
The version of IGMP must be 3 to support (S,G).";
leaf group-addr {
@@ -164,6 +164,23 @@ module frr-gmp {
"Multicast source address.";
}
}
+
+ list static-group {
+ key "group-addr source-addr";
+ description
+ "A static multicast group without GMP, (*,G) or (S,G).";
+
+ leaf group-addr {
+ type rt-types:ip-multicast-group-address;
+ description
+ "Multicast group address.";
+ }
+ leaf source-addr {
+ type inet:ip-address;
+ description
+ "Multicast source address.";
+ }
+ }
} // interface-config-attributes
/*