-rw-r--r--  bgpd/bgp_attr.c  2
-rw-r--r--  bgpd/bgp_bfd.c  2
-rw-r--r--  bgpd/bgp_bmp.c  39
-rw-r--r--  bgpd/bgp_network.c  21
-rw-r--r--  bgpd/bgp_route.c  41
-rw-r--r--  bgpd/bgp_rpki.c  81
-rw-r--r--  bgpd/bgp_updgrp.c  12
-rw-r--r--  bgpd/bgp_vty.c  67
-rw-r--r--  bgpd/bgpd.h  2
-rw-r--r--  doc/user/rpki.rst  8
-rw-r--r--  doc/user/zebra.rst  6
-rw-r--r--  lib/nexthop_group.c  4
-rw-r--r--  lib/prefix.h  9
-rw-r--r--  nhrpd/nhrp_nhs.c  7
-rw-r--r--  nhrpd/nhrp_peer.c  9
-rw-r--r--  nhrpd/nhrp_shortcut.c  12
-rw-r--r--  nhrpd/nhrp_vty.c  14
-rw-r--r--  ospfclient/README  5
-rw-r--r--  pimd/pim_register.c  49
-rw-r--r--  pimd/pim_register.h  4
-rw-r--r--  tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json  294
-rw-r--r--  tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py  2537
-rw-r--r--  tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py  2437
-rw-r--r--  tests/topotests/lib/bgp.py  958
-rw-r--r--  tests/topotests/lib/pim.py  79
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json  226
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py  3327
-rw-r--r--  tests/topotests/ospf_basic_functionality/test_ospf_single_area.py  87
-rw-r--r--  zebra/interface.c  7
-rw-r--r--  zebra/zebra_nhg.c  36
-rw-r--r--  zebra/zebra_nhg.h  29
-rw-r--r--  zebra/zebra_rib.c  6
-rw-r--r--  zebra/zebra_router.c  2
-rw-r--r--  zebra/zebra_router.h  3
-rw-r--r--  zebra/zebra_vty.c  31
35 files changed, 10164 insertions, 289 deletions
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index 6784e63206..dace3f2f12 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -1037,8 +1037,6 @@ struct attr *bgp_attr_aggregate_intern(
else
attr.aggregator_as = bgp->as;
attr.aggregator_addr = bgp->router_id;
- attr.label_index = BGP_INVALID_LABEL_INDEX;
- attr.label = MPLS_INVALID_LABEL;
/* Apply route-map */
if (aggregate->rmap.name) {
diff --git a/bgpd/bgp_bfd.c b/bgpd/bgp_bfd.c
index a859b7ad0f..d66b916b95 100644
--- a/bgpd/bgp_bfd.c
+++ b/bgpd/bgp_bfd.c
@@ -63,7 +63,7 @@ static void bfd_session_status_update(struct bfd_session_params *bsp,
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)
&& bfd_sess_cbit(bsp) && !bss->remote_cbit) {
if (BGP_DEBUG(bfd, BFD_LIB))
- zlog_info(
+ zlog_debug(
"%s BFD DOWN message ignored in the process of graceful restart when C bit is cleared",
peer->host);
return;
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index 25712908df..b561b50ff5 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -1889,7 +1889,6 @@ static void bmp_active_thread(struct thread *t)
struct bmp_active *ba = THREAD_ARG(t);
socklen_t slen;
int status, ret;
- char buf[SU_ADDRSTRLEN];
vrf_id_t vrf_id;
/* all 3 end up here, though only timer or read+write are active
@@ -1915,16 +1914,16 @@ static void bmp_active_thread(struct thread *t)
ret = getsockopt(ba->socket, SOL_SOCKET, SO_ERROR, (void *)&status,
&slen);
- sockunion2str(&ba->addrs[ba->addrpos], buf, sizeof(buf));
if (ret < 0 || status != 0) {
ba->last_err = strerror(status);
- zlog_warn("bmp[%s]: failed to connect to %s:%d: %s",
- ba->hostname, buf, ba->port, ba->last_err);
+ zlog_warn("bmp[%s]: failed to connect to %pSU:%d: %s",
+ ba->hostname, &ba->addrs[ba->addrpos], ba->port,
+ ba->last_err);
goto out_next;
}
- zlog_warn("bmp[%s]: outbound connection to %s:%d",
- ba->hostname, buf, ba->port);
+ zlog_warn("bmp[%s]: outbound connection to %pSU:%d", ba->hostname,
+ &ba->addrs[ba->addrpos], ba->port);
ba->bmp = bmp_open(ba->targets, ba->socket);
if (!ba->bmp)
@@ -2317,7 +2316,6 @@ DEFPY(show_bmp,
struct bmp_active *ba;
struct bmp *bmp;
struct ttable *tt;
- char buf[SU_ADDRSTRLEN];
char uptime[BGP_UPTIME_LEN];
char *out;
@@ -2364,9 +2362,8 @@ DEFPY(show_bmp,
vty_out(vty, " Listeners:\n");
frr_each (bmp_listeners, &bt->listeners, bl)
- vty_out(vty, " %s:%d\n",
- sockunion2str(&bl->addr, buf,
- SU_ADDRSTRLEN), bl->port);
+ vty_out(vty, " %pSU:%d\n", &bl->addr,
+ bl->port);
vty_out(vty, "\n Outbound connections:\n");
tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
@@ -2379,13 +2376,11 @@ DEFPY(show_bmp,
peer_uptime(ba->bmp->t_up.tv_sec,
uptime, sizeof(uptime),
false, NULL);
- ttable_add_row(tt, "%s:%d|Up|%s|%s|%s",
+ ttable_add_row(tt,
+ "%s:%d|Up|%s|%s|%pSU",
ba->hostname, ba->port,
ba->bmp->remote, uptime,
- sockunion2str(
- &ba->addrsrc,
- buf,
- SU_ADDRSTRLEN));
+ &ba->addrsrc);
continue;
}
@@ -2405,15 +2400,11 @@ DEFPY(show_bmp,
state_str = "Resolving";
}
- sockunion2str(&ba->addrsrc,
- buf,
- SU_ADDRSTRLEN);
- ttable_add_row(tt, "%s:%d|%s|%s|%s|%s",
+ ttable_add_row(tt, "%s:%d|%s|%s|%s|%pSU",
ba->hostname, ba->port,
state_str,
ba->last_err ? ba->last_err : "",
- uptime,
- buf);
+ uptime, &ba->addrsrc);
continue;
}
out = ttable_dump(tt, "\n");
@@ -2460,7 +2451,6 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty)
struct bmp_targets *bt;
struct bmp_listener *bl;
struct bmp_active *ba;
- char buf[SU_ADDRSTRLEN];
afi_t afi;
safi_t safi;
@@ -2497,9 +2487,8 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty)
afi_str, safi2str(safi));
}
frr_each (bmp_listeners, &bt->listeners, bl)
- vty_out(vty, " \n bmp listener %s port %d\n",
- sockunion2str(&bl->addr, buf, SU_ADDRSTRLEN),
- bl->port);
+ vty_out(vty, " \n bmp listener %pSU port %d\n",
+ &bl->addr, bl->port);
frr_each (bmp_actives, &bt->actives, ba) {
vty_out(vty, " bmp connect %s port %u min-retry %u max-retry %u",
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index 9417b7d59a..da4cc03b66 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -59,14 +59,10 @@ void bgp_dump_listener_info(struct vty *vty)
vty_out(vty, "Name fd Address\n");
vty_out(vty, "---------------------------\n");
- for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener)) {
- char buf[SU_ADDRSTRLEN];
-
- vty_out(vty, "%-16s %d %s\n",
+ for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener))
+ vty_out(vty, "%-16s %d %pSU\n",
listener->name ? listener->name : VRF_DEFAULT_NAME,
- listener->fd,
- sockunion2str(&listener->su, buf, sizeof(buf)));
- }
+ listener->fd, &listener->su);
}
/*
@@ -103,21 +99,18 @@ static int bgp_md5_set_socket(int socket, union sockunion *su,
#endif /* HAVE_TCP_MD5SIG */
if (ret < 0) {
- char sabuf[SU_ADDRSTRLEN];
- sockunion2str(su, sabuf, sizeof(sabuf));
-
switch (ret) {
case -2:
flog_warn(
EC_BGP_NO_TCP_MD5,
- "Unable to set TCP MD5 option on socket for peer %s (sock=%d): This platform does not support MD5 auth for prefixes",
- sabuf, socket);
+ "Unable to set TCP MD5 option on socket for peer %pSU (sock=%d): This platform does not support MD5 auth for prefixes",
+ su, socket);
break;
default:
flog_warn(
EC_BGP_NO_TCP_MD5,
- "Unable to set TCP MD5 option on socket for peer %s (sock=%d): %s",
- sabuf, socket, safe_strerror(en));
+ "Unable to set TCP MD5 option on socket for peer %pSU (sock=%d): %s",
+ su, socket, safe_strerror(en));
}
}
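Note on the pattern above: the hunks in bgp_bmp.c and bgp_network.c (and the ones that follow in bgp_route.c, bgp_vty.c, nhrpd and lib) all replace the sockunion2str()-plus-scratch-buffer idiom with FRR's printfrr "%pSU" extension, which formats a union sockunion directly. A minimal sketch of the two idioms, for illustration only (the helper names here are hypothetical and not part of this change):

#include "vty.h"       /* vty_out() */
#include "sockunion.h" /* union sockunion, SU_ADDRSTRLEN, sockunion2str() */

/* before: the caller owns a scratch buffer */
static void show_peer_old(struct vty *vty, const union sockunion *su)
{
	char buf[SU_ADDRSTRLEN];

	vty_out(vty, "peer %s\n", sockunion2str(su, buf, sizeof(buf)));
}

/* after: vty_out() goes through printfrr, so %pSU renders the
 * sockunion in place and no intermediate buffer is needed */
static void show_peer_new(struct vty *vty, const union sockunion *su)
{
	vty_out(vty, "peer %pSU\n", su);
}

The same substitution is behind the json_object_string_addf(..., "%pSU", ...) calls in the later hunks, which format the address straight into the JSON value instead of through a stack buffer.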
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 7b9e662b67..f94f5db514 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -9122,12 +9122,9 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
else
vty_out(vty, "%7u ", attr->weight);
- if (json_paths) {
- char buf[BUFSIZ];
- json_object_string_add(
- json_path, "peerId",
- sockunion2str(&path->peer->su, buf, SU_ADDRSTRLEN));
- }
+ if (json_paths)
+ json_object_string_addf(json_path, "peerId", "%pSU",
+ &path->peer->su);
/* Print aspath */
if (attr->aspath) {
@@ -9719,7 +9716,6 @@ static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
int *first, const char *header,
json_object *json_adv_to)
{
- char buf1[INET6_ADDRSTRLEN];
json_object *json_peer = NULL;
if (json_adv_to) {
@@ -9739,10 +9735,8 @@ static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
json_object_object_add(json_adv_to, peer->conf_if,
json_peer);
else
- json_object_object_add(
- json_adv_to,
- sockunion2str(&peer->su, buf1, SU_ADDRSTRLEN),
- json_peer);
+ json_object_object_addf(json_adv_to, json_peer, "%pSU",
+ &peer->su);
} else {
if (*first) {
vty_out(vty, "%s", header);
@@ -9755,16 +9749,13 @@ static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
vty_out(vty, " %s(%s)", peer->hostname,
peer->conf_if);
else
- vty_out(vty, " %s(%s)", peer->hostname,
- sockunion2str(&peer->su, buf1,
- SU_ADDRSTRLEN));
+ vty_out(vty, " %s(%pSU)", peer->hostname,
+ &peer->su);
} else {
if (peer->conf_if)
vty_out(vty, " %s", peer->conf_if);
else
- vty_out(vty, " %s",
- sockunion2str(&peer->su, buf1,
- SU_ADDRSTRLEN));
+ vty_out(vty, " %pSU", &peer->su);
}
}
}
@@ -10184,10 +10175,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
else {
if (json_paths) {
- json_object_string_add(json_peer, "peerId",
- sockunion2str(&path->peer->su,
- buf,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_peer, "peerId", "%pSU",
+ &path->peer->su);
json_object_string_addf(json_peer, "routerId", "%pI4",
&path->peer->remote_id);
@@ -10221,10 +10210,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
path->peer->hostname,
path->peer->host);
else
- vty_out(vty, " from %s",
- sockunion2str(&path->peer->su,
- buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, " from %pSU",
+ &path->peer->su);
}
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))
@@ -14963,10 +14950,8 @@ static void show_bgp_peerhash_entry(struct hash_bucket *bucket, void *arg)
{
struct vty *vty = arg;
struct peer *peer = bucket->data;
- char buf[SU_ADDRSTRLEN];
- vty_out(vty, "\tPeer: %s %s\n", peer->host,
- sockunion2str(&peer->su, buf, sizeof(buf)));
+ vty_out(vty, "\tPeer: %s %pSU\n", peer->host, &peer->su);
}
DEFUN (show_bgp_listeners,
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 1c7dc7cb0a..de1c559641 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -114,12 +114,12 @@ static struct rtr_mgr_group *get_groups(void);
#if defined(FOUND_SSH)
static int add_ssh_cache(const char *host, const unsigned int port,
const char *username, const char *client_privkey_path,
- const char *client_pubkey_path,
const char *server_pubkey_path,
const uint8_t preference, const char *bindaddr);
#endif
static struct rtr_socket *create_rtr_socket(struct tr_socket *tr_socket);
static struct cache *find_cache(const uint8_t preference);
+static void rpki_delete_all_cache_nodes(void);
static int add_tcp_cache(const char *host, const char *port,
const uint8_t preference, const char *bindaddr);
static void print_record(const struct pfx_record *record, struct vty *vty,
@@ -276,6 +276,17 @@ static struct cache *find_cache(const uint8_t preference)
return NULL;
}
+static void rpki_delete_all_cache_nodes(void)
+{
+ struct listnode *cache_node, *cache_next;
+ struct cache *cache;
+
+ for (ALL_LIST_ELEMENTS(cache_list, cache_node, cache_next, cache)) {
+ rtr_mgr_remove_group(rtr_config, cache->preference);
+ listnode_delete(cache_list, cache);
+ }
+}
+
static void print_record(const struct pfx_record *record, struct vty *vty,
json_object *json)
{
@@ -916,7 +927,6 @@ static int add_tcp_cache(const char *host, const char *port,
#if defined(FOUND_SSH)
static int add_ssh_cache(const char *host, const unsigned int port,
const char *username, const char *client_privkey_path,
- const char *client_pubkey_path,
const char *server_pubkey_path,
const uint8_t preference, const char *bindaddr)
{
@@ -991,16 +1001,14 @@ static int config_write(struct vty *vty)
struct listnode *cache_node;
struct cache *cache;
- if (!listcount(cache_list))
- return 0;
-
if (rpki_debug)
vty_out(vty, "debug rpki\n");
vty_out(vty, "!\n");
vty_out(vty, "rpki\n");
- vty_out(vty, " rpki polling_period %d\n", polling_period);
+ if (polling_period != POLLING_PERIOD_DEFAULT)
+ vty_out(vty, " rpki polling_period %d\n", polling_period);
if (retry_interval != RETRY_INTERVAL_DEFAULT)
vty_out(vty, " rpki retry_interval %d\n", retry_interval);
if (expire_interval != EXPIRE_INTERVAL_DEFAULT)
@@ -1055,6 +1063,17 @@ DEFUN_NOSH (rpki,
return CMD_SUCCESS;
}
+DEFPY (no_rpki,
+ no_rpki_cmd,
+ "no rpki",
+ NO_STR
+ "Enable rpki and enter rpki configuration mode\n")
+{
+ rpki_delete_all_cache_nodes();
+ stop();
+ return CMD_SUCCESS;
+}
+
DEFUN (bgp_rpki_start,
bgp_rpki_start_cmd,
"rpki start",
@@ -1161,15 +1180,15 @@ DEFUN (no_rpki_retry_interval,
}
DEFPY(rpki_cache, rpki_cache_cmd,
- "rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY SSH_PUBKEY [SERVER_PUBKEY]> [source <A.B.C.D>$bindaddr] preference (1-255)",
+ "rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY [SERVER_PUBKEY]> [source <A.B.C.D>$bindaddr] preference (1-255)",
RPKI_OUTPUT_STRING
"Install a cache server to current group\n"
- "IP address of cache server\n Hostname of cache server\n"
+ "IP address of cache server\n"
+ "Hostname of cache server\n"
"TCP port number\n"
"SSH port number\n"
"SSH user name\n"
"Path to own SSH private key\n"
- "Path to own SSH public key\n"
"Path to Public key of cache server\n"
"Configure source IP address of RPKI connection\n"
"Define a Source IP Address\n"
@@ -1193,9 +1212,9 @@ DEFPY(rpki_cache, rpki_cache_cmd,
// use ssh connection
if (ssh_uname) {
#if defined(FOUND_SSH)
- return_value = add_ssh_cache(
- cache, sshport, ssh_uname, ssh_privkey, ssh_pubkey,
- server_pubkey, preference, bindaddr_str);
+ return_value =
+ add_ssh_cache(cache, sshport, ssh_uname, ssh_privkey,
+ server_pubkey, preference, bindaddr_str);
#else
return_value = SUCCESS;
vty_out(vty,
@@ -1216,20 +1235,27 @@ DEFPY(rpki_cache, rpki_cache_cmd,
DEFPY (no_rpki_cache,
no_rpki_cache_cmd,
- "no rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport> preference (1-255)$preference",
+ "no rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY [SERVER_PUBKEY]> [source <A.B.C.D>$bindaddr] preference (1-255)",
NO_STR
RPKI_OUTPUT_STRING
- "Remove a cache server\n"
- "IP address of cache server\n Hostname of cache server\n"
+ "Install a cache server to current group\n"
+ "IP address of cache server\n"
+ "Hostname of cache server\n"
"TCP port number\n"
"SSH port number\n"
+ "SSH user name\n"
+ "Path to own SSH private key\n"
+ "Path to Public key of cache server\n"
+ "Configure source IP address of RPKI connection\n"
+ "Define a Source IP Address\n"
"Preference of the cache server\n"
"Preference value\n")
{
struct cache *cache_p = find_cache(preference);
if (!cache_p) {
- vty_out(vty, "Could not find cache %ld\n", preference);
+ vty_out(vty, "Could not find cache with preference %ld\n",
+ preference);
return CMD_WARNING;
}
@@ -1237,9 +1263,9 @@ DEFPY (no_rpki_cache,
stop();
} else if (is_running()) {
if (rtr_mgr_remove_group(rtr_config, preference) == RTR_ERROR) {
- vty_out(vty, "Could not remove cache %ld", preference);
-
- vty_out(vty, "\n");
+ vty_out(vty,
+ "Could not remove cache with preference %ld\n",
+ preference);
return CMD_WARNING;
}
}
@@ -1392,9 +1418,11 @@ DEFPY (show_rpki_cache_server,
for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) {
if (cache->type == TCP) {
if (!json) {
- vty_out(vty, "host: %s port: %s\n",
+ vty_out(vty,
+ "host: %s port: %s, preference: %hhu\n",
cache->tr_config.tcp_config->host,
- cache->tr_config.tcp_config->port);
+ cache->tr_config.tcp_config->port,
+ cache->preference);
} else {
json_server = json_object_new_object();
json_object_string_add(json_server, "mode",
@@ -1405,6 +1433,8 @@ DEFPY (show_rpki_cache_server,
json_object_string_add(
json_server, "port",
cache->tr_config.tcp_config->port);
+ json_object_int_add(json_server, "preference",
+ cache->preference);
json_object_array_add(json_servers,
json_server);
}
@@ -1413,14 +1443,15 @@ DEFPY (show_rpki_cache_server,
} else if (cache->type == SSH) {
if (!json) {
vty_out(vty,
- "host: %s port: %d username: %s server_hostkey_path: %s client_privkey_path: %s\n",
+ "host: %s port: %d username: %s server_hostkey_path: %s client_privkey_path: %s, preference: %hhu\n",
cache->tr_config.ssh_config->host,
cache->tr_config.ssh_config->port,
cache->tr_config.ssh_config->username,
cache->tr_config.ssh_config
->server_hostkey_path,
cache->tr_config.ssh_config
- ->client_privkey_path);
+ ->client_privkey_path,
+ cache->preference);
} else {
json_server = json_object_new_object();
json_object_string_add(json_server, "mode",
@@ -1442,6 +1473,8 @@ DEFPY (show_rpki_cache_server,
json_server, "clientPrivkeyPath",
cache->tr_config.ssh_config
->client_privkey_path);
+ json_object_int_add(json_server, "preference",
+ cache->preference);
json_object_array_add(json_servers,
json_server);
}
@@ -1662,6 +1695,8 @@ static void install_cli_commands(void)
install_default(RPKI_NODE);
install_element(CONFIG_NODE, &rpki_cmd);
install_element(ENABLE_NODE, &rpki_cmd);
+ install_element(CONFIG_NODE, &no_rpki_cmd);
+
install_element(ENABLE_NODE, &bgp_rpki_start_cmd);
install_element(ENABLE_NODE, &bgp_rpki_stop_cmd);
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index c5d049f363..443c5d8a13 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -414,8 +414,10 @@ static unsigned int updgrp_hash_key_make(const void *p)
if (bgp_debug_neighbor_events(peer)) {
zlog_debug(
- "%pBP Update Group Hash: sort: %d UpdGrpFlags: %u UpdGrpAFFlags: %u",
- peer, peer->sort, peer->flags & PEER_UPDGRP_FLAGS,
+ "%pBP Update Group Hash: sort: %d UpdGrpFlags: %" PRIu64
+ " UpdGrpAFFlags: %u",
+ peer, peer->sort,
+ (uint64_t)(peer->flags & PEER_UPDGRP_FLAGS),
flags & PEER_UPDGRP_AF_FLAGS);
zlog_debug(
"%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
@@ -455,8 +457,10 @@ static unsigned int updgrp_hash_key_make(const void *p)
peer->shared_network &&
peer_afi_active_nego(peer, AFI_IP6));
zlog_debug(
- "%pBP Update Group Hash: Lonesoul: %u ORF prefix: %u ORF old: %u max prefix out: %u",
- peer, CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
+ "%pBP Update Group Hash: Lonesoul: %" PRIu64
+ " ORF prefix: %u ORF old: %u max prefix out: %u",
+ peer,
+ (uint64_t)CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ORF_PREFIX_SM_RCV),
CHECK_FLAG(peer->af_cap[afi][safi],
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 95530c8cfe..a4fc27805c 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -11679,7 +11679,6 @@ static void bgp_show_neighbor_graceful_restart_time(struct vty *vty,
static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
bool use_json, json_object *json)
{
- char buf[SU_ADDRSTRLEN] = {0};
char dn_flag[2] = {0};
/* '*' + v6 address of neighbor */
char neighborAddr[INET6_ADDRSTRLEN + 1] = {0};
@@ -11689,18 +11688,11 @@ static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
if (p->conf_if) {
if (use_json)
- json_object_string_add(
- json, "neighborAddr",
- BGP_PEER_SU_UNSPEC(p)
- ? "none"
- : sockunion2str(&p->su, buf,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json, "neighborAddr", "%pSU",
+ &p->su);
else
- vty_out(vty, "BGP neighbor on %s: %s\n", p->conf_if,
- BGP_PEER_SU_UNSPEC(p)
- ? "none"
- : sockunion2str(&p->su, buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, "BGP neighbor on %s: %pSU\n", p->conf_if,
+ &p->su);
} else {
snprintf(neighborAddr, sizeof(neighborAddr), "%s%s", dn_flag,
p->host);
@@ -12368,7 +12360,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object *json)
{
struct bgp *bgp;
- char buf1[PREFIX2STR_BUFFER], buf[SU_ADDRSTRLEN];
+ char buf1[PREFIX2STR_BUFFER];
char timebuf[BGP_UPTIME_LEN];
char dn_flag[2];
afi_t afi;
@@ -12390,11 +12382,8 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (!use_json) {
if (p->conf_if) /* Configured interface name. */
- vty_out(vty, "BGP neighbor on %s: %s, ", p->conf_if,
- BGP_PEER_SU_UNSPEC(p)
- ? "None"
- : sockunion2str(&p->su, buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, "BGP neighbor on %s: %pSU, ", p->conf_if,
+ &p->su);
else /* Configured IP address. */
vty_out(vty, "BGP neighbor is %s%s, ", dn_flag,
p->host);
@@ -12405,9 +12394,8 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object_string_add(json_neigh, "bgpNeighborAddr",
"none");
else if (p->conf_if && !BGP_PEER_SU_UNSPEC(p))
- json_object_string_add(
- json_neigh, "bgpNeighborAddr",
- sockunion2str(&p->su, buf, SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh, "bgpNeighborAddr",
+ "%pSU", &p->su);
json_object_int_add(json_neigh, "remoteAs", p->as);
@@ -13814,10 +13802,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
"updateSource",
p->update_if);
else if (p->update_source)
- json_object_string_add(
- json_neigh, "updateSource",
- sockunion2str(p->update_source, buf1,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh,
+ "updateSource", "%pSU",
+ p->update_source);
}
} else {
/* advertisement-interval */
@@ -13831,9 +13818,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (p->update_if)
vty_out(vty, "%s", p->update_if);
else if (p->update_source)
- vty_out(vty, "%s",
- sockunion2str(p->update_source, buf1,
- SU_ADDRSTRLEN));
+ vty_out(vty, "%pSU", p->update_source);
vty_out(vty, "\n");
}
@@ -13989,15 +13974,13 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
/* Local address. */
if (p->su_local) {
if (use_json) {
- json_object_string_add(json_neigh, "hostLocal",
- sockunion2str(p->su_local, buf1,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh, "hostLocal", "%pSU",
+ p->su_local);
json_object_int_add(json_neigh, "portLocal",
ntohs(p->su_local->sin.sin_port));
} else
- vty_out(vty, "Local host: %s, Local port: %d\n",
- sockunion2str(p->su_local, buf1, SU_ADDRSTRLEN),
- ntohs(p->su_local->sin.sin_port));
+ vty_out(vty, "Local host: %pSU, Local port: %d\n",
+ p->su_local, ntohs(p->su_local->sin.sin_port));
} else {
if (use_json) {
json_object_string_add(json_neigh, "hostLocal",
@@ -14009,15 +13992,13 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
/* Remote address. */
if (p->su_remote) {
if (use_json) {
- json_object_string_add(json_neigh, "hostForeign",
- sockunion2str(p->su_remote, buf1,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh, "hostForeign",
+ "%pSU", p->su_remote);
json_object_int_add(json_neigh, "portForeign",
ntohs(p->su_remote->sin.sin_port));
} else
- vty_out(vty, "Foreign host: %s, Foreign port: %d\n",
- sockunion2str(p->su_remote, buf1,
- SU_ADDRSTRLEN),
+ vty_out(vty, "Foreign host: %pSU, Foreign port: %d\n",
+ p->su_remote,
ntohs(p->su_remote->sin.sin_port));
} else {
if (use_json) {
@@ -16506,7 +16487,6 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
struct peer *peer)
{
struct peer *g_peer = NULL;
- char buf[SU_ADDRSTRLEN];
char *addr;
int if_pg_printed = false;
int if_ras_printed = false;
@@ -16699,9 +16679,8 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
/* update-source */
if (peergroup_flag_check(peer, PEER_FLAG_UPDATE_SOURCE)) {
if (peer->update_source)
- vty_out(vty, " neighbor %s update-source %s\n", addr,
- sockunion2str(peer->update_source, buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, " neighbor %s update-source %pSU\n", addr,
+ peer->update_source);
else if (peer->update_if)
vty_out(vty, " neighbor %s update-source %s\n", addr,
peer->update_if);
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 98e59bcc85..a6a13a9a6e 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -1281,7 +1281,7 @@ struct peer {
* peer-group, the peer-specific overrides (see flags_override and
* flags_invert) must be respected.
*/
- uint32_t flags;
+ uint64_t flags;
#define PEER_FLAG_PASSIVE (1U << 0) /* passive mode */
#define PEER_FLAG_SHUTDOWN (1U << 1) /* shutdown */
#define PEER_FLAG_DONT_CAPABILITY (1U << 2) /* dont-capability */
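Widening peer->flags to 64 bits here is what drives the format-string changes in bgp_updgrp.c above: a flag test on a uint64_t field can no longer be printed with a plain %u. A standalone sketch of the PRIu64 idiom used there, for illustration only (the flag bits below are hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t flags = (1ULL << 40) | (1ULL << 1); /* hypothetical flag bits */
	uint64_t mask = (1ULL << 40);

	/* cast the masked value and use PRIu64 so the conversion
	 * specifier matches uint64_t on every platform */
	printf("masked flags: %" PRIu64 "\n", (uint64_t)(flags & mask));
	return 0;
}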
diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst
index e5bd59d9cb..cc0e7f70c6 100644
--- a/doc/user/rpki.rst
+++ b/doc/user/rpki.rst
@@ -120,7 +120,7 @@ The following commands are independent of a specific cache server.
The default value is 600 seconds.
-.. clicmd:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [SSH_PUBKEY_PATH] [KNOWN_HOSTS_PATH] [source A.B.C.D] PREFERENCE
+.. clicmd:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [KNOWN_HOSTS_PATH] [source A.B.C.D] preference (1-255)
Add a cache server to the socket. By default, the connection between router
@@ -137,15 +137,9 @@ The following commands are independent of a specific cache server.
SSH_USERNAME
SSH username to establish an SSH connection to the cache server.
-
SSH_PRIVKEY_PATH
Local path that includes the private key file of the router.
-
- SSH_PUBKEY_PATH
- Local path that includes the public key file of the router.
-
-
KNOWN_HOSTS_PATH
Local path that includes the known hosts file. The default value depends
on the configuration of the operating system environment, usually
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 29f305520a..eca67c0609 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -273,6 +273,12 @@ Nexthop tracking doesn't resolve nexthops via the default route by default.
Allowing this might be useful when e.g. you want to allow BGP to peer across
the default route.
+.. clicmd:: zebra nexthop-group keep (1-3600)
+
+ Set the time that zebra will keep a created and installed nexthop group
+ before removing it from the system if the nexthop group is no longer
+ being used. The default time is 180 seconds.
+
.. clicmd:: ip nht resolve-via-default
Allow IPv4 nexthop tracking to resolve via the default route. This parameter
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index e8c678ad71..7284d6cea6 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -1094,12 +1094,10 @@ void nexthop_group_json_nexthop(json_object *j, const struct nexthop *nh)
static void nexthop_group_write_nexthop_internal(struct vty *vty,
const struct nexthop_hold *nh)
{
- char buf[100];
-
vty_out(vty, "nexthop");
if (nh->addr)
- vty_out(vty, " %s", sockunion2str(nh->addr, buf, sizeof(buf)));
+ vty_out(vty, " %pSU", nh->addr);
if (nh->intf)
vty_out(vty, " %s", nh->intf);
diff --git a/lib/prefix.h b/lib/prefix.h
index 42394ec61c..f9eef28a0b 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -594,6 +594,15 @@ static inline int is_default_host_route(const struct prefix *p)
return 0;
}
+static inline bool is_ipv6_global_unicast(const struct in6_addr *p)
+{
+ if (IN6_IS_ADDR_UNSPECIFIED(p) || IN6_IS_ADDR_LOOPBACK(p) ||
+ IN6_IS_ADDR_LINKLOCAL(p) || IN6_IS_ADDR_MULTICAST(p))
+ return false;
+
+ return true;
+}
+
/* IPv6 scope values, usable for IPv4 too (cf. below) */
/* clang-format off */
enum {
diff --git a/nhrpd/nhrp_nhs.c b/nhrpd/nhrp_nhs.c
index 63eaf1e394..03b4b533bb 100644
--- a/nhrpd/nhrp_nhs.c
+++ b/nhrpd/nhrp_nhs.c
@@ -163,7 +163,6 @@ static void nhrp_reg_send_req(struct thread *t)
{
struct nhrp_registration *r = THREAD_ARG(t);
struct nhrp_nhs *nhs = r->nhs;
- char buf1[SU_ADDRSTRLEN], buf2[SU_ADDRSTRLEN];
struct interface *ifp = nhs->ifp;
struct nhrp_interface *nifp = ifp->info;
struct nhrp_afi_data *if_ad = &nifp->afi[nhs->afi];
@@ -189,10 +188,8 @@ static void nhrp_reg_send_req(struct thread *t)
if (sockunion_family(dst_proto) == AF_UNSPEC)
dst_proto = &if_ad->addr;
- sockunion2str(&if_ad->addr, buf1, sizeof(buf1));
- sockunion2str(dst_proto, buf2, sizeof(buf2));
- debugf(NHRP_DEBUG_COMMON, "NHS: Register %s -> %s (timeout %d)", buf1,
- buf2, r->timeout);
+ debugf(NHRP_DEBUG_COMMON, "NHS: Register %pSU -> %pSU (timeout %d)",
+ &if_ad->addr, dst_proto, r->timeout);
/* No protocol address configured for tunnel interface */
if (sockunion_family(&if_ad->addr) == AF_UNSPEC)
diff --git a/nhrpd/nhrp_peer.c b/nhrpd/nhrp_peer.c
index 67d12cbcf3..4b03032566 100644
--- a/nhrpd/nhrp_peer.c
+++ b/nhrpd/nhrp_peer.c
@@ -1083,7 +1083,6 @@ err:
static void nhrp_packet_debug(struct zbuf *zb, const char *dir)
{
- char buf[2][SU_ADDRSTRLEN];
union sockunion src_nbma, src_proto, dst_proto;
struct nhrp_packet_header *hdr;
struct zbuf zhdr;
@@ -1095,14 +1094,12 @@ static void nhrp_packet_debug(struct zbuf *zb, const char *dir)
zbuf_init(&zhdr, zb->buf, zb->tail - zb->buf, zb->tail - zb->buf);
hdr = nhrp_packet_pull(&zhdr, &src_nbma, &src_proto, &dst_proto);
- sockunion2str(&src_proto, buf[0], sizeof(buf[0]));
- sockunion2str(&dst_proto, buf[1], sizeof(buf[1]));
-
reply = packet_types[hdr->type].type == PACKET_REPLY;
- debugf(NHRP_DEBUG_COMMON, "%s %s(%d) %s -> %s", dir,
+ debugf(NHRP_DEBUG_COMMON, "%s %s(%d) %pSU -> %pSU", dir,
(packet_types[hdr->type].name ? packet_types[hdr->type].name
: "Unknown"),
- hdr->type, reply ? buf[1] : buf[0], reply ? buf[0] : buf[1]);
+ hdr->type, reply ? &dst_proto : &src_proto,
+ reply ? &src_proto : &dst_proto);
}
static int proto2afi(uint16_t proto)
diff --git a/nhrpd/nhrp_shortcut.c b/nhrpd/nhrp_shortcut.c
index 71b6dd8702..4975aca006 100644
--- a/nhrpd/nhrp_shortcut.c
+++ b/nhrpd/nhrp_shortcut.c
@@ -48,23 +48,17 @@ static void nhrp_shortcut_do_expire(struct thread *t)
static void nhrp_shortcut_cache_notify(struct notifier_block *n,
unsigned long cmd)
{
- char buf2[PREFIX_STRLEN];
-
struct nhrp_shortcut *s =
container_of(n, struct nhrp_shortcut, cache_notifier);
struct nhrp_cache *c = s->cache;
- if (c)
- sockunion2str(&c->remote_addr, buf2, sizeof(buf2));
- else
- snprintf(buf2, sizeof(buf2), "(unspec)");
switch (cmd) {
case NOTIFY_CACHE_UP:
if (!s->route_installed) {
debugf(NHRP_DEBUG_ROUTE,
- "Shortcut: route install %pFX nh %s dev %s",
- s->p, buf2, c && c->ifp ?
- c->ifp->name : "<unk>");
+ "Shortcut: route install %pFX nh %pSU dev %s",
+ s->p, &c->remote_addr,
+ c && c->ifp ? c->ifp->name : "<unk>");
nhrp_route_announce(1, s->type, s->p, c ? c->ifp : NULL,
c ? &c->remote_addr : NULL, 0);
diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c
index 4db2d8b89e..3a8baa2342 100644
--- a/nhrpd/nhrp_vty.c
+++ b/nhrpd/nhrp_vty.c
@@ -1001,18 +1001,15 @@ static void show_dmvpn_entry(struct nhrp_vc *vc, void *ctx)
{
struct dmvpn_cfg *ctxt = ctx;
struct vty *vty;
- char buf[2][SU_ADDRSTRLEN];
struct json_object *json = NULL;
if (!ctxt || !ctxt->vty)
return;
vty = ctxt->vty;
- sockunion2str(&vc->local.nbma, buf[0], sizeof(buf[0]));
- sockunion2str(&vc->remote.nbma, buf[1], sizeof(buf[1]));
if (ctxt->json) {
json = json_object_new_object();
- json_object_string_add(json, "src", buf[0]);
- json_object_string_add(json, "dst", buf[1]);
+ json_object_string_addf(json, "src", "%pSU", &vc->local.nbma);
+ json_object_string_addf(json, "dst", "%pSU", &vc->remote.nbma);
if (notifier_active(&vc->notifier_list))
json_object_boolean_true_add(json, "notifierActive");
@@ -1023,9 +1020,10 @@ static void show_dmvpn_entry(struct nhrp_vc *vc, void *ctx)
json_object_string_add(json, "identity", vc->remote.id);
json_object_array_add(ctxt->json, json);
} else {
- vty_out(vty, "%-24s %-24s %c %-4d %-24s\n",
- buf[0], buf[1], notifier_active(&vc->notifier_list) ?
- 'n' : ' ', vc->ipsec, vc->remote.id);
+ vty_out(vty, "%-24pSU %-24pSU %c %-4d %-24s\n",
+ &vc->local.nbma, &vc->remote.nbma,
+ notifier_active(&vc->notifier_list) ? 'n' : ' ',
+ vc->ipsec, vc->remote.id);
}
}
diff --git a/ospfclient/README b/ospfclient/README
index 894cd783ca..5f6d050831 100644
--- a/ospfclient/README
+++ b/ospfclient/README
@@ -1,4 +1,3 @@
-For more information about this software check out:
-
-http://www.tik.ee.ethz.ch/~keller/ospfapi/
+For more information, check out the developer guide at:
+https://docs.frrouting.org/projects/dev-guide/en/latest/ospf-api.html
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index 1945d99d21..8403340d86 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -234,6 +234,48 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
return 0;
}
+#if PIM_IPV == 6
+struct in6_addr pim_register_get_unicast_v6_addr(struct pim_interface *p_ifp)
+{
+ struct listnode *node;
+ struct listnode *nextnode;
+ struct pim_secondary_addr *sec_addr;
+ struct pim_interface *pim_ifp;
+ struct interface *ifp;
+ struct pim_instance *pim = p_ifp->pim;
+
+ /* Trying to get the unicast address from the RPF interface first */
+ for (ALL_LIST_ELEMENTS(p_ifp->sec_addr_list, node, nextnode,
+ sec_addr)) {
+ if (!is_ipv6_global_unicast(&sec_addr->addr.u.prefix6))
+ continue;
+
+ return sec_addr->addr.u.prefix6;
+ }
+
+ /* Loop through all the pim interface and try to return a global
+ * unicast ipv6 address
+ */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ for (ALL_LIST_ELEMENTS(pim_ifp->sec_addr_list, node, nextnode,
+ sec_addr)) {
+ if (!is_ipv6_global_unicast(&sec_addr->addr.u.prefix6))
+ continue;
+
+ return sec_addr->addr.u.prefix6;
+ }
+ }
+
+ zlog_warn("No global address found for use to send register message");
+ return PIMADDR_ANY;
+}
+#endif
+
void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
struct pim_rpf *rpg, int null_register,
struct pim_upstream *up)
@@ -278,6 +320,13 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
memcpy(b1, (const unsigned char *)buf, buf_size);
+#if PIM_IPV == 6
+ /* While sending Register message to RP, we cannot use link-local
+ * address therefore using unicast ipv6 address here, choosing it
+ * from the RPF Interface
+ */
+ src = pim_register_get_unicast_v6_addr(pinfo);
+#endif
pim_msg_build_header(src, dst, buffer, buf_size + PIM_MSG_REGISTER_LEN,
PIM_MSG_TYPE_REGISTER, false);
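The register-source selection added above relies on the new is_ipv6_global_unicast() helper from lib/prefix.h: unspecified, loopback, link-local and multicast addresses are rejected, and anything else is treated as usable. A small usage sketch, for illustration only (usable_register_source() is a hypothetical wrapper, not part of this change; it assumes the helper as defined in this patch):

#include <arpa/inet.h>
#include <stdbool.h>

#include "prefix.h" /* is_ipv6_global_unicast() */

static bool usable_register_source(const char *str)
{
	struct in6_addr addr;

	if (inet_pton(AF_INET6, str, &addr) != 1)
		return false;

	/* true for 2001:db8::1; false for ::, ::1, fe80::1 or ff02::1 */
	return is_ipv6_global_unicast(&addr);
}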
diff --git a/pimd/pim_register.h b/pimd/pim_register.h
index 79c64d995f..ddb34921ae 100644
--- a/pimd/pim_register.h
+++ b/pimd/pim_register.h
@@ -34,7 +34,9 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size);
int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size);
-
+#if PIM_IPV == 6
+struct in6_addr pim_register_get_unicast_v6_addr(struct pim_interface *p_ifp);
+#endif
void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
struct pim_rpf *rpg, int null_register,
struct pim_upstream *up);
diff --git a/tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json b/tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json
new file mode 100644
index 0000000000..5fae34dd76
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json
@@ -0,0 +1,294 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 3024,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "192.168.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r0": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r0": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r0": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r0": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "500",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
new file mode 100644
index 0000000000..ee71ae16e0
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
@@ -0,0 +1,2537 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+1. Verify BGP default-originate route with IBGP peer
+2. Verify BGP default-originate route with EBGP peer
+3. Verify BGP default route when default-originate configured with route-map over IBGP peer
+4. Verify BGP default route when default-originate configured with route-map over EBGP peer
+
+"""
+import os
+import sys
+import time
+import pytest
+from time import sleep
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ verify_graceful_restart,
+ create_router_bgp,
+ verify_router_id,
+ modify_as_number,
+ verify_as_numbers,
+ clear_bgp_and_verify,
+ clear_bgp,
+ verify_bgp_rib,
+ get_prefix_count_route,
+ get_dut_as_number,
+ verify_rib_default_route,
+ verify_fib_default_route,
+ verify_bgp_advertised_routes_from_neighbor,
+ verify_bgp_received_routes_from_neighbor,
+)
+from lib.common_config import (
+ interface_status,
+ verify_prefix_lists,
+ verify_fib_routes,
+ kill_router_daemons,
+ start_router_daemons,
+ shutdown_bringup_interface,
+ step,
+ required_linux_kernel_version,
+ stop_router,
+ start_router,
+ create_route_maps,
+ create_prefix_lists,
+ get_frr_ipv6_linklocal,
+ start_topology,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ check_router_status,
+ delete_route_maps,
+)
+
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+KEEPALIVETIMER = 1
+HOLDDOWNTIMER = 3
+# Global variables
+NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
+NETWORK1_2 = {"ipv4": "1.1.1.2/32", "ipv6": "1::2/128"}
+NETWORK2_1 = {"ipv4": "2.1.1.1/32", "ipv6": "2::1/128"}
+NETWORK2_2 = {"ipv4": "2.1.1.2/32", "ipv6": "2::2/128"}
+NETWORK3_1 = {"ipv4": "3.1.1.1/32", "ipv6": "3::1/128"}
+NETWORK3_2 = {"ipv4": "3.1.1.2/32", "ipv6": "3::2/128"}
+NETWORK4_1 = {"ipv4": "4.1.1.1/32", "ipv6": "4::1/128"}
+NETWORK4_2 = {"ipv4": "4.1.1.2/32", "ipv6": "4::2/128"}
+NETWORK5_1 = {"ipv4": "5.1.1.1/32", "ipv6": "5::1/128"}
+NETWORK5_2 = {"ipv4": "5.1.1.2/32", "ipv6": "5::2/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+IPV4_RM = "RMVIPV4"
+IPV6_RM = "RMVIPV6"
+
+IPV4_RM1 = "RMVIPV41"
+IPV6_RM1 = "RMVIPV61"
+
+IPV4_RM2 = "RMVIPV42"
+IPV6_RM2 = "RMVIPV62"
+
+IPV4_PL_1 = "PV41"
+IPV4_PL_2 = "PV42"
+
+IPV6_PL_1 = "PV61"
+IPV6_PL_2 = "PV62"
+
+
+r1_ipv4_loopback = "1.0.1.0/24"
+r2_ipv4_loopback = "1.0.2.0/24"
+r3_ipv4_loopback = "1.0.3.0/24"
+r4_ipv4_loopback = "1.0.4.0/24"
+r1_ipv6_loopback = "2001:db8:f::1:0/120"
+r2_ipv6_loopback = "2001:db8:f::2:0/120"
+r3_ipv6_loopback = "2001:db8:f::3:0/120"
+r4_ipv6_loopback = "2001:db8:f::4:0/120"
+
+r0_connected_address_ipv4 = "192.168.0.0/24"
+r0_connected_address_ipv6 = "fd00::/64"
+r1_connected_address_ipv4 = "192.168.1.0/24"
+r1_connected_address_ipv6 = "fd00:0:0:1::/64"
+r3_connected_address_ipv4 = "192.168.2.0/24"
+r3_connected_address_ipv6 = "fd00:0:0:2::/64"
+r4_connected_address_ipv4 = "192.168.3.0/24"
+r4_connected_address_ipv6 = "fd00:0:0:3::/64"
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.15")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+ global R0_NETWORK_LOOPBACK, R0_NETWORK_LOOPBACK_NXTHOP, R1_NETWORK_LOOPBACK, R1_NETWORK_LOOPBACK_NXTHOP
+ global R0_NETWORK_CONNECTED, R0_NETWORK_CONNECTED_NXTHOP, R1_NETWORK_CONNECTED, R1_NETWORK_CONNECTED_NXTHOP
+ global R4_NETWORK_LOOPBACK, R4_NETWORK_LOOPBACK_NXTHOP, R3_NETWORK_LOOPBACK, R3_NETWORK_LOOPBACK_NXTHOP
+ global R4_NETWORK_CONNECTED, R4_NETWORK_CONNECTED_NXTHOP, R3_NETWORK_CONNECTED, R3_NETWORK_CONNECTED_NXTHOP
+
+ ADDR_TYPES = check_address_types()
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+ # These are the global variables used throughout the file; they are available only after the topology has been built.
+
+ r0_loopback_address_ipv4 = topo["routers"]["r0"]["links"]["lo"]["ipv4"]
+ r0_loopback_address_ipv4_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv4"
+ ].split("/")[0]
+ r0_loopback_address_ipv6 = topo["routers"]["r0"]["links"]["lo"]["ipv6"]
+ r0_loopback_address_ipv6_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv6"
+ ].split("/")[0]
+
+ r1_loopback_address_ipv4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+ r1_loopback_address_ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r1_loopback_address_ipv6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+ r1_loopback_address_ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ r4_loopback_address_ipv4 = topo["routers"]["r4"]["links"]["lo"]["ipv4"]
+ r4_loopback_address_ipv4_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv4"
+ ].split("/")[0]
+ r4_loopback_address_ipv6 = topo["routers"]["r4"]["links"]["lo"]["ipv6"]
+ r4_loopback_address_ipv6_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv6"
+ ].split("/")[0]
+
+ r3_loopback_address_ipv4 = topo["routers"]["r3"]["links"]["lo"]["ipv4"]
+ r3_loopback_address_ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r3_loopback_address_ipv6 = topo["routers"]["r3"]["links"]["lo"]["ipv6"]
+ r3_loopback_address_ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ R0_NETWORK_LOOPBACK = {
+ "ipv4": r0_loopback_address_ipv4,
+ "ipv6": r0_loopback_address_ipv6,
+ }
+ R0_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_LOOPBACK = {
+ "ipv4": r1_loopback_address_ipv4,
+ "ipv6": r1_loopback_address_ipv6,
+ }
+ R1_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r1_loopback_address_ipv4_nxt_hop,
+ "ipv6": r1_loopback_address_ipv6_nxt_hop,
+ }
+
+ R0_NETWORK_CONNECTED = {
+ "ipv4": r0_connected_address_ipv4,
+ "ipv6": r0_connected_address_ipv6,
+ }
+ R0_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_CONNECTED = {
+ "ipv4": r1_connected_address_ipv4,
+ "ipv6": r1_connected_address_ipv6,
+ }
+ R1_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r1_loopback_address_ipv4_nxt_hop,
+ "ipv6": r1_loopback_address_ipv6_nxt_hop,
+ }
+
+ R4_NETWORK_LOOPBACK = {
+ "ipv4": r4_loopback_address_ipv4,
+ "ipv6": r4_loopback_address_ipv6,
+ }
+ R4_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_LOOPBACK = {
+ "ipv4": r3_loopback_address_ipv4,
+ "ipv6": r3_loopback_address_ipv6,
+ }
+ R3_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r3_loopback_address_ipv4_nxt_hop,
+ "ipv6": r3_loopback_address_ipv6_nxt_hop,
+ }
+
+ R4_NETWORK_CONNECTED = {
+ "ipv4": r4_connected_address_ipv4,
+ "ipv6": r4_connected_address_ipv6,
+ }
+ R4_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_CONNECTED = {
+ "ipv4": r3_connected_address_ipv4,
+ "ipv6": r3_connected_address_ipv6,
+ }
+ R3_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r3_loopback_address_ipv4_nxt_hop,
+ "ipv6": r3_loopback_address_ipv6_nxt_hop,
+ }
+
+ # populating the nexthop for default routes
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+def test_verify_bgp_default_originate_in_IBGP_p0(request):
+ """
+ Verify BGP default-originate route with IBGP peer
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R2")
+ step("Configure IPv4 and IPv6 Loopback interface on R1, R0 and R2")
+ step("Configure IPv4 and IPv6 EBGP neighbor between R0 and R1")
+
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": r3_local_as,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": r4_local_as,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "Complete convergence is expected after changing the ASN, but BGP failed to converge :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure IPv4 and IPv6 static route on R1 next-hop as NULL0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed to configure the static routes {} on router R1 \n Error: {}".format(
+ tc_name,static_routes_input, result
+ )
+ step("verify IPv4 and IPv6 static route are configured and up on R1")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n After configuring the static routes {} , the routes are not found in FIB \n Error: {}".format(
+ tc_name,static_routes_input, result
+ )
+
+ step(
+ "Configure redistribute static and connected on R0 and R1, for IPv4 and IPv6 address family "
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed to configure the redistribute static configuration \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring redistribute command , verify static and connected routes ( loopback connected routes) are advertised on R2"
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : After redistributing static routes, the routes {} are expected in FIB but were NOT FOUND ......! \n Error: {}".format(
+ tc_name, static_routes_input,result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : After redistributing static routes, the routes {} are expected in RIB but were NOT FOUND ......! \n Error: {}".format(
+ tc_name, static_routes_input , result
+ )
+
+ step(
+ "Taking the snapshot of the prefix count before configuring the default originate"
+ )
+ snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r1")
+
+ step(
+ "Configure Default originate on R1 for R1 to R2, for IPv4 and IPv6 BGP address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed Configuring default originate configuration. \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ " R1 static and loopback routes received on R2 BGP and FIB"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : After configuring BGP default-originate, static and connected routes should not be affected, but they are missing from FIB .......! FAILED \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed. After configuring BGP default-originate, static and connected routes should not be affected, but they are missing from RIB ......! FAILED \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Verify default route for IPv4 and IPv6 present with path=igp metric =0 , local-preference= 100 "
+ )
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ metric=0,
+ locPrf=100,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ step(
+ "Taking the snapshot2 of the prefix count after configuring the default originate"
+ )
+ snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r1")
+
+ step("verifying the prefix count incrementing or not ")
+ isIPv4prefix_incremented = False
+ isIPv6prefix_incremented = False
+ if snapshot1["ipv4_count"] < snapshot2["ipv4_count"]:
+ isIPv4prefix_incremented = True
+ if snapshot1["ipv6_count"] < snapshot2["ipv6_count"]:
+ isIPv6prefix_incremented = True
+
+ assert (
+ isIPv4prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV4 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+
+ assert (
+ isIPv6prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV6 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_default_originate_in_EBGP_p0(request):
+ """
+ Verify BGP default-originate route with EBGP peer
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
+ step("Configure lPv4 and IPv6 Loopback interface on R3, R4 and R2")
+ step("Configure IPv4 and IPv6 IBGP neighbor between R4 and R3")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": r1_local_as,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": r2_local_as,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "Complete convergence is expeceted after changing the ASN os the routes ..! :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step(" Configure IPv4 and IPv6 static route on R3 next-hop on R4 interface")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed to configure the static routes ....! Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("verify IPv4 and IPv6 static route are configured and up on R1")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Route is not found in {} in FIB ......! Failed \n Error: {}".format(
+ tc_name, static_routes_input,result
+ )
+
+ step(
+ "Configure redistribute static and connected on R3 and R4 for IPv4 and IPv6 address family "
+ )
+ redistribute_static = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed to configure redistribute configuratin \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring redistribute command , verify static and connected routes ( loopback connected routes) are advertised on R2"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : static & and connected routes are expected but not found in FIB .... ! \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : static & and connected routes are expected but not found in RIB .... ! \n Error: {}".format(
+ tc_name, result
+ )
+ snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
+ step(
+ "Configure Default originate on R3 for R3 to R2, on IPv4 and IPv6 BGP address family"
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed to configure the default originate configuration \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 on both BGP RIB and FIB"
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : static route from R1 {} and default route from R3 is expected in R2 FIB .....! NOT FOUND \n Error: {}".format(
+ tc_name, NETWORK1_1,result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : static route from R1 {} and default route from R3 is expected in R2 RIB .....! NOT FOUND \n Error: {}".format(
+ tc_name,NETWORK1_1, result
+ )
+
+ step(
+ "Verify default route for IPv4 and IPv6 present with path = ebgp as path, metric =0 "
+ )
+ # local-preference is not applicable for routes learned over eBGP
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ metric=0,
+ expected_aspath="4000",
+ )
+ assert result is True, "Testcase {} : Default route from R3 is expected with attributes in R2 RIB .....! NOT FOUND Error: {}".format(tc_name, result)
+
+ step(
+ "Taking the snapshot2 of the prefix count after configuring the default originate"
+ )
+ snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
+ step(
+ "Verify out-prefix count is incremented default route on IPv4 and IPv6 neighbor"
+ )
+ isIPv4prefix_incremented = False
+ isIPv6prefix_incremented = False
+ if snapshot1["ipv4_count"] < snapshot2["ipv4_count"]:
+ isIPv4prefix_incremented = True
+ if snapshot1["ipv6_count"] < snapshot2["ipv6_count"]:
+ isIPv6prefix_incremented = True
+
+ assert (
+ isIPv4prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV4 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+
+ assert (
+ isIPv6prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV6 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_default_originate_in_IBGP_with_route_map_p0(request):
+ """
+ test_verify_bgp_default_originate_in_IBGP_with_route_map_p0
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R2")
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R1 and R0")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": r3_local_as,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": r4_local_as,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "Complete convergence is expected after changing ASN ....! ERROR :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure 2 IPv4 and 2 IPv6 Static route on R0 with next-hop as Null0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Static Configuration is Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+ assert result is True, "Testcase {} : routes {} unable is not found in R0 FIB \n Error: {}".format(
+ tc_name, static_routes_input,result
+ )
+
+ step(
+ "Configure redistribute static on IPv4 and IPv6 address family on R0 for R0 to R1 neighbor "
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed to configure redistribute static configuration....! \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 static route are configured and up on R1")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed... Routes {} expected in r0 FIB after configuring the redistribute config \n Error: {}".format(
+ tc_name,static_routes_input, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed... Routes {} expected in r0 RIB after configuring the redistribute config \n Error: {}".format(
+ tc_name, static_routes_input,result
+ )
+
+ step(
+ "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R1 to match BGP route Sv41, Sv42, IPv6 route Sv61 Sv62 permit "
+ )
+ input_dict_3 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the prefix list \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure IPV4 and IPv6 route-map (RMv4 and RMv6 ) matching prefix-list (Pv4 and Pv6) respectively on R1"
+ )
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the route map \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure default-originate with route-map (RMv4 and RMv6) on R1, on BGP IPv4 and IPv6 address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed to configure the default originate \n Error: {}".format(tc_name, result)
+
+ step("Verify the default route is received in BGP RIB and FIB")
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed...! Expected default route from R1 not found in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed...! Expected default route from R1 not found in RIB \n Error: {}".format(
+ tc_name, result
+ )
+ step("Remove route-map RMv4 and RMv6 from default-originate command in R1")
+ NOTE = """ Configuring the default-originate should remove the previously applied default originate with condtional route-map"""
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed to remove the default originate conditional route-map \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify BGP RIB and FIB After removing route-map , default route still present on R2"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default route from R1 is not found in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default route from R1 is not found in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure default-originate with route-map (RMv4 and RMv6) on R1 ")
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "route_map": "RMv4",
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "route_map": "RMv6",
+ }
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed to configure the Default originate route-map \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default Route from R1 is not found in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default Route from R1 is not found in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Delete prefix list using no prefix-list")
+ input_dict_3 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ "delete": True,
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ "delete": True,
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ "delete": True,
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ "delete": True,
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to delete the prefix list Error: {}".format(tc_name, result)
+
+ step(
+ "Verify BGP RIB and FIB After deleting prefix-list , verify IPv4 and IPv6 default route got removed from DUT "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed\n After deleteing prefix default route is not expected in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After deleteing prefix default route is not expected in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure prefix-list and delete route-map using no route-map")
+ input_dict_3 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the prefix lists Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring the Prefixlist cross checking the BGP Default route is configured again , before deleting the route map"
+ )
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed Default route from R1 is expected in FIB but not found \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed Default route from R1 is expected in RIB but not found \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Deleting the routemap")
+ input_dict = {"r1": {"route_maps": ["RMv4", "RMv6"]}}
+ result = delete_route_maps(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed to delete the Route-map \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify BGP RIB and FIB ,After deleting route-map , verify IPv4 and IPv6 default route got removed from DUT"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After deleteing route-map default route is not expected in FIB \nError: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After deleteing route-map default route is not expected in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_default_originate_in_EBGP_with_route_map_p0(request):
+ """
+ test_verify_bgp_default_originate_in_EBGP_with_route_map_p0
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
+ step("Configure IPv4 and IPv6 IBGP neighbor between R3 and R4")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": r1_local_as,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": r2_local_as,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step(
+ "Configure 2 IPv4 and 2 IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed to configure the static routes \n Error: {}".format(
+ tc_name, result
+ )
+ step("verify IPv4 and IPv6 static route are configured and up on R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed Static route {} is not found in R4 FIB \n Error: {}".format(
+ tc_name, static_routes_input,result
+ )
+
+ step(
+ "Configure redistribute static on IPv4 and IPv6 address family on R4 for R4 to R3 neighbo"
+ )
+ redistribute_static = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed to configure the redistribute static \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 static route are configured and up on R3")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed static routes from R1 and R3 is not found in FIB \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed static routes from R1 and R3 is not found in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R3 so new route which is not present on R3"
+ )
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK3_1["ipv4"],
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK3_1["ipv6"],
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the prefix lists \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 Prefix list got configured on R3")
+ input_dict = {"r3": {"prefix_lists": ["Pv4", "Pv6"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed ..! configured prefix lists {} are not found \n Error: {}".format(tc_name,input_dict, result)
+
+ step(
+ "Configure IPv4 and IPv6 route-map ( RMv4 and RMv6 ) matching prefix-list (Pv4 and Pv6 ) respectively on R3"
+ )
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the route-map \n Error: {}".format(tc_name, result)
+ step(
+ "Taking the snapshot of the prefix count before configuring the default originate"
+ )
+ snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
+ step(
+ "Configure default-originate with IPv4 and IPv6 route-map (RMv4 and RMv6) on R3"
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed to configure default-originate \n Error: {}".format(tc_name, result)
+
+ step("Verify the default route is NOT received in BGP RIB and FIB on R2 ")
+ step(
+ "After configuring default-originate command , verify default routes are not Received on R2 "
+ )
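+ # With a conditional default-originate, the default route is generated
+ # only while some route in the BGP table matches the attached route-map;
+ # Pv4/Pv6 currently permit only NETWORK3_1, which is not present on R3,
+ # so no default route should be seen on R2 below.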
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Default route is not expected due to deny in prefix list \nError: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nDefault route is not expected due to deny in prefix list\n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Add route Sv41, Sv42, IPv6 route Sv61 Sv62 on prefix list Pv4 and Pv6")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the prefix lists Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is received on R2")
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default routes are expected in R2 FIB from R3 but not found ....! \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default routes are expected in R2 RIB from R3 but not found ....! \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove route Sv41, Sv42, IPv6 route Sv61 Sv62 on prefix list Pv4 and Pv6")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ "delete": True,
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ "delete": True,
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ "delete": True,
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ "delete": True,
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to remove prefix-lists from R3 Error: {}".format(tc_name, result)
+
+ step(
+ "After Removing route BGP default route for IPv4 and IPv6 is NOT received on R2"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After Removing route in prefix list the default route is not expected in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After Removing route in prefix list the default route is not expected in RIB\n Error: {}".format(
+ tc_name, result
+ )
+
+ step(" Add route Sv41, Sv42, IPv6 route Sv61 Sv62 on prefix list Pv4 and Pv6")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is received on R2")
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change IPv4 and IPv6 prefix-list permit and deny ")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {"seqid": "1", "network": NETWORK1_1["ipv4"], "action": "deny"},
+ {"seqid": "2", "network": NETWORK2_1["ipv4"], "action": "deny"},
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {"seqid": "1", "network": NETWORK1_1["ipv6"], "action": "deny"},
+ {"seqid": "2", "network": NETWORK2_1["ipv6"], "action": "deny"},
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is not received on R2")
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n after denying the prefix list default route is not expected in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n after denying the prefix list default route is not expected in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change IPv4 and IPv6 prefix-list deny to permit ")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is received on R2")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Taking the snapshot2 of the prefix count after configuring the default originate"
+ )
+ snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
+
+ step("verifying the prefix count incrementing or not ")
+ isIPv4prefix_incremented = False
+ isIPv6prefix_incremented = False
+ if snapshot1["ipv4_count"] < snapshot2["ipv4_count"]:
+ isIPv4prefix_incremented = True
+ if snapshot1["ipv6_count"] < snapshot2["ipv6_count"]:
+ isIPv6prefix_incremented = True
+
+ assert (
+ isIPv4prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV4 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+
+ assert (
+ isIPv6prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV6 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+
+ step(
+ "Configure another IPv4 and IPv6 route-map and match same prefix-list (Sv41, Sv42, IPv6 route Sv61 Sv62) with deny statement "
+ )
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv41": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv61": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Attach route-map on IPv4 and IP6 BGP neighbor on fly")
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv41"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv61"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "After attaching route-map verify IPv4 and IPv6 default route is withdrawn from the R2"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change the recently added Routemap from deny to permit")
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv41": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv61": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify IPv4 and IPv6 default route is advertised from the R2")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Delete default-originate route-map command while configuring ( neighbor x.x.x default-originate) for IPv4 and IPv6 BGP neighbor "
+ )
+ """ Configuring the Default originate on neighbor must remove the previously assigned deault-originate with routemap config """
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify in running config from BGP that default-originate with route-map command is removed and default-originate command is still present and default route for IPv4 and IPv6 present in RIB and FIB"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate with conditional route-map command on IPv4 and IPv6 address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv41"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv61"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify in running config from BGP that default-originate with route-map command is present and default route for IPv4 and IPv6 present"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Delete default originate with 'no bgp default-originate' from IPV4 and IPV6 address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ " Verify in running config from BGP that default-originate complete CLI is removed for IPV4 and IPV6 address family and default originate routes got deleted"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Default Route is not expected in FIB \nError: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Default Route is not expected in RIB\nError: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
new file mode 100644
index 0000000000..a9987a8f96
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
@@ -0,0 +1,2437 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+5. Verify BGP default originate route-map with OUT route-map
+6. Verify BGP default originate route-map with IN route-map
+8. Verify BGP default route after removing default-originate
+9. Verify default-originate route with GR
+"""
+import os
+import sys
+import time
+import pytest
+from time import sleep
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ verify_graceful_restart,
+ create_router_bgp,
+ verify_router_id,
+ modify_as_number,
+ verify_as_numbers,
+ clear_bgp_and_verify,
+ clear_bgp,
+ verify_bgp_rib,
+ get_prefix_count_route,
+ get_dut_as_number,
+ verify_rib_default_route,
+ verify_fib_default_route,
+ verify_bgp_advertised_routes_from_neighbor,
+ verify_bgp_received_routes_from_neighbor,
+)
+from lib.common_config import (
+ interface_status,
+ verify_prefix_lists,
+ verify_fib_routes,
+ kill_router_daemons,
+ start_router_daemons,
+ shutdown_bringup_interface,
+ step,
+ required_linux_kernel_version,
+ stop_router,
+ start_router,
+ create_route_maps,
+ create_prefix_lists,
+ get_frr_ipv6_linklocal,
+ start_topology,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ check_router_status,
+ delete_route_maps,
+)
+
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+KEEPALIVETIMER = 1
+HOLDDOWNTIMER = 3
+NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
+NETWORK1_2 = {"ipv4": "1.1.1.2/32", "ipv6": "1::2/128"}
+NETWORK2_1 = {"ipv4": "2.1.1.1/32", "ipv6": "2::1/128"}
+NETWORK2_2 = {"ipv4": "2.1.1.2/32", "ipv6": "2::2/128"}
+NETWORK3_1 = {"ipv4": "3.1.1.1/32", "ipv6": "3::1/128"}
+NETWORK3_2 = {"ipv4": "3.1.1.2/32", "ipv6": "3::2/128"}
+NETWORK4_1 = {"ipv4": "4.1.1.1/32", "ipv6": "4::1/128"}
+NETWORK4_2 = {"ipv4": "4.1.1.2/32", "ipv6": "4::2/128"}
+NETWORK5_1 = {"ipv4": "5.1.1.1/32", "ipv6": "5::1/128"}
+NETWORK5_2 = {"ipv4": "5.1.1.2/32", "ipv6": "5::2/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+IPV4_RM = "RMVIPV4"
+IPV6_RM = "RMVIPV6"
+
+IPV4_RM1 = "RMVIPV41"
+IPV6_RM1 = "RMVIPV61"
+
+IPV4_RM2 = "RMVIPV42"
+IPV6_RM2 = "RMVIPV62"
+
+IPV4_PL_1 = "PV41"
+IPV4_PL_2 = "PV42"
+
+IPV6_PL_1 = "PV61"
+IPV6_PL_2 = "PV62"
+
+
+r1_ipv4_loopback = "1.0.1.0/24"
+r2_ipv4_loopback = "1.0.2.0/24"
+r3_ipv4_loopback = "1.0.3.0/24"
+r4_ipv4_loopback = "1.0.4.0/24"
+r1_ipv6_loopback = "2001:db8:f::1:0/120"
+r2_ipv6_loopback = "2001:db8:f::2:0/120"
+r3_ipv6_loopback = "2001:db8:f::3:0/120"
+r4_ipv6_loopback = "2001:db8:f::4:0/120"
+
+r0_connected_address_ipv4 = "192.168.0.0/24"
+r0_connected_address_ipv6 = "fd00::/64"
+r1_connected_address_ipv4 = "192.168.1.0/24"
+r1_connected_address_ipv6 = "fd00:0:0:1::/64"
+r3_connected_address_ipv4 = "192.168.2.0/24"
+r3_connected_address_ipv6 = "fd00:0:0:2::/64"
+r4_connected_address_ipv4 = "192.168.3.0/24"
+r4_connected_address_ipv6 = "fd00:0:0:3::/64"
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.15")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+ global R0_NETWORK_LOOPBACK, R0_NETWORK_LOOPBACK_NXTHOP, R1_NETWORK_LOOPBACK, R1_NETWORK_LOOPBACK_NXTHOP
+ global R0_NETWORK_CONNECTED, R0_NETWORK_CONNECTED_NXTHOP, R1_NETWORK_CONNECTED, R1_NETWORK_CONNECTED_NXTHOP
+ global R4_NETWORK_LOOPBACK, R4_NETWORK_LOOPBACK_NXTHOP, R3_NETWORK_LOOPBACK, R3_NETWORK_LOOPBACK_NXTHOP
+ global R4_NETWORK_CONNECTED, R4_NETWORK_CONNECTED_NXTHOP, R3_NETWORK_CONNECTED, R3_NETWORK_CONNECTED_NXTHOP
+
+ ADDR_TYPES = check_address_types()
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+ # These are the global variables used throughout the file; they are populated only after the topology is built.
+
+ r0_loopback_address_ipv4 = topo["routers"]["r0"]["links"]["lo"]["ipv4"]
+ r0_loopback_address_ipv4_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv4"
+ ].split("/")[0]
+ r0_loopback_address_ipv6 = topo["routers"]["r0"]["links"]["lo"]["ipv6"]
+ r0_loopback_address_ipv6_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv6"
+ ].split("/")[0]
+
+ r1_loopback_address_ipv4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+ r1_loopback_address_ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r1_loopback_address_ipv6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+ r1_loopback_address_ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ r4_loopback_address_ipv4 = topo["routers"]["r4"]["links"]["lo"]["ipv4"]
+ r4_loopback_address_ipv4_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv4"
+ ].split("/")[0]
+ r4_loopback_address_ipv6 = topo["routers"]["r4"]["links"]["lo"]["ipv6"]
+ r4_loopback_address_ipv6_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv6"
+ ].split("/")[0]
+
+ r3_loopback_address_ipv4 = topo["routers"]["r3"]["links"]["lo"]["ipv4"]
+ r3_loopback_address_ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r3_loopback_address_ipv6 = topo["routers"]["r3"]["links"]["lo"]["ipv6"]
+ r3_loopback_address_ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ R0_NETWORK_LOOPBACK = {
+ "ipv4": r0_loopback_address_ipv4,
+ "ipv6": r0_loopback_address_ipv6,
+ }
+ R0_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_LOOPBACK = {
+ "ipv4": r1_loopback_address_ipv4,
+ "ipv6": r1_loopback_address_ipv6,
+ }
+ R1_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r1_loopback_address_ipv4_nxt_hop,
+ "ipv6": r1_loopback_address_ipv6_nxt_hop,
+ }
+
+ R0_NETWORK_CONNECTED = {
+ "ipv4": r0_connected_address_ipv4,
+ "ipv6": r0_connected_address_ipv6,
+ }
+ R0_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_CONNECTED = {
+ "ipv4": r1_connected_address_ipv4,
+ "ipv6": r1_connected_address_ipv6,
+ }
+ R1_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r1_loopback_address_ipv4_nxt_hop,
+ "ipv6": r1_loopback_address_ipv6_nxt_hop,
+ }
+
+ R4_NETWORK_LOOPBACK = {
+ "ipv4": r4_loopback_address_ipv4,
+ "ipv6": r4_loopback_address_ipv6,
+ }
+ R4_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_LOOPBACK = {
+ "ipv4": r3_loopback_address_ipv4,
+ "ipv6": r3_loopback_address_ipv6,
+ }
+ R3_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r3_loopback_address_ipv4_nxt_hop,
+ "ipv6": r3_loopback_address_ipv6_nxt_hop,
+ }
+
+ R4_NETWORK_CONNECTED = {
+ "ipv4": r4_connected_address_ipv4,
+ "ipv6": r4_connected_address_ipv6,
+ }
+ R4_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_CONNECTED = {
+ "ipv4": r3_connected_address_ipv4,
+ "ipv6": r3_connected_address_ipv6,
+ }
+ R3_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r3_loopback_address_ipv4_nxt_hop,
+ "ipv6": r3_loopback_address_ipv6_nxt_hop,
+ }
+
+ # populating the nexthop for default routes
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local API's
+#
+#####################################################
+
+
+def configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut, peer):
+ """
+    Helper that applies the given BGP/graceful-restart configuration via
+    create_router_bgp, grouping the repetitive calls in one place.
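+
+    Illustrative usage (a sketch only; the AS numbers and the input_dict shape
+    below mirror the GR testcase in this file and are otherwise assumptions):
+
+        gr_input = {
+            "r2": {"bgp": {"local_as": 1000,
+                           "graceful-restart": {"graceful-restart": True}}},
+            "r3": {"bgp": {"local_as": 3000,
+                           "graceful-restart": {"graceful-restart-helper": True}}},
+        }
+        configure_gr_followed_by_clear(
+            tgen, topo, gr_input, tc_name, dut="r2", peer="r3"
+        )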
+ """
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_verify_bgp_default_originate_route_map_in_OUT_p1(request):
+ """
+    Verify BGP default-originate with a route-map and an outbound (OUT) route-map
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
+ step("Configure IPv4 and IPv6 IBGP neighbor between R3 and R4")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": r1_local_as,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": r2_local_as,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step(
+ "Configure 2 IPv4 and 2 IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure redistribute static knob on R4 , for R4 to R3 neighbor ")
+ redistribute_static = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ expected_routes = {
+ "ipv4": [
+ {"network": NETWORK1_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
+ {"network": NETWORK2_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
+ ],
+ "ipv6": [
+ {"network": NETWORK1_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv4"]},
+ {"network": NETWORK2_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv4"]},
+ ],
+ }
+ result = verify_bgp_advertised_routes_from_neighbor(
+ tgen, topo, dut="r4", peer="r3", expected_routes=expected_routes
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "After redistribute static verify the routes is recevied in router R3 in RIB and FIB"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R3 to match BGP route Sv41, IPv6 route Sv61 with permit option "
+ )
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 Prefix list got configured on R3")
+ input_dict = {"r3": {"prefix_lists": ["Pv4", "Pv6"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure IPv4 and IPv6 route-map RMv4 and RMv6 matching prefix-list Pv4 and Pv6 with permit option "
+ )
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RM4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RM6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure IPv4 prefix-list Pv42 and and IPv6 prefix-list Pv62 on R3 to match BGP route Sv42, IPv6 route Sv62 with deny option"
+ )
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv42": [
+ {"seqid": "1", "network": NETWORK2_1["ipv4"], "action": "deny"}
+ ]
+ },
+ "ipv6": {
+ "Pv62": [
+ {"seqid": "1", "network": NETWORK2_1["ipv6"], "action": "deny"}
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 Prefix list got configured on R3")
+ input_dict = {"r3": {"prefix_lists": ["Pv42", "Pv62"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure IPv4 and IPv6 route-map (RMv42 and RMv62 )matching prefix-list Pv42 and Pv62 with permit option "
+ )
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv42": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv42"}},
+ },
+ ],
+ "RMv62": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv62"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Apply IPv4 and IPv6 route-map RMv4 and RMv6 with default-originate on R3 , for R3 to R2 peers and Apply IPv4 and IPv6 out route-map RMv42 and RMv62 on R3 , for R3 to R2 peers "
+ )
+ local_as = get_dut_as_number(tgen, "r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RM4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RM6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ updated_topo = topo
+ updated_topo["routers"]["r0"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r0")
+ updated_topo["routers"]["r1"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r1")
+ updated_topo["routers"]["r2"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r2")
+ updated_topo["routers"]["r3"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r3")
+ updated_topo["routers"]["r4"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r4")
+
+ step(
+ "Apply IPv4 and IPv6 route-map RMv42 and RMv62 on R3 (OUT Direction), for R3 to R2 peers "
+ )
+ input_dict_4 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {"name": "RMv42", "direction": "out"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {"name": "RMv62", "direction": "out"}
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, updated_topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ NOTE = """
+    After applying the route-map on the neighbor, verify the default BGP IPv4 and IPv6 routes are populated in the R2 BGP and routing tables, using "show ip bgp json", "show ipv6 bgp json", "show ip route json" and "show ipv6 route json"
+    Sv42 and Sv62 routes should not be present on R2
+ """
+ step(NOTE)
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen, addr_type, "r2", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Static routes are not expected due to conditions \nError: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen, addr_type, "r2", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Static routes are not expected due to conditions\n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change IPv4 prefix-list Pv42 and and IPv6 prefix-list Pv62 deny to permit")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv42": [
+ {
+ "seqid": "1",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "Pv62": [
+ {
+ "seqid": "1",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 Prefix list got configured on R3")
+ input_dict = {"r3": {"prefix_lists": ["Pv42", "Pv62"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ NOTE = """Default BGP route and IPv4 ( Sv42) , IPv6 (Sv62) route populated in R2 BGP and routing table"""
+ step(NOTE)
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 permit to deny ")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {"seqid": "1", "network": NETWORK1_1["ipv4"], "action": "deny"}
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {"seqid": "1", "network": NETWORK1_1["ipv6"], "action": "deny"}
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ NOTE = """
+    Verify the default-originate routes (IPv4 and IPv6) are not present on R2
+    IPv4 (Sv42) and IPv6 (Sv62) routes are populated in R2 BGP
+ """
+ step(NOTE)
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n default-route in FIB is not expected due to conditions \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n default-route in RIB is not expected due to conditions \n Error: {}".format(
+ tc_name, result
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+def test_verify_bgp_default_originate_route_map_in_IN_p1(request):
+ """Verify BGP default originate route-map with IN route-map"""
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R1 and R2")
+ step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R0")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": r2_local_as,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": r3_local_as,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": r4_local_as,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step(
+ "Configure 2 IPv4 and 2 IPv6, Static route on R0 with next-hop as Null0 IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verifyIPv4 and IPv6 static routes are configure and up on R0 ")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure redistribute static knob on R0 , for R0 to R1 IPv4 and IPv6 neighbor"
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify IPv4 and IPv6 route received on R1 ")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R1 to match BGP route Sv41, Sv42, IPv6 route Sv61 Sv62"
+ )
+ input_dict_3 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 Prefix list got configured on R1")
+ input_dict = {"r1": {"prefix_lists": ["Pv4", "Pv6"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure IPv4 and IPv6 route-map RMv4 and RMv6 matching prefix-list Pv4 and Pv6 with deny option on R1"
+ )
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Apply route-map IN direction in R1 (R1 to R0) IPv4 and IPv6 neighbor")
+ updated_topo = topo
+ updated_topo["routers"]["r0"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r0")
+ updated_topo["routers"]["r1"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r1")
+ updated_topo["routers"]["r2"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r2")
+ updated_topo["routers"]["r3"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r3")
+ updated_topo["routers"]["r4"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r4")
+
+ local_as_r1 = get_dut_as_number(tgen, dut="r1")
+ input_dict_4 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r0": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "RMv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r0": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "RMv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, updated_topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ STEP = "After applying route-map verify that IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62 should not present on R1 BGP and routing table "
+ step(STEP)
+
+ step(
+ "After applying route-map verify that IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62 should not present on R1 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen, addr_type, "r1", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n default-route in FIB is not expected due to conditions \nError: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen, addr_type, "r1", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n default-route in FIB is not expected due to conditions \nError: {}".format(
+ tc_name, result
+ )
+    # Routes should reach the DUT but must not be installed in the RIB; verify
+    # using "show ip bgp neighbors <nbr> received-routes"
+ step(
+ " Verify the received routes \n using 'show ip bgp nbr xxx received route' in Router R1"
+ )
+ expected_routes = {
+ "ipv4": [
+ {"network": NETWORK1_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
+ {"network": NETWORK2_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
+ ],
+ "ipv6": [
+ {"network": NETWORK1_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv6"]},
+ {"network": NETWORK2_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv6"]},
+ ],
+ }
+ result = verify_bgp_received_routes_from_neighbor(
+ tgen, topo, dut="r1", peer="r0", expected_routes=expected_routes
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure default-originate on R1 for R1 to R2 IPv4 and IPv6 neighbor ")
+ local_as_r1 = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as_r1,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify Default originate knob is configured and default route advertised to R2 , verify on R1 "
+ )
+ expected_routes = {
+ "ipv4": [
+ {"network": "0.0.0.0/0", "nexthop": ""},
+ ],
+ "ipv6": [
+ {"network": "::/0", "nexthop": ""},
+ ],
+ }
+ result = verify_bgp_advertised_routes_from_neighbor(
+ tgen, topo, dut="r1", peer="r2", expected_routes=expected_routes
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify the Default route Route in FIB in R2")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change route-map RMv4 and RMv6 from deny to permit")
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ NOTE = """After changing route-map to permit verify that IPv4 routes Sv41, Sv42, IPv6 routes Sv61 Sv62 present on R1 BGP and routing table , using "show ip route " "show ip bgp nbr xxx received route " "show ipv6 route " "show ipv6 bgp nbr xxx receied route """
+ step(NOTE)
+ expected_routes = {
+ "ipv4": [{"network": NETWORK1_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]}],
+ "ipv6": [{"network": NETWORK1_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv4"]}],
+ }
+ result = verify_bgp_received_routes_from_neighbor(
+ tgen, topo, dut="r1", peer="r0", expected_routes=expected_routes
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure default static route (IPv4 and IPv6) on R2 nexthop as R1 ")
+ NEXT_HOP_IP_R1 = {}
+ r1_r2_ipv4_neighbor = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ r1_r2_ipv6_neighbor = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+ NEXT_HOP_IP_R1["ipv4"] = r1_r2_ipv4_neighbor
+ NEXT_HOP_IP_R1["ipv6"] = r1_r2_ipv6_neighbor
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": "0.0.0.0/0",
+ "next_hop": NEXT_HOP_IP_R1["ipv4"],
+ },
+ {
+ "network": "0::0/0",
+ "next_hop": NEXT_HOP_IP_R1["ipv6"],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify Static default route is taking preference over BGP default routes , BGP default route is inactive IN RIB and static is up and installed in RIB and FIB "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_nxt_hop}
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP[addr_type],
+ "protocol": "static",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ write_test_footer(tc_name)
+
+def test_verify_default_originate_after_removing_default_originate_p1(request):
+ """Verify BGP default route after removing default-originate"""
+
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure EBGP between R0 to R1 and IBGP between R1 to R2")
+ step("Configure EBGP between R2 to R3 and IBGP between R3 to R4")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 5000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 5000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure IPv4 and IPv6 static route on R0 and R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ },
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ },
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R0 and R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Configure redistribute connected and static on R0 (R0-R1) on R4 ( R4-R3) IPv4 and IPv6 address family "
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ "ipv6": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ "ipv6": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 static route are configured and up on R1 and R3")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R1 and R3")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate on R1 for R1 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "Verify all the static , connected and loopback routes from R0,R1,R3 and R4 is receieved on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify the Default Originate on R2 nexthop as R1")
+
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is expected but found in RIB -> {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is expected but found in FIB -> {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate on R3 for R3 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ STEP = """After configuring the Default Originate From R3 --> R2
+ Both Default routes from R1 and R3 Should present in R2 BGP RIB
+ The Deafult Route from iBGP is prefferedover EBGP thus
+ Default Route From R1->r2 should only present in R2 FIB """
+ step(STEP)
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Only IBGP default originate is expected in FIB over EBGP {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "No change on static and connected routes which got advertised from R0, R1, R3 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ " Remove default-originate on R1 for R1 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify the Default Originate reoute from R1 to r2 is removed in R2 ")
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ NOTE = """ after removing the Default originate from R1-->R2
+ Verify the BGP Default route received from R3 is present in both BGP RIB and FIB on R2
+ """
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "No change on static and connected routes which got advertised from R0, R1, R3 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Remove default-originate on R3 for R3 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "After removing default originate , verify default IPv4 and IPv6 BGP routes removed on R2 from R1 ( next-hop as R3) "
+ )
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in RIB \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "No change on static and connected routes which got advertised from R0, R1, R3 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ write_test_footer(tc_name)
+
+def test_verify_default_originate_route_with_GR_p1(request):
+ """ "Verify default-originate route with GR "
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPV4 and IPV6 IBGP between R1 and R2 ")
+ step("Configure IPV4 and IPV6 EBGP between R2 to R3 ")
+    r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+    r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+    r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+    r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+    r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": r3_local_as,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": r4_local_as,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step(
+ "Configure per peer Graceful restart on R2 ( restarting router) and R3 helper router "
+ )
+ input_dict = {
+ "r2": {
+ "bgp": {
+ "local_as": get_dut_as_number(tgen, "r2"),
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": get_dut_as_number(tgen, "r3"),
+ "graceful-restart": {"graceful-restart-helper": True},
+ }
+ },
+ }
+
+ configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r2", peer="r3")
+
+ step("verify Graceful restart at R2")
+ for addr_type in ADDR_TYPES:
+ result = verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut="r2", peer="r3"
+ )
+ assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
+
+ step(
+ "Configure default-originate on R1 for R1-R2 neighbor for IPv4 and IPv6 BGP peers "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+    default_originate_config = {
+        "r1": {
+            "bgp": {
+                "local_as": local_as,
+                "address_family": {
+                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+                },
+            }
+        }
+    }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "R2 received default-originate routes and advertised it to R3 , verify on R2 and R3"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input,next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input,next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+    step("Kill and restart the bgpd daemon on R2")
+ kill_router_daemons(tgen, "r2", ["bgpd"])
+ start_router_daemons(tgen, "r2", ["bgpd"])
+
+ step("verify default route is relearned after clear bgp on R2 on BGP RIB and")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input,next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input,next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ write_test_footer(tc_name)
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 4dd44e3e9e..216756f512 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -29,6 +29,7 @@ from lib.common_config import (
create_common_configurations,
FRRCFG_FILE,
InvalidCLIError,
+ apply_raw_config,
check_address_types,
find_interface_with_greater_ip,
generate_ips,
@@ -74,6 +75,12 @@ def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config
"address_family": {
"ipv4": {
"unicast": {
+ "default_originate":{
+ "neighbor":"R2",
+ "add_type":"lo"
+ "route_map":"rm"
+
+ },
"redistribute": [{
"redist_type": "static",
"attribute": {
@@ -498,6 +505,12 @@ def __create_bgp_unicast_neighbor(
topo, input_dict, router, addr_type, add_neigh
)
config_data.extend(neigh_data)
+ # configure default originate
+ if "default_originate" in addr_data:
+ default_originate_config = __create_bgp_default_originate_neighbor(
+ topo, input_dict, router, addr_type, add_neigh
+ )
+ config_data.extend(default_originate_config)
for addr_type, addr_dict in bgp_data.items():
if not addr_dict or not check_address_types(addr_type):
@@ -515,6 +528,78 @@ def __create_bgp_unicast_neighbor(
return config_data
+def __create_bgp_default_originate_neighbor(
+ topo, input_dict, router, addr_type, add_neigh=True
+):
+ """
+    Helper API to create neighbor default-originate configuration
+
+ Parameters
+ ----------
+    * `topo` : json file data
+    * `input_dict` : Input dict data, required when configuring from testcase
+    * `router` : router id to be configured
+    * `addr_type` : address type, ipv4 or ipv6
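+
+    Illustrative input_dict["address_family"] snippet (a sketch only; the peer
+    name "r2" and route-map "RMv4" are placeholders):
+
+        "ipv4": {
+            "unicast": {
+                "default_originate": {
+                    "r2": {"route_map": "RMv4"}
+                }
+            }
+        }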
+ """
+ tgen = get_topogen()
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_default_originate_neighbor()")
+
+ bgp_data = input_dict["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["default_originate"]
+ for name, peer_dict in neigh_data.items():
+ nh_details = topo[name]
+
+ neighbor_ip = None
+ if "dest-link" in neigh_data[name]:
+ dest_link = neigh_data[name]["dest-link"]
+ neighbor_ip = nh_details["links"][dest_link][addr_type].split("/")[0]
+ elif "add_type" in neigh_data[name]:
+ add_type = neigh_data[name]["add_type"]
+ neighbor_ip = nh_details["links"][add_type][addr_type].split("/")[0]
+ else:
+ neighbor_ip = nh_details["links"][router][addr_type].split("/")[0]
+
+ config_data.append("address-family {} unicast".format(addr_type))
+ if "route_map" in peer_dict:
+ route_map = peer_dict["route_map"]
+ if "delete" in peer_dict:
+ if peer_dict["delete"]:
+ config_data.append(
+ "no neighbor {} default-originate route-map {}".format(
+ neighbor_ip, route_map
+ )
+ )
+ else:
+ config_data.append(
+ " neighbor {} default-originate route-map {}".format(
+ neighbor_ip, route_map
+ )
+ )
+ else:
+ config_data.append(
+ " neighbor {} default-originate route-map {}".format(
+ neighbor_ip, route_map
+ )
+ )
+
+ else:
+ if "delete" in peer_dict:
+ if peer_dict["delete"]:
+ config_data.append(
+ "no neighbor {} default-originate".format(neighbor_ip)
+ )
+ else:
+ config_data.append(
+ "neighbor {} default-originate".format(neighbor_ip)
+ )
+ else:
+ config_data.append("neighbor {} default-originate".format(neighbor_ip))
+
+ logger.debug("Exiting lib API: __create_bgp_default_originate_neighbor()")
+ return config_data
+
+
def __create_l2vpn_evpn_address_family(
tgen, topo, input_dict, router, config_data=None
):
@@ -4574,3 +4659,876 @@ def verify_tcp_mss(tgen, dut, neighbour, configured_tcp_mss, vrf=None):
return "TCP-MSS Mismatch"
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return False
+
+
+def get_dut_as_number(tgen, dut):
+ """
+ API to get the Autonomous Number of the given DUT
+
+ params:
+ =======
+ dut : Device Under test
+
+ returns :
+ =======
+ Success : DUT Autonomous number
+ Fail : Error message with Boolean False
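+
+    Usage
+    -----
+    Illustrative sketch only; "r1" is a hypothetical router name.
+    as_number = get_dut_as_number(tgen, dut="r1")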
+ """
+ tgen = get_topogen()
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ show_bgp_json = run_frr_cmd(rnode, "sh ip bgp summary json ", isjson=True)
+ as_number = show_bgp_json["ipv4Unicast"]["as"]
+ if as_number:
+ logger.info(
+                    "[dut {}] DUT contains Autonomous number :: {} ".format(
+ dut, as_number
+ )
+ )
+ return as_number
+ else:
+ logger.error(
+                    "[dut {}] ERROR....! DUT does not contain any Autonomous number ".format(
+ dut
+ )
+ )
+ return False
+
+
+def get_prefix_count_route(
+ tgen, topo, dut, peer, vrf=None, link=None, sent=None, received=None
+):
+ """
+    API to return the prefix count of default-originate routes on the given DUT
+    dut : Device under test
+    peer : neighbor on which you are expecting the route to be received
+
+ returns :
+ prefix_count as dict with ipv4 and ipv6 value
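+
+    Usage
+    -----
+    Illustrative sketch only; "r1" and "r2" are hypothetical router names.
+    prefix_count = get_prefix_count_route(tgen, topo, dut="r1", peer="r2")
+    # e.g. {"ipv4_count": 5, "ipv6_count": 5}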
+ """
+    # the neighbor IP address can be obtained from the dut<->peer neighborship in topo (and vice-versa)
+
+ if link:
+ neighbor_ipv4_address = topo["routers"][peer]["links"][link]["ipv4"]
+ neighbor_ipv6_address = topo["routers"][peer]["links"][link]["ipv6"]
+ else:
+ neighbor_ipv4_address = topo["routers"][peer]["links"][dut]["ipv4"]
+ neighbor_ipv6_address = topo["routers"][peer]["links"][dut]["ipv6"]
+
+ neighbor_ipv4_address = neighbor_ipv4_address.split("/")[0]
+ neighbor_ipv6_address = neighbor_ipv6_address.split("/")[0]
+ prefix_count = {}
+ tgen = get_topogen()
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+
+ if vrf:
+ ipv4_cmd = "sh ip bgp vrf {} summary json".format(vrf)
+ show_bgp_json_ipv4 = run_frr_cmd(rnode, ipv4_cmd, isjson=True)
+ ipv6_cmd = "sh ip bgp vrf {} ipv6 unicast summary json".format(vrf)
+ show_bgp_json_ipv6 = run_frr_cmd(rnode, ipv6_cmd, isjson=True)
+
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"]["peers"][
+ neighbor_ipv4_address
+ ]["pfxRcd"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxRcd"]
+
+ logger.info(
+ "The Prefix Count of the [DUT:{} : vrf [{}] ] towards neighbor ipv4 : {} and ipv6 : {} is : {}".format(
+ dut,
+ vrf,
+ neighbor_ipv4_address,
+ neighbor_ipv6_address,
+ prefix_count,
+ )
+ )
+ return prefix_count
+
+ else:
+ show_bgp_json_ipv4 = run_frr_cmd(
+ rnode, "sh ip bgp summary json ", isjson=True
+ )
+ show_bgp_json_ipv6 = run_frr_cmd(
+ rnode, "sh ip bgp ipv6 unicast summary json ", isjson=True
+ )
+ if received:
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"][
+ "peers"
+ ][neighbor_ipv4_address]["pfxRcd"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxRcd"]
+
+ elif sent:
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"][
+ "peers"
+ ][neighbor_ipv4_address]["pfxSnt"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxSnt"]
+
+ else:
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"][
+ "peers"
+ ][neighbor_ipv4_address]["pfxRcd"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxRcd"]
+
+ logger.info(
+ "The Prefix Count of the DUT:{} towards neighbor ipv4 : {} and ipv6 : {} is : {}".format(
+ dut, neighbor_ipv4_address, neighbor_ipv6_address, prefix_count
+ )
+ )
+ return prefix_count
+ else:
+            logger.error("ERROR...! Unknown dut {} in topology".format(dut))
+
+
+@retry(retry_timeout=5)
+def verify_rib_default_route(
+ tgen,
+ topo,
+ dut,
+ routes,
+ expected_nexthop,
+ metric=None,
+ origin=None,
+ locPrf=None,
+ expected_aspath=None,
+):
+ """
+    API to verify the 'Default route' in BGP RIB along with the attributes the route carries (metric, local preference, etc.)
+
+ param
+ =====
+ dut : device under test
+ routes : default route with expected nexthop
+    expected_nexthop : the nexthop that is expected for the default route
+
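+    Usage
+    -----
+    Illustrative sketch only; the nexthop addresses below are hypothetical
+    placeholders, not taken from a specific topology.
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r1",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop={"ipv4": "10.0.0.2", "ipv6": "fd00::2"},
+    )
+    assert result is True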
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ connected_routes = {}
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+
+ ipv4_routes = run_frr_cmd(rnode, "sh ip bgp json", isjson=True)
+ ipv6_routes = run_frr_cmd(rnode, "sh ip bgp ipv6 unicast json", isjson=True)
+ is_ipv4_default_attrib_found = False
+ is_ipv6_default_attrib_found = False
+
+ default_ipv4_route = routes["ipv4"]
+ default_ipv6_route = "::/0"
+ ipv4_route_Origin = False
+ ipv4_route_local_pref = False
+ ipv4_route_metric = False
+
+ if default_ipv4_route in ipv4_routes["routes"].keys():
+ nxt_hop_count = len(ipv4_routes["routes"][default_ipv4_route])
+ rib_next_hops = []
+ for index in range(nxt_hop_count):
+ rib_next_hops.append(
+ ipv4_routes["routes"][default_ipv4_route][index]["nexthops"][0]["ip"]
+ )
+
+ for nxt_hop in expected_nexthop.items():
+ if nxt_hop[0] == "ipv4":
+ if nxt_hop[1] in rib_next_hops:
+ logger.info(
+ "Default routes [{}] obtained from {} .....PASSED".format(
+ default_ipv4_route, nxt_hop[1]
+ )
+ )
+ else:
+ logger.error(
+                            "ERROR ...! Default route [{}] is missing the expected nexthop {}".format(
+ default_ipv4_route, nxt_hop[1]
+ )
+ )
+ return False
+
+ else:
+ pass
+
+ if "origin" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_Origin = ipv4_routes["routes"][default_ipv4_route][0]["origin"]
+ if "locPrf" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_local_pref = ipv4_routes["routes"][default_ipv4_route][0][
+ "locPrf"
+ ]
+ if "metric" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_metric = ipv4_routes["routes"][default_ipv4_route][0]["metric"]
+ else:
+ logger.error("ERROR [ DUT {}] : The Default Route Not found in RIB".format(dut))
+ return False
+
+ origin_found = False
+ locPrf_found = False
+ metric_found = False
+ as_path_found = False
+
+ if origin:
+ if origin == ipv4_route_Origin:
+ logger.info(
+                    "Default Route {} expected origin {} Found in RIB....PASSED".format(
+ default_ipv4_route, origin
+ )
+ )
+ origin_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected Origin is {} obtained {}".format(
+ origin, ipv4_route_Origin
+ )
+ )
+ return False
+ else:
+ origin_found = True
+
+ if locPrf:
+ if locPrf == ipv4_route_local_pref:
+ logger.info(
+                    "Default Route {} expected local preference {} Found in RIB....PASSED".format(
+ default_ipv4_route, locPrf
+ )
+ )
+ locPrf_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected Local preference is {} obtained {}".format(
+ locPrf, ipv4_route_local_pref
+ )
+ )
+ return False
+ else:
+ locPrf_found = True
+
+ if metric:
+ if metric == ipv4_route_metric:
+ logger.info(
+                    "Default Route {} expected metric {} Found in RIB....PASSED".format(
+ default_ipv4_route, metric
+ )
+ )
+
+ metric_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected metric is {} obtained {}".format(
+ metric, ipv4_route_metric
+ )
+ )
+ return False
+ else:
+ metric_found = True
+
+ if expected_aspath:
+ obtained_aspath = ipv4_routes["routes"]["0.0.0.0/0"][0]["path"]
+ if expected_aspath in obtained_aspath:
+ as_path_found = True
+ logger.info(
+                    "Default Route {} expected AS path {} Found in RIB....PASSED".format(
+ default_ipv4_route, expected_aspath
+ )
+ )
+ else:
+ logger.error(
+ "ERROR.....! Expected AS path {} obtained {}..... FAILED ".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+ return False
+ else:
+ as_path_found = True
+
+ if origin_found and locPrf_found and metric_found and as_path_found:
+ is_ipv4_default_attrib_found = True
+ logger.info(
+ "IPV4:: Expected origin ['{}'] , Local Preference ['{}'] , Metric ['{}'] and AS path [{}] is found in RIB".format(
+ origin, locPrf, metric, expected_aspath
+ )
+ )
+ else:
+ is_ipv4_default_attrib_found = False
+ logger.error(
+ "IPV4:: Expected origin ['{}'] Obtained [{}]".format(
+ origin, ipv4_route_Origin
+ )
+ )
+ logger.error(
+ "IPV4:: Expected locPrf ['{}'] Obtained [{}]".format(
+ locPrf, ipv4_route_local_pref
+ )
+ )
+ logger.error(
+ "IPV4:: Expected metric ['{}'] Obtained [{}]".format(
+ metric, ipv4_route_metric
+ )
+ )
+ logger.error(
+                "IPV4:: Expected AS path ['{}'] Obtained [{}]".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+
+ route_Origin = False
+ route_local_pref = False
+ route_local_metric = False
+ default_ipv6_route = ""
+ try:
+ ipv6_routes["routes"]["0::0/0"]
+ default_ipv6_route = "0::0/0"
+        except KeyError:
+ ipv6_routes["routes"]["::/0"]
+ default_ipv6_route = "::/0"
+ if default_ipv6_route in ipv6_routes["routes"].keys():
+ nxt_hop_count = len(ipv6_routes["routes"][default_ipv6_route])
+ rib_next_hops = []
+ for index in range(nxt_hop_count):
+ rib_next_hops.append(
+ ipv6_routes["routes"][default_ipv6_route][index]["nexthops"][0]["ip"]
+ )
+ try:
+ rib_next_hops.append(
+ ipv6_routes["routes"][default_ipv6_route][index]["nexthops"][1][
+ "ip"
+ ]
+ )
+ except (KeyError, IndexError) as e:
+ logger.error("NO impact ..! Global IPV6 Address not found ")
+
+ for nxt_hop in expected_nexthop.items():
+ if nxt_hop[0] == "ipv6":
+ if nxt_hop[1] in rib_next_hops:
+ logger.info(
+ "Default routes [{}] obtained from {} .....PASSED".format(
+ default_ipv6_route, nxt_hop[1]
+ )
+ )
+ else:
+ logger.error(
+ "ERROR ...! Default routes [{}] expected from {} obtained {}".format(
+ default_ipv6_route, nxt_hop[1], rib_next_hops
+ )
+ )
+ return False
+
+ else:
+ pass
+ if "origin" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_Origin = ipv6_routes["routes"][default_ipv6_route][0]["origin"]
+ if "locPrf" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_local_pref = ipv6_routes["routes"][default_ipv6_route][0]["locPrf"]
+ if "metric" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_local_metric = ipv6_routes["routes"][default_ipv6_route][0]["metric"]
+
+ origin_found = False
+ locPrf_found = False
+ metric_found = False
+ as_path_found = False
+
+ if origin:
+ if origin == route_Origin:
+ logger.info(
+                    "Default Route {} expected origin {} Found in RIB....PASSED".format(
+ default_ipv6_route, route_Origin
+ )
+ )
+ origin_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected Origin is {} obtained {}".format(
+ origin, route_Origin
+ )
+ )
+ return False
+ else:
+ origin_found = True
+
+ if locPrf:
+ if locPrf == route_local_pref:
+ logger.info(
+                    "Default Route {} expected Local Preference {} Found in RIB....PASSED".format(
+ default_ipv6_route, route_local_pref
+ )
+ )
+ locPrf_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected Local Preference is {} obtained {}".format(
+ locPrf, route_local_pref
+ )
+ )
+ return False
+ else:
+ locPrf_found = True
+
+ if metric:
+ if metric == route_local_metric:
+ logger.info(
+                    "Default Route {} expected metric {} Found in RIB....PASSED".format(
+                        default_ipv6_route, metric
+ )
+ )
+
+ metric_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected metric is {} obtained {}".format(
+ metric, route_local_metric
+ )
+ )
+ return False
+ else:
+ metric_found = True
+
+ if expected_aspath:
+ obtained_aspath = ipv6_routes["routes"]["::/0"][0]["path"]
+ if expected_aspath in obtained_aspath:
+ as_path_found = True
+ logger.info(
+                    "Default Route {} expected AS path {} Found in RIB....PASSED".format(
+                        default_ipv6_route, expected_aspath
+ )
+ )
+ else:
+ logger.error(
+ "ERROR.....! Expected AS path {} obtained {}..... FAILED ".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+ return False
+ else:
+ as_path_found = True
+
+ if origin_found and locPrf_found and metric_found and as_path_found:
+ is_ipv6_default_attrib_found = True
+ logger.info(
+ "IPV6:: Expected origin ['{}'] , Local Preference ['{}'] , Metric ['{}'] and AS path [{}] is found in RIB".format(
+ origin, locPrf, metric, expected_aspath
+ )
+ )
+ else:
+ is_ipv6_default_attrib_found = False
+ logger.error(
+ "IPV6:: Expected origin ['{}'] Obtained [{}]".format(origin, route_Origin)
+ )
+ logger.error(
+ "IPV6:: Expected locPrf ['{}'] Obtained [{}]".format(
+ locPrf, route_local_pref
+ )
+ )
+ logger.error(
+ "IPV6:: Expected metric ['{}'] Obtained [{}]".format(
+ metric, route_local_metric
+ )
+ )
+ logger.error(
+            "IPV6:: Expected AS path ['{}'] Obtained [{}]".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+
+ if is_ipv4_default_attrib_found and is_ipv6_default_attrib_found:
+ logger.info("The attributes are found for default route in RIB ")
+ return True
+ else:
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_fib_default_route(tgen, topo, dut, routes, expected_nexthop):
+ """
+    API to verify the 'Default route' in FIB
+
+ param
+ =====
+ dut : device under test
+ routes : default route with expected nexthop
+    expected_nexthop : the nexthop that is expected for the default route
+
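+    Usage
+    -----
+    Illustrative sketch only; the addresses below are hypothetical placeholders.
+    result = verify_fib_default_route(
+        tgen,
+        topo,
+        dut="r1",
+        routes={"ipv4": "0.0.0.0/0", "ipv6": "::/0"},
+        expected_nexthop={"ipv4": "10.0.0.2", "ipv6": "fd00::2"},
+    )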
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ connected_routes = {}
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_routes = run_frr_cmd(rnode, "sh ip route json", isjson=True)
+ ipv6_routes = run_frr_cmd(rnode, "sh ipv6 route json", isjson=True)
+
+ is_ipv4_default_route_found = False
+ is_ipv6_default_route_found = False
+ if routes["ipv4"] in ipv4_routes.keys():
+ rib_ipv4_nxt_hops = []
+ ipv4_default_route = routes["ipv4"]
+ nxt_hop_count = len(ipv4_routes[ipv4_default_route][0]["nexthops"])
+ for index in range(nxt_hop_count):
+ rib_ipv4_nxt_hops.append(
+ ipv4_routes[ipv4_default_route][0]["nexthops"][index]["ip"]
+ )
+
+ if expected_nexthop["ipv4"] in rib_ipv4_nxt_hops:
+ is_ipv4_default_route_found = True
+ logger.info(
+ "{} default route with next hop {} is found in FIB ".format(
+ ipv4_default_route, expected_nexthop
+ )
+ )
+ else:
+ logger.error(
+ "ERROR .. ! {} default route with next hop {} is not found in FIB ".format(
+ ipv4_default_route, expected_nexthop
+ )
+ )
+ return False
+
+ if routes["ipv6"] in ipv6_routes.keys() or "::/0" in ipv6_routes.keys():
+ rib_ipv6_nxt_hops = []
+ if "::/0" in ipv6_routes.keys():
+ ipv6_default_route = "::/0"
+ elif routes["ipv6"] in ipv6_routes.keys():
+ ipv6_default_route = routes["ipv6"]
+
+ nxt_hop_count = len(ipv6_routes[ipv6_default_route][0]["nexthops"])
+ for index in range(nxt_hop_count):
+ rib_ipv6_nxt_hops.append(
+ ipv6_routes[ipv6_default_route][0]["nexthops"][index]["ip"]
+ )
+
+ if expected_nexthop["ipv6"] in rib_ipv6_nxt_hops:
+ is_ipv6_default_route_found = True
+ logger.info(
+ "{} default route with next hop {} is found in FIB ".format(
+ ipv6_default_route, expected_nexthop
+ )
+ )
+ else:
+ logger.error(
+ "ERROR .. ! {} default route with next hop {} is not found in FIB ".format(
+ ipv6_default_route, expected_nexthop
+ )
+ )
+ return False
+
+ if is_ipv4_default_route_found and is_ipv6_default_route_found:
+ return True
+ else:
+ logger.error(
+ "Default Route for ipv4 and ipv6 address family is not found in FIB "
+ )
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_bgp_advertised_routes_from_neighbor(tgen, topo, dut, peer, expected_routes):
+ """
+    API to verify the routes that are advertised from dut to peer
+
+ command used :
+ "sh ip bgp neighbor <x.x.x.x> advertised-routes" and
+ "sh ip bgp ipv6 unicast neighbor<x::x> advertised-routes"
+
+ dut : Device Under Tests
+    Peer : Peer on which the routes are expected
+    expected_routes : dual stack IPv4 and IPv6 routes to be verified
+
+ returns: True / False
+
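+    Usage
+    -----
+    Illustrative sketch only; the prefixes below are hypothetical placeholders.
+    expected_routes = {
+        "ipv4": [{"network": "0.0.0.0/0"}, {"network": "192.0.2.0/24"}],
+        "ipv6": [{"network": "::/0"}, {"network": "2001:db8::/64"}],
+    }
+    result = verify_bgp_advertised_routes_from_neighbor(
+        tgen, topo, dut="r1", peer="r2", expected_routes=expected_routes
+    )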
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+
+ peer_ipv4_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv4"].split("/")[0]
+ peer_ipv6_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv6"].split("/")[0]
+
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp neighbor {} advertised-routes json".format(
+ peer_ipv4_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv6_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp ipv6 unicast neighbor {} advertised-routes json".format(
+ peer_ipv6_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv4_route_count = 0
+ ipv6_route_count = 0
+ if ipv4_receieved_routes:
+ for index in range(len(expected_routes["ipv4"])):
+ if (
+ expected_routes["ipv4"][index]["network"]
+ in ipv4_receieved_routes["advertisedRoutes"].keys()
+ ):
+ ipv4_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+
+ elif (
+ expected_routes["ipv4"][index]["network"]
+ in ipv4_receieved_routes["bgpOriginatingDefaultNetwork"]
+ ):
+ ipv4_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not advertised to {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv4_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV4 Routes are advertised to the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv6_receieved_routes:
+ for index in range(len(expected_routes["ipv6"])):
+ if (
+ expected_routes["ipv6"][index]["network"]
+ in ipv6_receieved_routes["advertisedRoutes"].keys()
+ ):
+ ipv6_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ elif (
+ expected_routes["ipv6"][index]["network"]
+ in ipv6_receieved_routes["bgpOriginatingDefaultNetwork"]
+ ):
+ ipv6_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not advertised to {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv6_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV6 Routes are advertised to the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv4_route_count == len(expected_routes["ipv4"]) and ipv6_route_count == len(
+ expected_routes["ipv6"]
+ ):
+ return True
+ else:
+ logger.error(
+ "ERROR ....! IPV4 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv4"], ipv4_receieved_routes["advertisedRoutes"]
+ )
+ )
+ logger.error(
+ "ERROR ....! IPV6 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv6"], ipv6_receieved_routes["advertisedRoutes"]
+ )
+ )
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_bgp_received_routes_from_neighbor(tgen, topo, dut, peer, expected_routes):
+ """
+ API to verify the bgp received routes
+
+    command used :
+ =============
+ show ip bgp neighbor <x.x.x.x> received-routes
+ show ip bgp ipv6 unicast neighbor <x::x> received-routes
+
+ params
+ =======
+ dut : Device Under Tests
+    Peer : Peer on which the routes are expected
+    expected_routes : dual stack IPv4 and IPv6 routes to be verified
+
+ returns:
+ ========
+ True / False
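+
+    Usage
+    -----
+    Illustrative sketch only; the prefixes below are hypothetical placeholders.
+    Note that this API first enables soft-reconfiguration inbound on the DUT.
+    expected_routes = {
+        "ipv4": [{"network": "192.0.2.0/24"}],
+        "ipv6": [{"network": "2001:db8::/64"}],
+    }
+    result = verify_bgp_received_routes_from_neighbor(
+        tgen, topo, dut="r1", peer="r2", expected_routes=expected_routes
+    )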
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+
+ peer_ipv4_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv4"].split("/")[0]
+ peer_ipv6_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv6"].split("/")[0]
+
+ logger.info("Enabling Soft configuration to neighbor INBOUND ")
+ neigbor_dict = {"ipv4": peer_ipv4_neighbor_ip, "ipv6": peer_ipv6_neighbor_ip}
+ result = configure_bgp_soft_configuration(
+ tgen, dut, neigbor_dict, direction="inbound"
+ )
+ assert (
+ result is True
+ ), " Failed to configure the soft configuration \n Error: {}".format(result)
+
+ """sleep of 10 sec is required to get the routes on peer after soft configuration"""
+ sleep(10)
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp neighbor {} received-routes json".format(
+ peer_ipv4_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv6_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp ipv6 unicast neighbor {} received-routes json".format(
+ peer_ipv6_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv4_route_count = 0
+ ipv6_route_count = 0
+ if ipv4_receieved_routes:
+ for index in range(len(expected_routes["ipv4"])):
+ if (
+ expected_routes["ipv4"][index]["network"]
+ in ipv4_receieved_routes["receivedRoutes"].keys()
+ ):
+ ipv4_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is received from {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not received from {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv4_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV4 Routes are received from the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv6_receieved_routes:
+ for index in range(len(expected_routes["ipv6"])):
+ if (
+ expected_routes["ipv6"][index]["network"]
+ in ipv6_receieved_routes["receivedRoutes"].keys()
+ ):
+ ipv6_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is received from {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not received from {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv6_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV6 Routes are received from the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv4_route_count == len(expected_routes["ipv4"]) and ipv6_route_count == len(
+ expected_routes["ipv6"]
+ ):
+ return True
+ else:
+ logger.error(
+ "ERROR ....! IPV4 : Expected Routes -> {} obtained ->{} ".format(
+                    expected_routes["ipv4"], ipv4_receieved_routes["receivedRoutes"]
+ )
+ )
+ logger.error(
+ "ERROR ....! IPV6 : Expected Routes -> {} obtained ->{} ".format(
+                    expected_routes["ipv6"], ipv6_receieved_routes["receivedRoutes"]
+ )
+ )
+ return False
+
+
+def configure_bgp_soft_configuration(tgen, dut, neighbor_dict, direction):
+ """
+    API to configure bgp soft reconfiguration, to show the routes received from the peer
+ params
+ ======
+    dut : device under test on which the configuration is to be applied
+    neighbor_dict : dict element containing the ipv4 and ipv6 neighbor ip
+    direction : direction in which it should be applied (in/out)
+
+ returns:
+ ========
+ boolean
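+
+    Usage
+    -----
+    Illustrative sketch only; the neighbor addresses below are hypothetical.
+    neighbor_dict = {"ipv4": "10.0.0.2", "ipv6": "fd00::2"}
+    result = configure_bgp_soft_configuration(
+        tgen, dut="r1", neighbor_dict=neighbor_dict, direction="inbound"
+    )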
+ """
+ logger.info("Enabling Soft configuration to neighbor INBOUND ")
+ local_as = get_dut_as_number(tgen, dut)
+ ipv4_neighbor = neighbor_dict["ipv4"]
+ ipv6_neighbor = neighbor_dict["ipv6"]
+ direction = direction.lower()
+    if ipv4_neighbor and ipv6_neighbor:
+ raw_config = {
+ dut: {
+ "raw_config": [
+ "router bgp {}".format(local_as),
+ "address-family ipv4 unicast",
+ "neighbor {} soft-reconfiguration {} ".format(
+ ipv4_neighbor, direction
+ ),
+ "exit-address-family",
+ "address-family ipv6 unicast",
+ "neighbor {} soft-reconfiguration {} ".format(
+ ipv6_neighbor, direction
+ ),
+ "exit-address-family",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ logger.info(
+            "Success... [DUT : {}] The soft configuration is applied on neighbors {} ".format(
+ dut, neighbor_dict
+ )
+ )
+ return True
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index 5d623c94e1..cd070e08b9 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -1592,6 +1592,13 @@ def verify_pim_interface(
if pim_interface in show_ip_pim_interface_json:
pim_intf_json = show_ip_pim_interface_json[pim_interface]
+ else:
+ errormsg = (
+ "[DUT %s]: PIM interface: %s "
+ "PIM interface ip: %s, not Found"
+ % (dut, pim_interface, pim_intf_ip)
+ )
+ return errormsg
# Verifying PIM interface
if (
@@ -3556,6 +3563,78 @@ class McastTesterHelper(HostApplicationHelper):
return True
+@retry(retry_timeout=62)
+def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
+ """
+ Verify local IGMP groups are received from an intended interface
+ by running "show ip igmp join json" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `interface`: interface, from which IGMP groups are configured
+ * `group_addresses`: IGMP group address
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+ result = verify_local_igmp_groups(tgen, dut, interface, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying local IGMP groups received:", dut)
+ show_ip_local_igmp_json = run_frr_cmd(rnode, "show ip igmp join json", isjson=True)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface not in show_ip_local_igmp_json:
+
+ errormsg = (
+ "[DUT %s]: Verifying local IGMP group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ for grp_addr in group_addresses:
+ found = False
+ for index in show_ip_local_igmp_json[interface]["groups"]:
+ if index["group"] == grp_addr:
+ found = True
+ break
+ if not found:
+ errormsg = (
+ "[DUT %s]: Verifying local IGMP group received"
+ " from interface %s [FAILED]!! "
+ " Expected: %s " % (dut, interface, grp_addr)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying local IGMP group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
"""
Verify ip pim interface traffice by running
diff --git a/tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json b/tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json
new file mode 100644
index 0000000000..fa98987620
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json
@@ -0,0 +1,226 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "i1": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "r3": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py b/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py
new file mode 100644
index 0000000000..8a505a86b5
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py
@@ -0,0 +1,3327 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test multicast pim uplink:
+
+1. Verify mroutes OIL and IIF updated correctly when receivers present inside
+ and outside of DUT
+2. Verify mroutes OIL and IIF updated correctly when source present inside
+ and outside of DUT
+3. Verify Mroutes and BSM forwarding when edge is transit node
+4. Verify mroutes updated correctly after source interface shut/no shut
+5. Verify mroutes updated correctly after receiver interface shut/no shut
+6. Verify mroute updated correctly after sending IGMP prune and join
+7. Verify mroute updated correctly after clear mroute
+8. Verify (*,G) mroute entries after changing the RP configuration
+9. Verify mroute entries after FRR service stop and start
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ reset_config_on_routers,
+ shutdown_bringup_interface,
+ start_router,
+ stop_router,
+ create_static_routes,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+from lib.bgp import create_router_bgp
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_igmp_groups,
+ verify_mroutes,
+ clear_pim_interface_traffic,
+ verify_upstream_iif,
+ clear_mroute,
+ verify_multicast_traffic,
+ verify_pim_rp_info,
+ verify_pim_interface_traffic,
+ verify_pim_state,
+ McastTesterHelper,
+)
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+# Global variables
+GROUP_RANGE_1 = [
+ "225.1.1.1/32",
+ "225.1.1.2/32",
+ "225.1.1.3/32",
+ "225.1.1.4/32",
+ "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_RANGE_2 = [
+ "226.1.1.1/32",
+ "226.1.1.2/32",
+ "226.1.1.3/32",
+ "226.1.1.4/32",
+ "226.1.1.5/32",
+]
+IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"]
+GROUP_RANGE_3 = [
+ "227.1.1.1/32",
+ "227.1.1.2/32",
+ "227.1.1.3/32",
+ "227.1.1.4/32",
+ "227.1.1.5/32",
+]
+IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
+
+r1_r2_links = []
+r1_r3_links = []
+r2_r1_links = []
+r3_r1_links = []
+r2_r4_links = []
+r4_r2_links = []
+r4_r3_links = []
+HELLO_TIMER = 1
+HOLD_TIMER = 3
+
+pytestmark = [pytest.mark.pimd]
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.19")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ testdir = os.path.dirname(os.path.realpath(__file__))
+ json_file = "{}/multicast_pim_uplink_topo1.json".format(testdir)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, tgen.json_topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, tgen.json_topo)
+
+ # Pre-requisite data
+ get_interfaces_names(topo)
+
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
+def get_interfaces_names(topo):
+ """
+    API to fetch interface names and build lists, which are used later
+    for verification
+
+ Parameters
+ ----------
+    * `topo` : input JSON data
+ """
+
+ for link in range(1, 5):
+
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(link)]["interface"]
+ r1_r2_links.append(intf)
+
+ intf = topo["routers"]["r1"]["links"]["r3-link{}".format(link)]["interface"]
+ r1_r3_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r1-link{}".format(link)]["interface"]
+ r2_r1_links.append(intf)
+
+ intf = topo["routers"]["r3"]["links"]["r1-link{}".format(link)]["interface"]
+ r3_r1_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r4-link{}".format(link)]["interface"]
+ r2_r4_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(link)]["interface"]
+ r4_r2_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r3-link{}".format(link)]["interface"]
+ r4_r3_links.append(intf)
+
+
+def configure_static_routes_for_rp_reachability(tgen, topo):
+ """
+ API to configure static routes for rp reachability
+
+ Parameters
+ ----------
+    * `topo` : input JSON data
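+
+    Usage
+    -----
+    Illustrative sketch only.
+    configure_static_routes_for_rp_reachability(tgen, topo)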
+ """
+
+ for i in range(1, 5):
+ static_routes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [
+ topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+ topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+ topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+ topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r2"]["links"][
+ "r1-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ {
+ "network": [
+ topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+ topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+ topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+ topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r3"]["links"][
+ "r1-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ ]
+ },
+ "r2": {
+ "static_routes": [
+ {
+ "network": [
+ topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+ topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+ topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+ topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r4"]["links"][
+ "r2-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ {
+ "network": [
+ topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+ topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+ topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+ topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r1"]["links"][
+ "r2-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ ]
+ },
+ "r3": {
+ "static_routes": [
+ {
+ "network": [
+ topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+ topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+ topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+ topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r4"]["links"][
+ "r3-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ {
+ "network": [
+ topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+ topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+ topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+ topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r1"]["links"][
+ "r3-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ ]
+ },
+ "r4": {
+ "static_routes": [
+ {
+ "network": [
+ topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+ topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+ topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+ topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r3"]["links"][
+ "r4-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ {
+ "network": [
+ topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+ topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+ topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+ topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+ ],
+ "next_hop": topo["routers"]["r2"]["links"][
+ "r4-link{}".format(i)
+ ]["ipv4"].split("/")[0],
+ },
+ ]
+ },
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "API {} : Failed Error: {}".\
+ format(sys._getframe().f_code.co_name, result)
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+    API to verify that interface traffic counters have incremented
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
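+
+    Usage
+    -----
+    Illustrative sketch only; the interface and counter names below are
+    hypothetical and the state dicts are gathered via
+    verify_pim_interface_traffic().
+    state_dict = {"r4": {"r4-r3-eth0": ["registerRx", "registerStopTx"]}}
+    state_before = verify_pim_interface_traffic(tgen, state_dict)
+    # ... trigger the event under test ...
+    state_after = verify_pim_interface_traffic(tgen, state_dict)
+    assert verify_state_incremented(state_before, state_after) is True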
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] > state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_mroutes_updated_with_correct_oil_iif_when_receiver_is_in_and_outside_DUT_p0(
+ request,
+):
+ """
+ Verify mroutes OIL and IIF updated correctly when receivers present inside
+ and outside of DUT
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+
+ input_src = {"i6": topo["routers"]["i6"]["links"]["r4"]["interface"]}
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+ " and 'show ip igmp groups json'"
+ )
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ result = verify_igmp_groups(tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Random shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ step(
+        "After shut of upstream interface from DUT verify mroutes have moved "
+ "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+ " 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Random no shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+ step(
+ "After no shut of upstream interface from DUT verify no change on"
+ "mroute using 'show ip mroute json'; 'show ip upstream json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Shut of upstream interface in alternate fashion from R4 side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, False)
+
+ step(
+        "After shut of upstream interface from R4 verify mroutes have moved "
+ "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+ " 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("No shut of upstream interface in alternate fashion from R4 side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, True)
+
+ step(
+ "After no shut of upstream interface from DUT verify no change on"
+ "mroute using 'show ip mroute json'; 'show ip upstream json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Send different IGMP joins from DUT and R4 for group range (From DUT "
+ "225.1.1.1-5 and from R4 226.1.1.1-5)"
+ )
+
+ result = app_helper.run_join("i7", IGMP_JOIN_RANGE_2, "r4")
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic for all the groups from R4")
+
+ input_src = {"i6": topo["routers"]["i6"]["links"]["r4"]["interface"]}
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+ " and 'show ip igmp groups json'"
+ )
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ result = verify_igmp_groups(
+ tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Random shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ step(
+        "After shut of upstream interface from DUT verify mroutes have moved "
+ "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+ " 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Random no shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+ step(
+ "After no shut of upstream interface from DUT verify no change on"
+ "mroute using 'show ip mroute json'; 'show ip upstream json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_with_correct_oil_iif_when_source_is_in_and_outside_DUT_p0(
+ request,
+):
+ """
+ Verify mroutes OIL and IIF updated correctly when source present inside
+ and outside of DUT
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+ " and 'show ip igmp groups json'"
+ )
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ result = verify_igmp_groups(tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
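+    # iif/oil are passed as lists of acceptable interfaces where the exact
+    # uplink is not deterministic (the (*,G)/(S,G) IIF can come up on any of
+    # the ECMP links towards R2/R3).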
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Random shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of upstream interface from DUT verify mroutes have moved "
+        "to another interface (R2 or R3) and are updated with correct OIL/IIF "
+        "using 'show ip mroute json'"
+    )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Random no shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on "
+        "mroute using 'show ip mroute json'; 'show ip pim upstream json'"
+    )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Random shut of upstream interface from R4 side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, False)
+
+    step(
+        "After shut of upstream interface from R4 verify mroutes have moved "
+        "to another interface (R2 or R3) and are updated with correct OIL/IIF "
+        "using 'show ip mroute json'"
+    )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Random no shut of upstream interface from R4 side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, True)
+
+    step(
+        "After no shut of upstream interface from R4 verify no change on "
+        "mroute using 'show ip mroute json'; 'show ip pim upstream json'"
+    )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Send different IGMP joins from DUT and R4 for group range (From DUT "
+ "225.1.1.1-5 and from R4 226.1.1.1-5)"
+ )
+
+ result = app_helper.run_join("i7", IGMP_JOIN_RANGE_2, "r4")
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic for all the groups from R4")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+ " and 'show ip igmp groups json'"
+ )
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ result = verify_igmp_groups(
+ tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Random shut and no shut of upstream interface from DUT side")
+
+ step("Random shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of upstream interface from DUT verify mroutes have moved "
+        "to another interface (R2 or R3) and are updated with correct OIL/IIF "
+        "using 'show ip mroute json'"
+    )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Random no shut of upstream interface from DUT side")
+ for i in range(1, 5, 2):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on "
+        "mroute using 'show ip mroute json'; 'show ip pim upstream json'"
+    )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_mroutes_forwarding_p0(request):
+ """
+ Verify Mroutes and BSM forwarding when edge is transit node
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("To make DUT as transit node , shut all the links from R3 to R4 nodes")
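+    # With all R3<->R4 links down, traffic between the source behind R4 and
+    # the receiver behind R3 must transit the DUT (R1), which is the scenario
+    # this test exercises.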
+ for i in range(1, 5):
+ intf = topo["routers"]["r3"]["links"]["r4-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r3", intf, False)
+
+ intf = topo["routers"]["r4"]["links"]["r3-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, False)
+
+ step("Enable IGMP on DUT and R3 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r3_i5 = topo["routers"]["r3"]["links"]["i5"]["interface"]
+ for dut, intf in zip(["r1", "r3"], [intf_r1_i1, intf_r3_i5]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from DUT and R3 for group range 226.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i5": topo["routers"]["i5"]["links"]["r3"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_2, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 226.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_2,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+    step("Send traffic from R4 and DUT for group range 226.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_2, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Static RP info is populated on R2 node, verify using "
+        "'show ip pim rp-info json'"
+    )
+
+ rp_addr_r2 = topo["routers"]["r2"]["links"]["lo"]["ipv4"].split("/")[0]
+
+ result = verify_pim_rp_info(
+ tgen, topo, "r2", GROUP_RANGE_2, "lo", rp_addr_r2, "Static"
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ step(
+ "DUT created (*,G) and (S,G) entries as transit node for 226.1.1.1-5 "
+ "mroutes , OIL is local received and toward R3"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r3",
+ "src_address": "*",
+ "iif": r3_r1_links,
+ "oil": topo["routers"]["r3"]["links"]["i5"]["interface"],
+ },
+ {
+ "dut": "r3",
+ "src_address": source_i2,
+ "iif": r3_r1_links,
+ "oil": topo["routers"]["r3"]["links"]["i5"]["interface"],
+ },
+ {
+ "dut": "r3",
+ "src_address": source_i6,
+ "iif": r3_r1_links,
+ "oil": topo["routers"]["r3"]["links"]["i5"]["interface"],
+ },
+ ]
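+    # As the transit node, the DUT's OIL carries both its local receiver (i1)
+    # and the R1->R3 links, while R3's OIL is only its local receiver (i5).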
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+    intf_r3_i5 = topo["routers"]["r3"]["links"]["i5"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r3": {"traffic_sent": [intf_r3_i5]},
+        "r1": {"traffic_sent": [intf_r1_i1]},
+    }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Send different join from R3 (232.1.1.1-5) and traffic "
+ "from R4 for same range"
+ )
+
+ input_join = {"i5": topo["routers"]["i5"]["links"]["r3"]["interface"]}
+ result = app_helper.run_join("i5", IGMP_JOIN_RANGE_3, "r3")
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ input_src = {"i6": topo["routers"]["i6"]["links"]["r4"]["interface"]}
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_3, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("For different join (232.1.1.1-5) DUT created mroute OIL toward R3 only")
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {"dut": "r1", "src_address": "*", "iif": r1_r2_links, "oil": r1_r3_links},
+ {"dut": "r1", "src_address": source_i6, "iif": r1_r2_links, "oil": r1_r3_links},
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_3,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_3
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Shut all the links from DUT towards R2")
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of (R1-R2) links from DUT, verify IIF on DUT changed to "
+        "a different uplink interface using 'show ip mroute json' for R4 "
+        "source and connected source"
+    )
+
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ }
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Traffic is received fine for R4 source 'show ip multicast json' on DUT")
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("No shut all the links from DUT towards R2")
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Shut and no shut the DUT to R2 links within 30 seconds")
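+    # The shut is immediately followed by the no shut, so the whole flap
+    # completes well inside 30 seconds and the verification below expects the
+    # original mroute state to be restored.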
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After shut and no shut within 30 seconds, verify R2 added 2 mroute "
+        "entries and the OIL on the shut links times out after some time"
+    )
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_2
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_correctly_after_source_interface_shut_noshut_p1(request):
+ """
+ Verify mroutes updated correctly after source interface shut/no shut
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
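+    # The (*,G) IIF on both LHRs can resolve to any of their ECMP uplinks, so
+    # the complete set of R2/R3 links is given as the acceptable IIF list.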
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("On R1 for local IGMP receivers, OIL towards RP is removed")
+
+ input_dict = [
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ }
+ ]
+
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed "
+ "Mroute IIF and OIF are same \n Error: {}".format(tc_name, result)
+ )
+
+    step("Shut and no shut the source interface multiple times")
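+    # The source interface is flapped twice (range(0, 2)) to confirm the
+    # (S,G) state is re-learned consistently on every iteration.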
+
+ for i in range(0, 2):
+ step("Shut and no shut the source interface from DUT")
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_i2, False)
+ shutdown_bringup_interface(tgen, "r1", intf_r1_i2, True)
+
+        step(
+            "After shut/no shut of source interface verify all the (S,G) "
+            "got re-learned and IIF/OIF point to any of the links from R2 "
+            "or R3, verify using 'show ip mroute json'"
+        )
+
+ step(
+ "(S,G) OIL on R1 has only respective receiver port and uplink port "
+ " , RP side oil is removed"
+ )
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("No change seen on (*,G) mroutes")
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Traffic is received for all the groups , verify using "
+ "'show ip multicast count json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and no shut the source interface from R4")
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf_r4_i6, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_i6, True)
+
+    step(
+        "After shut/no shut of source interface verify all the (S,G) "
+        "got re-learned and IIF/OIF point to any of the links from R2 or "
+        "R3, verify using 'show ip mroute json'"
+    )
+
+ step(
+ "(S,G) OIL on R1 has only respective receiver port and uplink port "
+ " , RP side oil is removed"
+ )
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("No change seen on (*,G) mroutes")
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Traffic is received for all the groups , verify using "
+ "'show ip multicast count json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Shut the source interface from R4 and no shut it immediately after "
+        "the same source upstream expires on DUT"
+    )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf_r4_i6, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_i6, True)
+
+ step(
+ "After no shut verify mroutes populated and multicast traffic resume ,"
+ " verify using 'show ip mroute json' 'show ip multicast count json'"
+ )
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Shut the source interface from DUT and no shut it immediately after "
+        "the same source upstream expires on R4"
+    )
+
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_i2, False)
+ shutdown_bringup_interface(tgen, "r1", intf_r1_i2, True)
+
+ step(
+ "After no shut verify mroutes populated and multicast traffic resume ,"
+ " verify using 'show ip mroute json' 'show ip multicast count json'"
+ )
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_correctly_after_receiver_interface_shut_noshut_p1(request):
+ """
+ Verify mroutes updated correctly after receiver interface shut/no shut
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "r1")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif_r1_r2": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif_r1_r2": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif_r1_r2"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and no shut the source interface from DUT")
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After shut/no shut of source interface verify all the (S,G) "
+        "got re-learned and IIF/OIF point to any of the links from R2 or "
+        "R3, verify using 'show ip mroute json'"
+    )
+
+ step(
+ "(S,G) OIL on R1 has only respective receiver port and uplink port "
+ " , RP side oil is removed"
+ )
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Traffic is received for all the groups , verify using "
+ "'show ip multicast count json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut the receiver interface from R4")
+ for i in range(1, 5):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, False)
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, True)
+
+    step(
+        "After shut/no shut of source interface verify all the (S,G) "
+        "got re-learned and IIF/OIF point to any of the links from R2 or "
+        "R3, verify using 'show ip mroute json'"
+    )
+
+ step(
+ "(S,G) OIL on R1 has only respective receiver port and uplink port "
+ " , RP side oil is removed"
+ )
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Traffic is received for all the groups , verify using "
+ "'show ip multicast count json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Shut and no shut the receiver interface from DUT after PIM upstream" " timeout"
+ )
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Traffic is received for all the groups , verify using "
+ "'show ip multicast count json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Shut and no shut the receiver interface from R4 after PIM upstream " "timeout"
+ )
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, False)
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, True)
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Traffic is received for all the groups , verify using "
+ "'show ip multicast count json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_after_sending_IGMP_prune_and_join_p1(request):
+ """
+ Verify mroute updated correctly after sending IGMP prune and join
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif_r1_r2": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif_r1_r2": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif_r1_r2"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP prune and join for receivers connected on DUT")
+ step("Send IGMP prune and join for receivers connected on R4")
+
+ app_helper.stop_all_hosts()
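+    # Stopping the join/traffic applications is how this test generates the
+    # IGMP prune (the receivers leave the groups); fresh joins and traffic are
+    # started again further below.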
+
+ step(
+ "After sending prune verify (*,G) and (S,G) entries got cleared "
+ "from all the nodes"
+ )
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif_r1_r2"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed "
+ " mroute are still present \n Error: {}".format(tc_name, result)
+ )
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed "
+ " mroute are still present \n Error: {}".format(tc_name, result)
+ )
+
+ step(
+ "After sending joins verify (*,G) and (S,G) entries got populated "
+ "again correct OIL and IIF info (any of the link of R2 or R3) verify "
+ "using 'show ip mroute json'"
+ )
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Multicast traffic receiver for all the groups verify using "
+ "'show ip multicast count'"
+ )
+
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_after_after_clear_mroute_p1(request):
+ """
+ Verify mroute updated correctly after clear mroute
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif_r1_r2": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif_r1_r2": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif_r1_r2"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Clear ip mroute from DUT")
+ clear_mroute(tgen, "r1")
+
+ step("Clear ip mroute from r4")
+ clear_mroute(tgen, "r4")
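+    # After the clear, the mroutes are expected to be re-installed from the
+    # still-present PIM/IGMP state; the traffic check below confirms that
+    # forwarding resumes without re-sending joins.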
+
+ step(
+ "Multicast traffic receiver for all the groups verify using "
+ "'show ip multicast count'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_after_changing_rp_config_p1(request):
+ """
+ Verify (*,G) mroute entries after changing the RP configuration
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    step("Unconfigure BGP from all nodes as this test uses static routes")
+
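+    # Each router is removed from its own AS ("delete": True); the ASNs here
+    # are assumed to match the base eBGP config, leaving the static routes
+    # configured below as the only source of RP/source reachability.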
+ input_dict = {}
+ DUT = ["r1", "r2", "r3", "r4"]
+ ASN = [100, 200, 300, 400]
+ for dut, asn in zip(DUT, ASN):
+ temp = {dut: {"bgp": {}}}
+ input_dict.update(temp)
+
+ temp[dut]["bgp"].update({"local_as": asn, "delete": True})
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Configure static routes between nodes for making RP and source " "reachable")
+
+ configure_static_routes_for_rp_reachability(tgen, topo)
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Verify RP has (S,G) with none OIL or upstream present, using "
+        "'show ip mroute json' and 'show ip pim upstream json'"
+    )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {"dut": "r2", "src_address": source_i2, "iif": r2_r1_links, "oil": r2_r4_links},
+ {"dut": "r2", "src_address": source_i6, "iif": r2_r4_links, "oil": r2_r1_links},
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify pim interface traffic before changing RP")
+
+ intf_traffic = topo["routers"]["r4"]["links"]["r3-link1"]["interface"]
+ state_dict = {"r4": {intf_traffic: ["registerStopRx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(state_before, dict), (
+ "Testcase {} : Failed \n state_before is not a dictionary \n "
+ "Error: {}".format(tc_name, state_before)
+ )
+
+ step("Change the RP to R3 loopback for same group range (225.1.1.1-5)")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1 + GROUP_RANGE_2,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After changing the RP to R3 , verify (S,G) with none OIL and "
+ "upstream got cleared from R2 and created on R3 verify using "
+ "'show ip mroute json'; 'show ip pim upstream json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("(*,G) IIF on DUT is changed towards R3, verify using 'show ip mroute json'")
+
+ input_dict_star_g = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_star_g:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "R4 is sending null register packets to R3 'show ip pim multicast traffic json'"
+ )
+ step("Verify pim interface traffic after changing RP")
+
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+ assert isinstance(state_after, dict), (
+ "Testcase {} : Failed \n state_after is not a dictionary \n "
+ "Error: {}".format(tc_name, state_after)
+ )
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ step("Send new IGMP join for new group range (226.1.1.1-5)")
+
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_2, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic from R4 to same group range")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_2, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*.G) and (S,G) on LHR for group range (226.1.1.1-5)")
+ step(
+ "(*,G) joins sent towards new RP (R3) , mroute created verify using "
+ "'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_2
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Traffic is received for groups (226.1.1.1-5) , (S,G) mroute updated "
+ "in DUT and R4 node verify using 'show ip multicast json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Delete and Add the RP for group range 225.1.1.1-5 on DUT")
+
+ input_dict = {
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After delete of RP verify mroute got uninstall from DUT IIF updated as "
+ "unknown in PIM state using 'show ip mroute' 'show ip pim state json'"
+ )
+ step(
+ "No impact seen to on data path as RP config removed after SPT switchover "
+ "verify uptime and traffic using 'show ip mroute' 'show ip mroute count json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed "
+ "(*,G) entried are still present \n Error: {}".format(tc_name, result)
+ )
+
+ else:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ iif = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ oil = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ result = verify_pim_state(tgen, "r1", iif, oil, IGMP_JOIN_RANGE_1, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed "
+ "PIM state is not unknown after deleting RP \n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ input_dict = {
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After Adding the RP verify IIF updated again towards RP , and DUT"
+ " sending register packets towards RP, verify using 'show ip mroute'"
+ " and 'show ip pim int traffic'"
+ )
+ step(
+ "No impact seen to on data path as RP config removed after SPT "
+ "switchover verify uptime and traffic using 'show ip mroute' "
+ "'show ip mroute count json'"
+ )
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_after_restart_frr_services_p2(request):
+ """
+ Verify mroute entries after FRR service stop and start
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "r1")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": r1_r2_links + [topo["routers"]["r1"]["links"]["i2"]["interface"]],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the FRR services using kill -9 pid(s) from DUT")
+ stop_router(tgen, "r1")
+
+ step("Start the FRR services from DUT")
+ start_router(tgen, "r1")
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the traffic and do frr services stop/start")
+ app_helper.stop_all_hosts()
+
+ stop_router(tgen, "r1")
+ start_router(tgen, "r1")
+
+ step(
+ "FRR services started with new PID , (S,G) not present "
+ "on DUT and R4 , verify using 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {}: Failed "
+ "mroutes are still present \n Error: {}".format(tc_name, result)
+ )
+
+ step("Stop FRR on R4 node")
+
+ stop_router(tgen, "r4")
+
+ step(
+ "After stop of FRR on R4 node verify mroute on DUT should be "
+ "pimreg/prune state"
+ )
+ step("No OIL created toward R2 on R11 node")
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed "
+ " Mroutes are still present \n Error: {}".format(tc_name, result)
+ )
+
+ step("Start FRR on R4 node")
+
+ start_router(tgen, "r4")
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
index c7ee723b3e..21a7d83845 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
@@ -596,93 +596,6 @@ def test_ospf_hello_tc10_p0(request):
ospf_covergence
)
- step(" Configure hello timer = 65535")
- topo1 = {
- "r0": {
- "links": {
- "r1": {
- "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
- "ospf": {"hello_interval": 65535, "dead_interval": 4},
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- topo1 = {
- "r1": {
- "links": {
- "r0": {
- "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
- "ospf": {"hello_interval": 65535, "dead_interval": 4},
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- step("verify that new timer value is configured.")
- input_dict = {
- "r0": {
- "links": {"r1": {"ospf": {"timerMsecs": 65535 * 1000, "timerDeadSecs": 4}}}
- }
- }
- dut = "r0"
- result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- step("verify that ospf neighbours are full")
- ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
- ospf_covergence
- )
-
- step(" Try configuring timer values outside range for example 65536")
- topo1 = {
- "r0": {
- "links": {
- "r1": {
- "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
- "ospf": {"hello_interval": 65536, "dead_interval": 4},
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result
- )
-
- step("Unconfigure the hello timer from the interface from r1 and r2.")
-
- topo1 = {
- "r1": {
- "links": {
- "r0": {
- "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
- "ospf": {"hello_interval": 65535},
- "delete": True,
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- step(
- "Verify that timer value is deleted from intf & " "set to default value 40 sec."
- )
- input_dict = {"r1": {"links": {"r0": {"ospf": {"timerMsecs": 10 * 1000}}}}}
- dut = "r1"
- result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
write_test_footer(tc_name)
diff --git a/zebra/interface.c b/zebra/interface.c
index 96e378444d..93ffeb437c 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -184,13 +184,6 @@ static int if_zebra_new_hook(struct interface *ifp)
static void if_nhg_dependents_check_valid(struct nhg_hash_entry *nhe)
{
zebra_nhg_check_valid(nhe);
- if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) {
- /* If we're in shutdown, this interface event needs to clean
- * up installed NHGs, so don't clear that flag directly.
- */
- if (!zrouter.in_shutdown)
- UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
- }
}
static void if_down_nhg_dependents(const struct interface *ifp)
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 500f4b0f1b..f025507f7d 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -1055,6 +1055,12 @@ static void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
+ /* If we're in shutdown, this interface event needs to clean
+ * up installed NHGs, so don't clear that flag directly.
+ */
+ if (!zrouter.in_shutdown)
+ UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
+
/* Update validity of nexthops depending on it */
frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
zebra_nhg_check_valid(rb_node_dep->nhe);
@@ -1619,6 +1625,17 @@ void zebra_nhg_hash_free(void *p)
zebra_nhg_free((struct nhg_hash_entry *)p);
}
+static void zebra_nhg_timer(struct thread *thread)
+{
+ struct nhg_hash_entry *nhe = THREAD_ARG(thread);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("Nexthop Timer for nhe: %pNG", nhe);
+
+ if (nhe->refcnt == 1)
+ zebra_nhg_decrement_ref(nhe);
+}
+
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
{
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
@@ -1627,6 +1644,15 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
nhe->refcnt--;
+ if (!zrouter.in_shutdown && nhe->refcnt <= 0 &&
+ CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) &&
+ !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) {
+ nhe->refcnt = 1;
+ SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
+ thread_add_timer(zrouter.master, zebra_nhg_timer, nhe,
+ zrouter.nhg_keep, &nhe->timer);
+ }
+
if (!zebra_nhg_depends_is_empty(nhe))
nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
@@ -1642,6 +1668,12 @@ void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
nhe->refcnt++;
+ if (thread_is_scheduled(nhe->timer)) {
+ THREAD_OFF(nhe->timer);
+ nhe->refcnt--;
+ UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
+ }
+
if (!zebra_nhg_depends_is_empty(nhe))
nhg_connected_tree_increment_ref(&nhe->nhg_depends);
}
@@ -3290,9 +3322,6 @@ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
rib_handle_nhg_replace(old, new);
- /* if this != 1 at this point, we have a bug */
- assert(old->refcnt == 1);
-
/* We have to decrement its singletons
* because some might not exist in NEW.
*/
@@ -3304,6 +3333,7 @@ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
/* Dont call the dec API, we dont want to uninstall the ID */
old->refcnt = 0;
+ THREAD_OFF(old->timer);
zebra_nhg_free(old);
old = NULL;
}
diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h
index 0863d90a7e..6d2ab248f9 100644
--- a/zebra/zebra_nhg.h
+++ b/zebra/zebra_nhg.h
@@ -79,16 +79,34 @@ struct nhg_hash_entry {
uint32_t flags;
- /* Dependency tree for other entries.
+ /* Dependency trees for other entries.
* For instance a group with two
* nexthops will have two dependencies
* pointing to those nhg_hash_entries.
*
* Using a rb tree here to make lookups
* faster with ID's.
+ *
+ * nhg_depends the RB tree of entries that this
+ * group contains.
+ *
+ * nhg_dependents the RB tree of entries that
+ * this group is being used by
+ *
+ * NHG id 3 with nexthops id 1/2
+ * nhg(3)->nhg_depends has 1 and 2 in the tree
+ * nhg(3)->nhg_dependents is empty
+ *
+ * nhg(1)->nhg_depends is empty
+ * nhg(1)->nhg_dependents is 3 in the tree
+ *
+ * nhg(2)->nhg_depends is empty
+ * nhg(2)->nhg_dependents is 3 in the tree
*/
struct nhg_connected_tree_head nhg_depends, nhg_dependents;
+ struct thread *timer;
+
/*
* Is this nexthop group valid, ie all nexthops are fully resolved.
* What is fully resolved? It's a nexthop that is either self contained
@@ -130,6 +148,15 @@ struct nhg_hash_entry {
#define NEXTHOP_GROUP_PROTO_RELEASED (1 << 5)
/*
+ * When deleting a NHG, notice whether it is still installed
+ * and, if it is, slightly delay the actual removal so that
+ * upper level protocols might be able to take advantage of
+ * NHGs that are still there.
+ */
+#define NEXTHOP_GROUP_KEEP_AROUND (1 << 7)
+
+/*
* Track FPM installation status..
*/
#define NEXTHOP_GROUP_FPM (1 << 6)
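Taken together, the zebra_nhg.c and zebra_nhg.h hunks above implement a small hold-down state machine around the NHG refcount: when the last reference to an installed group is dropped, the entry is parked with refcnt 1 and NEXTHOP_GROUP_KEEP_AROUND set and a timer of zrouter.nhg_keep seconds is started; a later increment cancels the timer and reclaims the entry, while expiry of the timer drops the parked reference. Below is a minimal, self-contained sketch of that lifecycle only; the toy struct, flag bits, printf tracing and the boolean standing in for the scheduled thread are illustrative assumptions, not FRR's actual types or event API.

/* Simplified model of the NHG keep-around hold-down added above. */
#include <stdbool.h>
#include <stdio.h>

#define NHG_VALID       (1 << 0)
#define NHG_INSTALLED   (1 << 1)
#define NHG_KEEP_AROUND (1 << 2) /* illustrative bit values only */

struct toy_nhe {
	unsigned int id;
	int refcnt;
	unsigned int flags;
	bool timer_scheduled; /* stands in for nhe->timer */
};

static const unsigned int nhg_keep = 180; /* ZEBRA_DEFAULT_NHG_KEEP_TIMER */

static void toy_decrement_ref(struct toy_nhe *nhe)
{
	nhe->refcnt--;

	/* As in zebra_nhg_decrement_ref(): park an installed,
	 * now-unreferenced entry instead of letting it be freed. */
	if (nhe->refcnt <= 0 && (nhe->flags & NHG_INSTALLED) &&
	    !(nhe->flags & NHG_KEEP_AROUND)) {
		nhe->refcnt = 1;
		nhe->flags |= NHG_KEEP_AROUND;
		nhe->timer_scheduled = true;
		printf("nhe %u parked for %u seconds\n", nhe->id, nhg_keep);
	}
}

static void toy_increment_ref(struct toy_nhe *nhe)
{
	nhe->refcnt++;

	/* As in zebra_nhg_increment_ref(): a new user cancels the
	 * pending delete and hands back the parked reference. */
	if (nhe->timer_scheduled) {
		nhe->timer_scheduled = false;
		nhe->refcnt--;
		nhe->flags &= ~NHG_KEEP_AROUND;
		printf("nhe %u reclaimed before the timer fired\n", nhe->id);
	}
}

static void toy_timer_expire(struct toy_nhe *nhe)
{
	/* As in zebra_nhg_timer(): only the parked reference is left,
	 * so drop it and let the normal teardown run. */
	nhe->timer_scheduled = false;
	if (nhe->refcnt == 1)
		toy_decrement_ref(nhe);
	printf("nhe %u keep timer expired, refcnt now %d\n", nhe->id, nhe->refcnt);
}

int main(void)
{
	struct toy_nhe nhe = { .id = 42, .refcnt = 1,
			       .flags = NHG_VALID | NHG_INSTALLED };

	toy_decrement_ref(&nhe);  /* last user gone -> parked with refcnt 1 */
	toy_increment_ref(&nhe);  /* new user -> timer cancelled, refcnt stays 1 */
	toy_decrement_ref(&nhe);  /* user gone again -> parked again */
	toy_timer_expire(&nhe);   /* keep window over -> refcnt drops to 0 */
	return 0;
}

The net effect is that a group which falls out of use and is needed again within the keep window can simply be picked up as-is, instead of being torn down and re-created.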
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 6801280012..3699b53f12 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -4171,21 +4171,15 @@ unsigned long rib_score_proto(uint8_t proto, unsigned short instance)
void rib_close_table(struct route_table *table)
{
struct route_node *rn;
- struct rib_table_info *info;
rib_dest_t *dest;
if (!table)
return;
- info = route_table_get_info(table);
-
for (rn = route_top(table); rn; rn = srcdest_route_next(rn)) {
dest = rib_dest_from_rnode(rn);
if (dest && dest->selected_fib) {
- if (info->safi == SAFI_UNICAST)
- hook_call(rib_update, rn, NULL);
-
rib_uninstall_kernel(rn, dest->selected_fib);
dest->selected_fib = NULL;
}
diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c
index 6b4a7543cd..92d519bad1 100644
--- a/zebra/zebra_router.c
+++ b/zebra/zebra_router.c
@@ -278,6 +278,8 @@ void zebra_router_init(bool asic_offload, bool notify_on_ack)
zrouter.packets_to_process = ZEBRA_ZAPI_PACKETS_TO_PROCESS;
+ zrouter.nhg_keep = ZEBRA_DEFAULT_NHG_KEEP_TIMER;
+
zebra_vxlan_init();
zebra_mlag_init();
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index 7aca91959c..c96c8e5f46 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -219,6 +219,9 @@ struct zebra_router {
bool notify_on_ack;
bool supports_nhgs;
+
+#define ZEBRA_DEFAULT_NHG_KEEP_TIMER 180
+ uint32_t nhg_keep;
};
#define GRACEFUL_RESTART_TIME 60
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index be19b07d9d..9149da8b0d 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -1433,14 +1433,22 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe)
struct nhg_connected *rb_node_dep = NULL;
struct nexthop_group *backup_nhg;
char up_str[MONOTIME_STRLEN];
+ char time_left[MONOTIME_STRLEN];
uptime2str(nhe->uptime, up_str, sizeof(up_str));
vty_out(vty, "ID: %u (%s)\n", nhe->id, zebra_route_string(nhe->type));
- vty_out(vty, " RefCnt: %u\n", nhe->refcnt);
+ vty_out(vty, " RefCnt: %u", nhe->refcnt);
+ if (thread_is_scheduled(nhe->timer))
+ vty_out(vty, " Time to Deletion: %s",
+ thread_timer_to_hhmmss(time_left, sizeof(time_left),
+ nhe->timer));
+ vty_out(vty, "\n");
+
vty_out(vty, " Uptime: %s\n", up_str);
vty_out(vty, " VRF: %s\n", vrf_id_to_name(nhe->vrf_id));
+
if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) {
vty_out(vty, " Valid");
if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED))
@@ -3842,11 +3850,31 @@ DEFUN (no_ip_zebra_import_table,
return (zebra_import_table(AFI_IP, VRF_DEFAULT, table_id, 0, NULL, 0));
}
+DEFPY (zebra_nexthop_group_keep,
+ zebra_nexthop_group_keep_cmd,
+ "[no] zebra nexthop-group keep (1-3600)",
+ NO_STR
+ ZEBRA_STR
+ "Nexthop-Group\n"
+ "How long to keep\n"
+ "Time in seconds from 1-3600\n")
+{
+ if (no)
+ zrouter.nhg_keep = ZEBRA_DEFAULT_NHG_KEEP_TIMER;
+ else
+ zrouter.nhg_keep = keep;
+
+ return CMD_SUCCESS;
+}
+
static int config_write_protocol(struct vty *vty)
{
if (allow_delete)
vty_out(vty, "allow-external-route-update\n");
+ if (zrouter.nhg_keep != ZEBRA_DEFAULT_NHG_KEEP_TIMER)
+ vty_out(vty, "zebra nexthop-group keep %u\n", zrouter.nhg_keep);
+
if (zrouter.ribq->spec.hold != ZEBRA_RIB_PROCESS_HOLD_TIME)
vty_out(vty, "zebra work-queue %u\n", zrouter.ribq->spec.hold);
@@ -4425,6 +4453,7 @@ void zebra_vty_init(void)
install_element(CONFIG_NODE, &ip_multicast_mode_cmd);
install_element(CONFIG_NODE, &no_ip_multicast_mode_cmd);
+ install_element(CONFIG_NODE, &zebra_nexthop_group_keep_cmd);
install_element(CONFIG_NODE, &ip_zebra_import_table_distance_cmd);
install_element(CONFIG_NODE, &no_ip_zebra_import_table_cmd);
install_element(CONFIG_NODE, &zebra_workqueue_timer_cmd);
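For completeness, the knob added by zebra_nexthop_group_keep_cmd above is installed at CONFIG_NODE, so (as an assumed vtysh session, with 300 standing in for any value in the accepted 1-3600 range) the hold-down could be tuned or reset roughly like this:

    configure terminal
     zebra nexthop-group keep 300
     no zebra nexthop-group keep

With a non-default value, config_write_protocol() writes back a single "zebra nexthop-group keep <seconds>" line, and show_nexthop_group_out() appends the "Time to Deletion" field next to RefCnt while an entry is waiting out the timer.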