summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bgpd/bgp_attr.c2
-rw-r--r--bgpd/bgp_bfd.c2
-rw-r--r--bgpd/bgp_bmp.c39
-rw-r--r--bgpd/bgp_network.c21
-rw-r--r--bgpd/bgp_route.c81
-rw-r--r--bgpd/bgp_rpki.c81
-rw-r--r--bgpd/bgp_updgrp.c12
-rw-r--r--bgpd/bgp_vty.c79
-rw-r--r--bgpd/bgp_zebra.c41
-rw-r--r--bgpd/bgpd.h2
-rw-r--r--bgpd/rfapi/vnc_zebra.c3
-rw-r--r--debian/frr.postinst2
-rw-r--r--doc/user/installation.rst3
-rw-r--r--doc/user/ospfd.rst2
-rw-r--r--doc/user/routemap.rst4
-rw-r--r--doc/user/rpki.rst8
-rw-r--r--doc/user/zebra.rst6
-rw-r--r--eigrpd/eigrp_vty.c8
-rw-r--r--isisd/isis_bfd.c2
-rw-r--r--isisd/isis_snmp.c2
-rw-r--r--lib/command.c1
-rw-r--r--lib/if.c16
-rw-r--r--lib/libfrr.c2
-rw-r--r--lib/nexthop.h4
-rw-r--r--lib/nexthop_group.c4
-rw-r--r--lib/prefix.h9
-rw-r--r--lib/sockunion.c10
-rw-r--r--lib/vty.c1
-rw-r--r--lib/zclient.c7
-rw-r--r--lib/zclient.h1
-rw-r--r--nhrpd/nhrp_nhs.c7
-rw-r--r--nhrpd/nhrp_peer.c9
-rw-r--r--nhrpd/nhrp_shortcut.c12
-rw-r--r--nhrpd/nhrp_vty.c14
-rw-r--r--nhrpd/zbuf.c6
-rw-r--r--ospf6d/ospf6_auth_trailer.c8
-rw-r--r--ospf6d/ospf6_interface.c2
-rw-r--r--ospf6d/ospf6_zebra.c27
-rw-r--r--ospfclient/README5
-rwxr-xr-xospfclient/ospfclient.py53
-rw-r--r--ospfd/ospf_api.c9
-rw-r--r--ospfd/ospf_api.h11
-rw-r--r--ospfd/ospf_apiserver.c37
-rw-r--r--ospfd/ospf_apiserver.h4
-rw-r--r--ospfd/ospf_vty.c342
-rw-r--r--ospfd/ospfd.c5
-rw-r--r--pimd/pim6_mld.c2
-rw-r--r--pimd/pim6_mroute_msg.c184
-rw-r--r--pimd/pim_cmd_common.c8
-rw-r--r--pimd/pim_iface.c4
-rw-r--r--pimd/pim_instance.c17
-rw-r--r--pimd/pim_instance.h1
-rw-r--r--pimd/pim_mroute.c237
-rw-r--r--pimd/pim_mroute.h17
-rw-r--r--pimd/pim_mroute_msg.c242
-rw-r--r--pimd/pim_nht.c25
-rw-r--r--pimd/pim_pim.c100
-rw-r--r--pimd/pim_register.c49
-rw-r--r--pimd/pim_register.h4
-rw-r--r--pimd/pim_rpf.c9
-rw-r--r--pimd/pim_sock.c9
-rw-r--r--pimd/pim_upstream.c4
-rw-r--r--pimd/pim_zebra.c1
-rw-r--r--pimd/pim_zlookup.c7
-rw-r--r--pimd/pimd.c1
-rw-r--r--pimd/subdir.am2
-rw-r--r--python/tiabwarfo.py1
-rw-r--r--tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json294
-rw-r--r--tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py2537
-rw-r--r--tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py2437
-rw-r--r--tests/topotests/lib/bgp.py958
-rw-r--r--tests/topotests/lib/pim.py79
-rw-r--r--tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json226
-rw-r--r--tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py3327
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_single_area.py87
-rwxr-xr-xtests/topotests/ospfapi/ctester.py36
-rw-r--r--tests/topotests/ospfapi/test_ospf_clientapi.py53
-rwxr-xr-xtools/frrcommon.sh.in2
-rw-r--r--vtysh/vtysh_user.c4
-rw-r--r--zebra/debug_nl.c5
-rw-r--r--zebra/if_netlink.c66
-rw-r--r--zebra/if_netlink.h1
-rw-r--r--zebra/interface.c24
-rw-r--r--zebra/interface.h3
-rw-r--r--zebra/kernel_netlink.c7
-rw-r--r--zebra/netconf_netlink.c32
-rw-r--r--zebra/rt_netlink.c69
-rw-r--r--zebra/rtadv.c41
-rw-r--r--zebra/zapi_msg.c6
-rw-r--r--zebra/zebra_dplane.c51
-rw-r--r--zebra/zebra_dplane.h10
-rw-r--r--zebra/zebra_fpm_netlink.c2
-rw-r--r--zebra/zebra_netns_id.c109
-rw-r--r--zebra/zebra_nhg.c63
-rw-r--r--zebra/zebra_nhg.h29
-rw-r--r--zebra/zebra_rib.c22
-rw-r--r--zebra/zebra_router.c2
-rw-r--r--zebra/zebra_router.h3
-rw-r--r--zebra/zebra_vty.c40
-rw-r--r--zebra/zebra_vxlan.c7
100 files changed, 11370 insertions, 1192 deletions
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index 454e134c8e..faf44aa251 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -1040,8 +1040,6 @@ struct attr *bgp_attr_aggregate_intern(
else
attr.aggregator_as = bgp->as;
attr.aggregator_addr = bgp->router_id;
- attr.label_index = BGP_INVALID_LABEL_INDEX;
- attr.label = MPLS_INVALID_LABEL;
/* Apply route-map */
if (aggregate->rmap.name) {
diff --git a/bgpd/bgp_bfd.c b/bgpd/bgp_bfd.c
index a859b7ad0f..d66b916b95 100644
--- a/bgpd/bgp_bfd.c
+++ b/bgpd/bgp_bfd.c
@@ -63,7 +63,7 @@ static void bfd_session_status_update(struct bfd_session_params *bsp,
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)
&& bfd_sess_cbit(bsp) && !bss->remote_cbit) {
if (BGP_DEBUG(bfd, BFD_LIB))
- zlog_info(
+ zlog_debug(
"%s BFD DOWN message ignored in the process of graceful restart when C bit is cleared",
peer->host);
return;
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index 25712908df..b561b50ff5 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -1889,7 +1889,6 @@ static void bmp_active_thread(struct thread *t)
struct bmp_active *ba = THREAD_ARG(t);
socklen_t slen;
int status, ret;
- char buf[SU_ADDRSTRLEN];
vrf_id_t vrf_id;
/* all 3 end up here, though only timer or read+write are active
@@ -1915,16 +1914,16 @@ static void bmp_active_thread(struct thread *t)
ret = getsockopt(ba->socket, SOL_SOCKET, SO_ERROR, (void *)&status,
&slen);
- sockunion2str(&ba->addrs[ba->addrpos], buf, sizeof(buf));
if (ret < 0 || status != 0) {
ba->last_err = strerror(status);
- zlog_warn("bmp[%s]: failed to connect to %s:%d: %s",
- ba->hostname, buf, ba->port, ba->last_err);
+ zlog_warn("bmp[%s]: failed to connect to %pSU:%d: %s",
+ ba->hostname, &ba->addrs[ba->addrpos], ba->port,
+ ba->last_err);
goto out_next;
}
- zlog_warn("bmp[%s]: outbound connection to %s:%d",
- ba->hostname, buf, ba->port);
+ zlog_warn("bmp[%s]: outbound connection to %pSU:%d", ba->hostname,
+ &ba->addrs[ba->addrpos], ba->port);
ba->bmp = bmp_open(ba->targets, ba->socket);
if (!ba->bmp)
@@ -2317,7 +2316,6 @@ DEFPY(show_bmp,
struct bmp_active *ba;
struct bmp *bmp;
struct ttable *tt;
- char buf[SU_ADDRSTRLEN];
char uptime[BGP_UPTIME_LEN];
char *out;
@@ -2364,9 +2362,8 @@ DEFPY(show_bmp,
vty_out(vty, " Listeners:\n");
frr_each (bmp_listeners, &bt->listeners, bl)
- vty_out(vty, " %s:%d\n",
- sockunion2str(&bl->addr, buf,
- SU_ADDRSTRLEN), bl->port);
+ vty_out(vty, " %pSU:%d\n", &bl->addr,
+ bl->port);
vty_out(vty, "\n Outbound connections:\n");
tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
@@ -2379,13 +2376,11 @@ DEFPY(show_bmp,
peer_uptime(ba->bmp->t_up.tv_sec,
uptime, sizeof(uptime),
false, NULL);
- ttable_add_row(tt, "%s:%d|Up|%s|%s|%s",
+ ttable_add_row(tt,
+ "%s:%d|Up|%s|%s|%pSU",
ba->hostname, ba->port,
ba->bmp->remote, uptime,
- sockunion2str(
- &ba->addrsrc,
- buf,
- SU_ADDRSTRLEN));
+ &ba->addrsrc);
continue;
}
@@ -2405,15 +2400,11 @@ DEFPY(show_bmp,
state_str = "Resolving";
}
- sockunion2str(&ba->addrsrc,
- buf,
- SU_ADDRSTRLEN);
- ttable_add_row(tt, "%s:%d|%s|%s|%s|%s",
+ ttable_add_row(tt, "%s:%d|%s|%s|%s|%pSU",
ba->hostname, ba->port,
state_str,
ba->last_err ? ba->last_err : "",
- uptime,
- buf);
+ uptime, &ba->addrsrc);
continue;
}
out = ttable_dump(tt, "\n");
@@ -2460,7 +2451,6 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty)
struct bmp_targets *bt;
struct bmp_listener *bl;
struct bmp_active *ba;
- char buf[SU_ADDRSTRLEN];
afi_t afi;
safi_t safi;
@@ -2497,9 +2487,8 @@ static int bmp_config_write(struct bgp *bgp, struct vty *vty)
afi_str, safi2str(safi));
}
frr_each (bmp_listeners, &bt->listeners, bl)
- vty_out(vty, " \n bmp listener %s port %d\n",
- sockunion2str(&bl->addr, buf, SU_ADDRSTRLEN),
- bl->port);
+ vty_out(vty, " \n bmp listener %pSU port %d\n",
+ &bl->addr, bl->port);
frr_each (bmp_actives, &bt->actives, ba) {
vty_out(vty, " bmp connect %s port %u min-retry %u max-retry %u",
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index 9417b7d59a..da4cc03b66 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -59,14 +59,10 @@ void bgp_dump_listener_info(struct vty *vty)
vty_out(vty, "Name fd Address\n");
vty_out(vty, "---------------------------\n");
- for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener)) {
- char buf[SU_ADDRSTRLEN];
-
- vty_out(vty, "%-16s %d %s\n",
+ for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener))
+ vty_out(vty, "%-16s %d %pSU\n",
listener->name ? listener->name : VRF_DEFAULT_NAME,
- listener->fd,
- sockunion2str(&listener->su, buf, sizeof(buf)));
- }
+ listener->fd, &listener->su);
}
/*
@@ -103,21 +99,18 @@ static int bgp_md5_set_socket(int socket, union sockunion *su,
#endif /* HAVE_TCP_MD5SIG */
if (ret < 0) {
- char sabuf[SU_ADDRSTRLEN];
- sockunion2str(su, sabuf, sizeof(sabuf));
-
switch (ret) {
case -2:
flog_warn(
EC_BGP_NO_TCP_MD5,
- "Unable to set TCP MD5 option on socket for peer %s (sock=%d): This platform does not support MD5 auth for prefixes",
- sabuf, socket);
+ "Unable to set TCP MD5 option on socket for peer %pSU (sock=%d): This platform does not support MD5 auth for prefixes",
+ su, socket);
break;
default:
flog_warn(
EC_BGP_NO_TCP_MD5,
- "Unable to set TCP MD5 option on socket for peer %s (sock=%d): %s",
- sabuf, socket, safe_strerror(en));
+ "Unable to set TCP MD5 option on socket for peer %pSU (sock=%d): %s",
+ su, socket, safe_strerror(en));
}
}
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index b7b069fbed..b35cbeb21f 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -9174,12 +9174,9 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
else
vty_out(vty, "%7u ", attr->weight);
- if (json_paths) {
- char buf[BUFSIZ];
- json_object_string_add(
- json_path, "peerId",
- sockunion2str(&path->peer->su, buf, SU_ADDRSTRLEN));
- }
+ if (json_paths)
+ json_object_string_addf(json_path, "peerId", "%pSU",
+ &path->peer->su);
/* Print aspath */
if (attr->aspath) {
@@ -9771,7 +9768,6 @@ static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
int *first, const char *header,
json_object *json_adv_to)
{
- char buf1[INET6_ADDRSTRLEN];
json_object *json_peer = NULL;
if (json_adv_to) {
@@ -9791,10 +9787,8 @@ static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
json_object_object_add(json_adv_to, peer->conf_if,
json_peer);
else
- json_object_object_add(
- json_adv_to,
- sockunion2str(&peer->su, buf1, SU_ADDRSTRLEN),
- json_peer);
+ json_object_object_addf(json_adv_to, json_peer, "%pSU",
+ &peer->su);
} else {
if (*first) {
vty_out(vty, "%s", header);
@@ -9807,16 +9801,13 @@ static void route_vty_out_advertised_to(struct vty *vty, struct peer *peer,
vty_out(vty, " %s(%s)", peer->hostname,
peer->conf_if);
else
- vty_out(vty, " %s(%s)", peer->hostname,
- sockunion2str(&peer->su, buf1,
- SU_ADDRSTRLEN));
+ vty_out(vty, " %s(%pSU)", peer->hostname,
+ &peer->su);
} else {
if (peer->conf_if)
vty_out(vty, " %s", peer->conf_if);
else
- vty_out(vty, " %s",
- sockunion2str(&peer->su, buf1,
- SU_ADDRSTRLEN));
+ vty_out(vty, " %pSU", &peer->su);
}
}
}
@@ -10236,10 +10227,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
else {
if (json_paths) {
- json_object_string_add(json_peer, "peerId",
- sockunion2str(&path->peer->su,
- buf,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_peer, "peerId", "%pSU",
+ &path->peer->su);
json_object_string_addf(json_peer, "routerId", "%pI4",
&path->peer->remote_id);
@@ -10273,10 +10262,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
path->peer->hostname,
path->peer->host);
else
- vty_out(vty, " from %s",
- sockunion2str(&path->peer->su,
- buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, " from %pSU",
+ &path->peer->su);
}
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))
@@ -12871,6 +12858,11 @@ static const char *table_stats_strs[][2] = {
struct bgp_table_stats {
struct bgp_table *table;
unsigned long long counts[BGP_STATS_MAX];
+
+ unsigned long long
+ prefix_len_count[MAX(EVPN_ROUTE_PREFIXLEN, IPV6_MAX_BITLEN) +
+ 1];
+
double total_space;
};
@@ -12888,6 +12880,7 @@ static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top,
ts->counts[BGP_STATS_PREFIXES]++;
ts->counts[BGP_STATS_TOTPLEN] += rn_p->prefixlen;
+ ts->prefix_len_count[rn_p->prefixlen]++;
/* check if the prefix is included by any other announcements */
while (pdest && !bgp_dest_has_bgp_path_info_data(pdest))
pdest = bgp_dest_parent_nolock(pdest);
@@ -12994,6 +12987,8 @@ static int bgp_table_stats_single(struct vty *vty, struct bgp *bgp, afi_t afi,
int ret = CMD_SUCCESS;
char temp_buf[20];
struct json_object *json = NULL;
+ uint32_t bitlen = 0;
+ struct json_object *json_bitlen;
if (json_array)
json = json_object_new_object();
@@ -13192,6 +13187,38 @@ static int bgp_table_stats_single(struct vty *vty, struct bgp *bgp, afi_t afi,
if (!json)
vty_out(vty, "\n");
}
+
+ switch (afi) {
+ case AFI_IP:
+ bitlen = IPV4_MAX_BITLEN;
+ break;
+ case AFI_IP6:
+ bitlen = IPV6_MAX_BITLEN;
+ break;
+ case AFI_L2VPN:
+ bitlen = EVPN_ROUTE_PREFIXLEN;
+ break;
+ default:
+ break;
+ }
+
+ if (json) {
+ json_bitlen = json_object_new_array();
+
+ for (i = 0; i <= bitlen; i++) {
+ struct json_object *ind_bit = json_object_new_object();
+
+ if (!ts.prefix_len_count[i])
+ continue;
+
+ snprintf(temp_buf, sizeof(temp_buf), "%u", i);
+ json_object_int_add(ind_bit, temp_buf,
+ ts.prefix_len_count[i]);
+ json_object_array_add(json_bitlen, ind_bit);
+ }
+ json_object_object_add(json, "prefixLength", json_bitlen);
+ }
+
end_table_stats:
if (json)
json_object_array_add(json_array, json);
@@ -14982,10 +15009,8 @@ static void show_bgp_peerhash_entry(struct hash_bucket *bucket, void *arg)
{
struct vty *vty = arg;
struct peer *peer = bucket->data;
- char buf[SU_ADDRSTRLEN];
- vty_out(vty, "\tPeer: %s %s\n", peer->host,
- sockunion2str(&peer->su, buf, sizeof(buf)));
+ vty_out(vty, "\tPeer: %s %pSU\n", peer->host, &peer->su);
}
DEFUN (show_bgp_listeners,
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 1c7dc7cb0a..de1c559641 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -114,12 +114,12 @@ static struct rtr_mgr_group *get_groups(void);
#if defined(FOUND_SSH)
static int add_ssh_cache(const char *host, const unsigned int port,
const char *username, const char *client_privkey_path,
- const char *client_pubkey_path,
const char *server_pubkey_path,
const uint8_t preference, const char *bindaddr);
#endif
static struct rtr_socket *create_rtr_socket(struct tr_socket *tr_socket);
static struct cache *find_cache(const uint8_t preference);
+static void rpki_delete_all_cache_nodes(void);
static int add_tcp_cache(const char *host, const char *port,
const uint8_t preference, const char *bindaddr);
static void print_record(const struct pfx_record *record, struct vty *vty,
@@ -276,6 +276,17 @@ static struct cache *find_cache(const uint8_t preference)
return NULL;
}
+static void rpki_delete_all_cache_nodes(void)
+{
+ struct listnode *cache_node, *cache_next;
+ struct cache *cache;
+
+ for (ALL_LIST_ELEMENTS(cache_list, cache_node, cache_next, cache)) {
+ rtr_mgr_remove_group(rtr_config, cache->preference);
+ listnode_delete(cache_list, cache);
+ }
+}
+
static void print_record(const struct pfx_record *record, struct vty *vty,
json_object *json)
{
@@ -916,7 +927,6 @@ static int add_tcp_cache(const char *host, const char *port,
#if defined(FOUND_SSH)
static int add_ssh_cache(const char *host, const unsigned int port,
const char *username, const char *client_privkey_path,
- const char *client_pubkey_path,
const char *server_pubkey_path,
const uint8_t preference, const char *bindaddr)
{
@@ -991,16 +1001,14 @@ static int config_write(struct vty *vty)
struct listnode *cache_node;
struct cache *cache;
- if (!listcount(cache_list))
- return 0;
-
if (rpki_debug)
vty_out(vty, "debug rpki\n");
vty_out(vty, "!\n");
vty_out(vty, "rpki\n");
- vty_out(vty, " rpki polling_period %d\n", polling_period);
+ if (polling_period != POLLING_PERIOD_DEFAULT)
+ vty_out(vty, " rpki polling_period %d\n", polling_period);
if (retry_interval != RETRY_INTERVAL_DEFAULT)
vty_out(vty, " rpki retry_interval %d\n", retry_interval);
if (expire_interval != EXPIRE_INTERVAL_DEFAULT)
@@ -1055,6 +1063,17 @@ DEFUN_NOSH (rpki,
return CMD_SUCCESS;
}
+DEFPY (no_rpki,
+ no_rpki_cmd,
+ "no rpki",
+ NO_STR
+ "Enable rpki and enter rpki configuration mode\n")
+{
+ rpki_delete_all_cache_nodes();
+ stop();
+ return CMD_SUCCESS;
+}
+
DEFUN (bgp_rpki_start,
bgp_rpki_start_cmd,
"rpki start",
@@ -1161,15 +1180,15 @@ DEFUN (no_rpki_retry_interval,
}
DEFPY(rpki_cache, rpki_cache_cmd,
- "rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY SSH_PUBKEY [SERVER_PUBKEY]> [source <A.B.C.D>$bindaddr] preference (1-255)",
+ "rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY [SERVER_PUBKEY]> [source <A.B.C.D>$bindaddr] preference (1-255)",
RPKI_OUTPUT_STRING
"Install a cache server to current group\n"
- "IP address of cache server\n Hostname of cache server\n"
+ "IP address of cache server\n"
+ "Hostname of cache server\n"
"TCP port number\n"
"SSH port number\n"
"SSH user name\n"
"Path to own SSH private key\n"
- "Path to own SSH public key\n"
"Path to Public key of cache server\n"
"Configure source IP address of RPKI connection\n"
"Define a Source IP Address\n"
@@ -1193,9 +1212,9 @@ DEFPY(rpki_cache, rpki_cache_cmd,
// use ssh connection
if (ssh_uname) {
#if defined(FOUND_SSH)
- return_value = add_ssh_cache(
- cache, sshport, ssh_uname, ssh_privkey, ssh_pubkey,
- server_pubkey, preference, bindaddr_str);
+ return_value =
+ add_ssh_cache(cache, sshport, ssh_uname, ssh_privkey,
+ server_pubkey, preference, bindaddr_str);
#else
return_value = SUCCESS;
vty_out(vty,
@@ -1216,20 +1235,27 @@ DEFPY(rpki_cache, rpki_cache_cmd,
DEFPY (no_rpki_cache,
no_rpki_cache_cmd,
- "no rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport> preference (1-255)$preference",
+ "no rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY [SERVER_PUBKEY]> [source <A.B.C.D>$bindaddr] preference (1-255)",
NO_STR
RPKI_OUTPUT_STRING
- "Remove a cache server\n"
- "IP address of cache server\n Hostname of cache server\n"
+ "Install a cache server to current group\n"
+ "IP address of cache server\n"
+ "Hostname of cache server\n"
"TCP port number\n"
"SSH port number\n"
+ "SSH user name\n"
+ "Path to own SSH private key\n"
+ "Path to Public key of cache server\n"
+ "Configure source IP address of RPKI connection\n"
+ "Define a Source IP Address\n"
"Preference of the cache server\n"
"Preference value\n")
{
struct cache *cache_p = find_cache(preference);
if (!cache_p) {
- vty_out(vty, "Could not find cache %ld\n", preference);
+ vty_out(vty, "Could not find cache with preference %ld\n",
+ preference);
return CMD_WARNING;
}
@@ -1237,9 +1263,9 @@ DEFPY (no_rpki_cache,
stop();
} else if (is_running()) {
if (rtr_mgr_remove_group(rtr_config, preference) == RTR_ERROR) {
- vty_out(vty, "Could not remove cache %ld", preference);
-
- vty_out(vty, "\n");
+ vty_out(vty,
+ "Could not remove cache with preference %ld\n",
+ preference);
return CMD_WARNING;
}
}
@@ -1392,9 +1418,11 @@ DEFPY (show_rpki_cache_server,
for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) {
if (cache->type == TCP) {
if (!json) {
- vty_out(vty, "host: %s port: %s\n",
+ vty_out(vty,
+ "host: %s port: %s, preference: %hhu\n",
cache->tr_config.tcp_config->host,
- cache->tr_config.tcp_config->port);
+ cache->tr_config.tcp_config->port,
+ cache->preference);
} else {
json_server = json_object_new_object();
json_object_string_add(json_server, "mode",
@@ -1405,6 +1433,8 @@ DEFPY (show_rpki_cache_server,
json_object_string_add(
json_server, "port",
cache->tr_config.tcp_config->port);
+ json_object_int_add(json_server, "preference",
+ cache->preference);
json_object_array_add(json_servers,
json_server);
}
@@ -1413,14 +1443,15 @@ DEFPY (show_rpki_cache_server,
} else if (cache->type == SSH) {
if (!json) {
vty_out(vty,
- "host: %s port: %d username: %s server_hostkey_path: %s client_privkey_path: %s\n",
+ "host: %s port: %d username: %s server_hostkey_path: %s client_privkey_path: %s, preference: %hhu\n",
cache->tr_config.ssh_config->host,
cache->tr_config.ssh_config->port,
cache->tr_config.ssh_config->username,
cache->tr_config.ssh_config
->server_hostkey_path,
cache->tr_config.ssh_config
- ->client_privkey_path);
+ ->client_privkey_path,
+ cache->preference);
} else {
json_server = json_object_new_object();
json_object_string_add(json_server, "mode",
@@ -1442,6 +1473,8 @@ DEFPY (show_rpki_cache_server,
json_server, "clientPrivkeyPath",
cache->tr_config.ssh_config
->client_privkey_path);
+ json_object_int_add(json_server, "preference",
+ cache->preference);
json_object_array_add(json_servers,
json_server);
}
@@ -1662,6 +1695,8 @@ static void install_cli_commands(void)
install_default(RPKI_NODE);
install_element(CONFIG_NODE, &rpki_cmd);
install_element(ENABLE_NODE, &rpki_cmd);
+ install_element(CONFIG_NODE, &no_rpki_cmd);
+
install_element(ENABLE_NODE, &bgp_rpki_start_cmd);
install_element(ENABLE_NODE, &bgp_rpki_stop_cmd);
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index f113eccd39..c2b6632643 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -421,8 +421,10 @@ static unsigned int updgrp_hash_key_make(const void *p)
if (bgp_debug_neighbor_events(peer)) {
zlog_debug(
- "%pBP Update Group Hash: sort: %d UpdGrpFlags: %u UpdGrpAFFlags: %u",
- peer, peer->sort, peer->flags & PEER_UPDGRP_FLAGS,
+ "%pBP Update Group Hash: sort: %d UpdGrpFlags: %" PRIu64
+ " UpdGrpAFFlags: %u",
+ peer, peer->sort,
+ (uint64_t)(peer->flags & PEER_UPDGRP_FLAGS),
flags & PEER_UPDGRP_AF_FLAGS);
zlog_debug(
"%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
@@ -462,8 +464,10 @@ static unsigned int updgrp_hash_key_make(const void *p)
peer->shared_network &&
peer_afi_active_nego(peer, AFI_IP6));
zlog_debug(
- "%pBP Update Group Hash: Lonesoul: %u ORF prefix: %u ORF old: %u max prefix out: %u",
- peer, CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
+ "%pBP Update Group Hash: Lonesoul: %" PRIu64
+ " ORF prefix: %u ORF old: %u max prefix out: %u",
+ peer,
+ (uint64_t)CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ORF_PREFIX_SM_RCV),
CHECK_FLAG(peer->af_cap[afi][safi],
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index baa894d67d..9d8328a794 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -11770,7 +11770,6 @@ static void bgp_show_neighbor_graceful_restart_time(struct vty *vty,
static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
bool use_json, json_object *json)
{
- char buf[SU_ADDRSTRLEN] = {0};
char dn_flag[2] = {0};
/* '*' + v6 address of neighbor */
char neighborAddr[INET6_ADDRSTRLEN + 1] = {0};
@@ -11780,18 +11779,11 @@ static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
if (p->conf_if) {
if (use_json)
- json_object_string_add(
- json, "neighborAddr",
- BGP_PEER_SU_UNSPEC(p)
- ? "none"
- : sockunion2str(&p->su, buf,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json, "neighborAddr", "%pSU",
+ &p->su);
else
- vty_out(vty, "BGP neighbor on %s: %s\n", p->conf_if,
- BGP_PEER_SU_UNSPEC(p)
- ? "none"
- : sockunion2str(&p->su, buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, "BGP neighbor on %s: %pSU\n", p->conf_if,
+ &p->su);
} else {
snprintf(neighborAddr, sizeof(neighborAddr), "%s%s", dn_flag,
p->host);
@@ -12459,7 +12451,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object *json)
{
struct bgp *bgp;
- char buf1[PREFIX2STR_BUFFER], buf[SU_ADDRSTRLEN];
+ char buf1[PREFIX2STR_BUFFER];
char timebuf[BGP_UPTIME_LEN];
char dn_flag[2];
afi_t afi;
@@ -12481,11 +12473,8 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (!use_json) {
if (p->conf_if) /* Configured interface name. */
- vty_out(vty, "BGP neighbor on %s: %s, ", p->conf_if,
- BGP_PEER_SU_UNSPEC(p)
- ? "None"
- : sockunion2str(&p->su, buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, "BGP neighbor on %s: %pSU, ", p->conf_if,
+ &p->su);
else /* Configured IP address. */
vty_out(vty, "BGP neighbor is %s%s, ", dn_flag,
p->host);
@@ -12496,9 +12485,8 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object_string_add(json_neigh, "bgpNeighborAddr",
"none");
else if (p->conf_if && !BGP_PEER_SU_UNSPEC(p))
- json_object_string_add(
- json_neigh, "bgpNeighborAddr",
- sockunion2str(&p->su, buf, SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh, "bgpNeighborAddr",
+ "%pSU", &p->su);
json_object_int_add(json_neigh, "remoteAs", p->as);
@@ -13950,10 +13938,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
"updateSource",
p->update_if);
else if (p->update_source)
- json_object_string_add(
- json_neigh, "updateSource",
- sockunion2str(p->update_source, buf1,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh,
+ "updateSource", "%pSU",
+ p->update_source);
}
} else {
/* advertisement-interval */
@@ -13967,9 +13954,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (p->update_if)
vty_out(vty, "%s", p->update_if);
else if (p->update_source)
- vty_out(vty, "%s",
- sockunion2str(p->update_source, buf1,
- SU_ADDRSTRLEN));
+ vty_out(vty, "%pSU", p->update_source);
vty_out(vty, "\n");
}
@@ -14125,30 +14110,38 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
/* Local address. */
if (p->su_local) {
if (use_json) {
- json_object_string_add(json_neigh, "hostLocal",
- sockunion2str(p->su_local, buf1,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh, "hostLocal", "%pSU",
+ p->su_local);
json_object_int_add(json_neigh, "portLocal",
ntohs(p->su_local->sin.sin_port));
} else
- vty_out(vty, "Local host: %s, Local port: %d\n",
- sockunion2str(p->su_local, buf1, SU_ADDRSTRLEN),
- ntohs(p->su_local->sin.sin_port));
+ vty_out(vty, "Local host: %pSU, Local port: %d\n",
+ p->su_local, ntohs(p->su_local->sin.sin_port));
+ } else {
+ if (use_json) {
+ json_object_string_add(json_neigh, "hostLocal",
+ "Unknown");
+ json_object_int_add(json_neigh, "portLocal", -1);
+ }
}
/* Remote address. */
if (p->su_remote) {
if (use_json) {
- json_object_string_add(json_neigh, "hostForeign",
- sockunion2str(p->su_remote, buf1,
- SU_ADDRSTRLEN));
+ json_object_string_addf(json_neigh, "hostForeign",
+ "%pSU", p->su_remote);
json_object_int_add(json_neigh, "portForeign",
ntohs(p->su_remote->sin.sin_port));
} else
- vty_out(vty, "Foreign host: %s, Foreign port: %d\n",
- sockunion2str(p->su_remote, buf1,
- SU_ADDRSTRLEN),
+ vty_out(vty, "Foreign host: %pSU, Foreign port: %d\n",
+ p->su_remote,
ntohs(p->su_remote->sin.sin_port));
+ } else {
+ if (use_json) {
+ json_object_string_add(json_neigh, "hostForeign",
+ "Unknown");
+ json_object_int_add(json_neigh, "portForeign", -1);
+ }
}
/* Nexthop display. */
@@ -16630,7 +16623,6 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
struct peer *peer)
{
struct peer *g_peer = NULL;
- char buf[SU_ADDRSTRLEN];
char *addr;
int if_pg_printed = false;
int if_ras_printed = false;
@@ -16832,9 +16824,8 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
/* update-source */
if (peergroup_flag_check(peer, PEER_FLAG_UPDATE_SOURCE)) {
if (peer->update_source)
- vty_out(vty, " neighbor %s update-source %s\n", addr,
- sockunion2str(peer->update_source, buf,
- SU_ADDRSTRLEN));
+ vty_out(vty, " neighbor %s update-source %pSU\n", addr,
+ peer->update_source);
else if (peer->update_if)
vty_out(vty, " neighbor %s update-source %s\n", addr,
peer->update_if);
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index c1b388b05d..fc7590dcc2 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -1132,6 +1132,7 @@ static bool update_ipv4nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp,
api_nh->type = NEXTHOP_TYPE_IPV4;
else {
api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN);
SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK);
api_nh->ifindex = nh_bgp->l3vni_svi_ifindex;
}
@@ -1170,6 +1171,7 @@ static bool update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp,
api_nh->type = NEXTHOP_TYPE_IPV6;
else {
api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN);
SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK);
api_nh->ifindex = nh_bgp->l3vni_svi_ifindex;
}
@@ -1268,7 +1270,6 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
mpls_label_t label;
struct bgp_sid_info *sid_info;
int nh_othervrf = 0;
- bool is_evpn;
bool nh_updated = false;
bool do_wt_ecmp;
uint64_t cum_bw = 0;
@@ -1319,11 +1320,6 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
tag = info->attr->tag;
- /* If the route's source is EVPN, flag as such. */
- is_evpn = is_route_parent_evpn(info);
- if (is_evpn)
- SET_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE);
-
if (peer->sort == BGP_PEER_IBGP || peer->sort == BGP_PEER_CONFED
|| info->sub_type == BGP_ROUTE_AGGREGATE) {
SET_FLAG(api.flags, ZEBRA_FLAG_IBGP);
@@ -1364,6 +1360,7 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
for (; mpinfo; mpinfo = bgp_path_info_mpath_next(mpinfo)) {
uint32_t nh_weight;
+ bool is_evpn;
if (valid_nh_count >= multipath_num)
break;
@@ -1430,6 +1427,8 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
BGP_ORIGINAL_UPDATE(bgp_orig, mpinfo, bgp);
if (nh_family == AF_INET) {
+ is_evpn = is_route_parent_evpn(mpinfo);
+
nh_updated = update_ipv4nh_for_route_install(
nh_othervrf, bgp_orig,
&mpinfo_cp->attr->nexthop, mpinfo_cp->attr,
@@ -1441,6 +1440,8 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
nexthop = bgp_path_info_to_ipv6_nexthop(mpinfo_cp,
&ifindex);
+ is_evpn = is_route_parent_evpn(mpinfo);
+
if (!nexthop)
nh_updated = update_ipv4nh_for_route_install(
nh_othervrf, bgp_orig,
@@ -1465,9 +1466,9 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
|| mpinfo->peer->sort == BGP_PEER_CONFED))
allow_recursion = true;
- if (mpinfo->extra
- && bgp_is_valid_label(&mpinfo->extra->label[0])
- && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ if (mpinfo->extra &&
+ bgp_is_valid_label(&mpinfo->extra->label[0]) &&
+ !CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN)) {
mpls_lse_decode(mpinfo->extra->label[0], &label, &ttl,
&exp, &bos);
@@ -1485,8 +1486,8 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
api_nh->weight = nh_weight;
- if (mpinfo->extra && !sid_zero(&mpinfo->extra->sid[0].sid)
- && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ if (mpinfo->extra && !sid_zero(&mpinfo->extra->sid[0].sid) &&
+ !CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN)) {
sid_info = &mpinfo->extra->sid[0];
memcpy(&api_nh->seg6_segs, &sid_info->sid,
@@ -1613,19 +1614,21 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
label_buf[0] = '\0';
eth_buf[0] = '\0';
segs_buf[0] = '\0';
- if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL)
- && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
+ if (CHECK_FLAG(api_nh->flags,
+ ZAPI_NEXTHOP_FLAG_LABEL) &&
+ !CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN))
snprintf(label_buf, sizeof(label_buf),
"label %u", api_nh->labels[0]);
- if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6)
- && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6) &&
+ !CHECK_FLAG(api_nh->flags,
+ ZAPI_NEXTHOP_FLAG_EVPN)) {
inet_ntop(AF_INET6, &api_nh->seg6_segs,
sid_buf, sizeof(sid_buf));
snprintf(segs_buf, sizeof(segs_buf), "segs %s",
sid_buf);
}
- if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)
- && !is_zero_mac(&api_nh->rmac))
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN) &&
+ !is_zero_mac(&api_nh->rmac))
snprintf(eth_buf, sizeof(eth_buf), " RMAC %s",
prefix_mac2str(&api_nh->rmac,
buf1, sizeof(buf1)));
@@ -1730,10 +1733,6 @@ void bgp_zebra_withdraw(const struct prefix *p, struct bgp_path_info *info,
api.tableid = info->attr->rmap_table_id;
}
- /* If the route's source is EVPN, flag as such. */
- if (is_route_parent_evpn(info))
- SET_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE);
-
if (bgp_debug_zebra(p))
zlog_debug("Tx route delete VRF %u %pFX", bgp->vrf_id,
&api.prefix);
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 26ac7a4c41..9d4d5bdee5 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -1295,7 +1295,7 @@ struct peer {
* peer-group, the peer-specific overrides (see flags_override and
* flags_invert) must be respected.
*/
- uint32_t flags;
+ uint64_t flags;
#define PEER_FLAG_PASSIVE (1U << 0) /* passive mode */
#define PEER_FLAG_SHUTDOWN (1U << 1) /* shutdown */
#define PEER_FLAG_DONT_CAPABILITY (1U << 2) /* dont-capability */
diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c
index 672a0e9780..293f88d1df 100644
--- a/bgpd/rfapi/vnc_zebra.c
+++ b/bgpd/rfapi/vnc_zebra.c
@@ -628,7 +628,6 @@ static void vnc_zebra_add_del_nve(struct bgp *bgp, struct rfapi_descriptor *rfd,
struct rfapi_nve_group_cfg *rfg = rfd->rfg;
afi_t afi = family2afi(rfd->vn_addr.addr_family);
struct prefix nhp;
- // struct prefix *nhpp;
void *pAddr;
vnc_zlog_debug_verbose("%s: entry, add=%d", __func__, add);
@@ -660,7 +659,7 @@ static void vnc_zebra_add_del_nve(struct bgp *bgp, struct rfapi_descriptor *rfd,
return;
}
- pAddr = &nhp.u.prefix4;
+ pAddr = &nhp.u.val;
/*
* Loop over the list of NVE-Groups configured for
diff --git a/debian/frr.postinst b/debian/frr.postinst
index 4e23cd3cec..eb9ec67dd9 100644
--- a/debian/frr.postinst
+++ b/debian/frr.postinst
@@ -40,7 +40,7 @@ find \
# don't chown anything that has ACLs (but don't fail if we don't
# have getfacl)
if { getfacl -c "$filename" 2>/dev/null || true; } \
- | egrep -q -v '^((user|group|other)::|$)'; then
+ | grep -E -q -v '^((user|group|other)::|$)'; then
:
else
chown frr: "$filename"
diff --git a/doc/user/installation.rst b/doc/user/installation.rst
index 401a1f2721..ba35facf2a 100644
--- a/doc/user/installation.rst
+++ b/doc/user/installation.rst
@@ -231,7 +231,8 @@ options from the list below.
Enable the support of Linux Realms. Convert tag values from 1-255 into a
realm value when inserting into the Linux kernel. Then routing policy can be
- assigned to the realm. See the tc man page.
+ assigned to the realm. See the tc man page. This option is currently not
+ compatible with the usage of nexthop groups in the linux kernel itself.
.. option:: --disable-irdp
diff --git a/doc/user/ospfd.rst b/doc/user/ospfd.rst
index e438d04e93..26810bd883 100644
--- a/doc/user/ospfd.rst
+++ b/doc/user/ospfd.rst
@@ -798,6 +798,8 @@ Showing Information
.. clicmd:: show ip ospf neighbor detail [json]
+.. clicmd:: show ip ospf neighbor A.B.C.D [detail] [json]
+
.. clicmd:: show ip ospf neighbor INTERFACE detail [json]
Display lsa information of LSDB.
diff --git a/doc/user/routemap.rst b/doc/user/routemap.rst
index 754b709173..05c9eeb755 100644
--- a/doc/user/routemap.rst
+++ b/doc/user/routemap.rst
@@ -244,7 +244,9 @@ Route Map Set Command
Set a tag on the matched route. This tag value can be from (1-4294967295).
Additionally if you have compiled with the :option:`--enable-realms`
configure option. Tag values from (1-255) are sent to the Linux kernel as a
- realm value. Then route policy can be applied. See the tc man page.
+ realm value. Then route policy can be applied. See the tc man page. As
+ a note realms cannot currently be used with the installation of nexthops
+ as nexthop groups in the linux kernel.
.. clicmd:: set ip next-hop IPV4_ADDRESS
diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst
index e5bd59d9cb..cc0e7f70c6 100644
--- a/doc/user/rpki.rst
+++ b/doc/user/rpki.rst
@@ -120,7 +120,7 @@ The following commands are independent of a specific cache server.
The default value is 600 seconds.
-.. clicmd:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [SSH_PUBKEY_PATH] [KNOWN_HOSTS_PATH] [source A.B.C.D] PREFERENCE
+.. clicmd:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [KNOWN_HOSTS_PATH] [source A.B.C.D] preference (1-255)
Add a cache server to the socket. By default, the connection between router
@@ -137,15 +137,9 @@ The following commands are independent of a specific cache server.
SSH_USERNAME
SSH username to establish an SSH connection to the cache server.
-
SSH_PRIVKEY_PATH
Local path that includes the private key file of the router.
-
- SSH_PUBKEY_PATH
- Local path that includes the public key file of the router.
-
-
KNOWN_HOSTS_PATH
Local path that includes the known hosts file. The default value depends
on the configuration of the operating system environment, usually
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 29f305520a..eca67c0609 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -273,6 +273,12 @@ Nexthop tracking doesn't resolve nexthops via the default route by default.
Allowing this might be useful when e.g. you want to allow BGP to peer across
the default route.
+.. clicmd:: zebra nexthop-group keep (1-3600)
+
+ Set the time that zebra will keep a created and installed nexthop group
+ before removing it from the system if the nexthop group is no longer
+ being used. The default time is 180 seconds.
+
.. clicmd:: ip nht resolve-via-default
Allow IPv4 nexthop tracking to resolve via the default route. This parameter
diff --git a/eigrpd/eigrp_vty.c b/eigrpd/eigrp_vty.c
index 0809ac2cf0..3d61294b22 100644
--- a/eigrpd/eigrp_vty.c
+++ b/eigrpd/eigrp_vty.c
@@ -240,14 +240,14 @@ DEFPY (show_ip_eigrp_interfaces,
struct eigrp *eigrp;
if (vrf && strncmp(vrf, "all", sizeof("all")) == 0) {
- struct vrf *vrf;
+ struct vrf *v;
- RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- eigrp = eigrp_lookup(vrf->vrf_id);
+ RB_FOREACH (v, vrf_name_head, &vrfs_by_name) {
+ eigrp = eigrp_lookup(v->vrf_id);
if (!eigrp)
continue;
- vty_out(vty, "VRF %s:\n", vrf->name);
+ vty_out(vty, "VRF %s:\n", v->name);
eigrp_interface_helper(vty, eigrp, ifname, detail);
}
diff --git a/isisd/isis_bfd.c b/isisd/isis_bfd.c
index 5311a384e7..9e8267b6e8 100644
--- a/isisd/isis_bfd.c
+++ b/isisd/isis_bfd.c
@@ -206,7 +206,7 @@ static int bfd_handle_circuit_add_addr(struct isis_circuit *circuit)
struct isis_adjacency *adj;
struct listnode *node;
- if (circuit->area == 0)
+ if (circuit->area == NULL)
return 0;
for (ALL_LIST_ELEMENTS_RO(circuit->area->adjacency_list, node, adj)) {
diff --git a/isisd/isis_snmp.c b/isisd/isis_snmp.c
index 3590044f85..a184b5b30a 100644
--- a/isisd/isis_snmp.c
+++ b/isisd/isis_snmp.c
@@ -2117,7 +2117,7 @@ static uint8_t *isis_snmp_find_circ(struct variable *v, oid *name,
switch (v->magic) {
case ISIS_CIRC_IFINDEX:
- if (circuit->interface == 0)
+ if (circuit->interface == NULL)
return SNMP_INTEGER(0);
return SNMP_INTEGER(circuit->interface->ifindex);
diff --git a/lib/command.c b/lib/command.c
index c4db045633..cbecc81574 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -429,7 +429,6 @@ static char *zencrypt(const char *passwd)
{
char salt[6];
struct timeval tv;
- char *crypt(const char *, const char *);
gettimeofday(&tv, 0);
diff --git a/lib/if.c b/lib/if.c
index 89d30e4fb3..fa4fdb82d3 100644
--- a/lib/if.c
+++ b/lib/if.c
@@ -82,6 +82,8 @@ int if_cmp_name_func(const char *p1, const char *p2)
int res;
while (*p1 && *p2) {
+ char *tmp1, *tmp2;
+
/* look up to any number */
l1 = strcspn(p1, "0123456789");
l2 = strcspn(p2, "0123456789");
@@ -111,8 +113,8 @@ int if_cmp_name_func(const char *p1, const char *p2)
if (!*p2)
return 1;
- x1 = strtol(p1, (char **)&p1, 10);
- x2 = strtol(p2, (char **)&p2, 10);
+ x1 = strtol(p1, (char **)&tmp1, 10);
+ x2 = strtol(p2, (char **)&tmp2, 10);
/* let's compare numbers now */
if (x1 < x2)
@@ -120,6 +122,16 @@ int if_cmp_name_func(const char *p1, const char *p2)
if (x1 > x2)
return 1;
+ /* Compare string if numbers are equal (distinguish foo-1 from foo-001) */
+ l1 = strspn(p1, "0123456789");
+ l2 = strspn(p2, "0123456789");
+ if (l1 != l2)
+ return (strcmp(p1, p2));
+
+ /* Continue to parse the rest of the string */
+ p1 = (const char *)tmp1;
+ p2 = (const char *)tmp2;
+
/* numbers were equal, lets do it again..
(it happens with name like "eth123.456:789") */
}
diff --git a/lib/libfrr.c b/lib/libfrr.c
index 042c9d3704..f5aecd9f75 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -963,6 +963,8 @@ static void frr_daemonize(void)
}
close(fds[1]);
+ nb_terminate();
+ yang_terminate();
frr_daemon_wait(fds[0]);
}
diff --git a/lib/nexthop.h b/lib/nexthop.h
index c13f1b1376..f1309aa525 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -81,7 +81,7 @@ struct nexthop {
enum nexthop_types_t type;
- uint8_t flags;
+ uint16_t flags;
#define NEXTHOP_FLAG_ACTIVE (1 << 0) /* This nexthop is alive. */
#define NEXTHOP_FLAG_FIB (1 << 1) /* FIB nexthop. */
#define NEXTHOP_FLAG_RECURSIVE (1 << 2) /* Recursive nexthop. */
@@ -94,6 +94,8 @@ struct nexthop {
#define NEXTHOP_FLAG_RNH_FILTERED (1 << 5) /* rmap filtered, used by rnh */
#define NEXTHOP_FLAG_HAS_BACKUP (1 << 6) /* Backup nexthop index is set */
#define NEXTHOP_FLAG_SRTE (1 << 7) /* SR-TE color used for BGP traffic */
+#define NEXTHOP_FLAG_EVPN (1 << 8) /* nexthop is EVPN */
+#define NEXTHOP_FLAG_LINKDOWN (1 << 9) /* is not removed on link down */
#define NEXTHOP_IS_ACTIVE(flags) \
(CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index e8c678ad71..7284d6cea6 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -1094,12 +1094,10 @@ void nexthop_group_json_nexthop(json_object *j, const struct nexthop *nh)
static void nexthop_group_write_nexthop_internal(struct vty *vty,
const struct nexthop_hold *nh)
{
- char buf[100];
-
vty_out(vty, "nexthop");
if (nh->addr)
- vty_out(vty, " %s", sockunion2str(nh->addr, buf, sizeof(buf)));
+ vty_out(vty, " %pSU", nh->addr);
if (nh->intf)
vty_out(vty, " %s", nh->intf);
diff --git a/lib/prefix.h b/lib/prefix.h
index 42394ec61c..f9eef28a0b 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -594,6 +594,15 @@ static inline int is_default_host_route(const struct prefix *p)
return 0;
}
+static inline bool is_ipv6_global_unicast(const struct in6_addr *p)
+{
+ if (IN6_IS_ADDR_UNSPECIFIED(p) || IN6_IS_ADDR_LOOPBACK(p) ||
+ IN6_IS_ADDR_LINKLOCAL(p) || IN6_IS_ADDR_MULTICAST(p))
+ return false;
+
+ return true;
+}
+
/* IPv6 scope values, usable for IPv4 too (cf. below) */
/* clang-format off */
enum {
diff --git a/lib/sockunion.c b/lib/sockunion.c
index 97b198c018..eff38798cc 100644
--- a/lib/sockunion.c
+++ b/lib/sockunion.c
@@ -525,6 +525,11 @@ union sockunion *sockunion_getsockname(int fd)
sockunion_normalise_mapped(su);
return su;
}
+
+ flog_err(
+ EC_LIB_SOCKET,
+ "Unexpected AFI received(%d) for sockunion_getsockname call for fd: %d",
+ name.sa.sa_family, fd);
return NULL;
}
@@ -561,6 +566,11 @@ union sockunion *sockunion_getpeername(int fd)
sockunion_normalise_mapped(su);
return su;
}
+
+ flog_err(
+ EC_LIB_SOCKET,
+ "Unexpected AFI received(%d) for sockunion_getpeername call for fd: %d",
+ name.sa.sa_family, fd);
return NULL;
}
diff --git a/lib/vty.c b/lib/vty.c
index e0e2f39229..92db07677a 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -382,7 +382,6 @@ static void vty_auth(struct vty *vty, char *buf)
char *passwd = NULL;
enum node_type next_node = 0;
int fail;
- char *crypt(const char *, const char *);
switch (vty->node) {
case AUTH_NODE:
diff --git a/lib/zclient.c b/lib/zclient.c
index a933b6bb2b..e556b768ac 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -1038,7 +1038,7 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh,
stream_putl(s, api_nh->weight);
/* Router MAC for EVPN routes. */
- if (CHECK_FLAG(api_flags, ZEBRA_FLAG_EVPN_ROUTE))
+ if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_EVPN))
stream_put(s, &(api_nh->rmac),
sizeof(struct ethaddr));
@@ -1402,7 +1402,7 @@ int zapi_nexthop_decode(struct stream *s, struct zapi_nexthop *api_nh,
STREAM_GETL(s, api_nh->weight);
/* Router MAC for EVPN routes. */
- if (CHECK_FLAG(api_flags, ZEBRA_FLAG_EVPN_ROUTE))
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN))
STREAM_GET(&(api_nh->rmac), s,
sizeof(struct ethaddr));
@@ -1830,6 +1830,9 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh,
if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ONLINK))
SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_ONLINK);
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_EVPN))
+ SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_EVPN);
+
if (nh->nh_label && (nh->nh_label->num_labels > 0)) {
/* Validate */
diff --git a/lib/zclient.h b/lib/zclient.h
index 9756923a69..c3ea2a16ff 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -452,6 +452,7 @@ struct zapi_nexthop {
#define ZAPI_NEXTHOP_FLAG_HAS_BACKUP 0x08 /* Nexthop has a backup */
#define ZAPI_NEXTHOP_FLAG_SEG6 0x10
#define ZAPI_NEXTHOP_FLAG_SEG6LOCAL 0x20
+#define ZAPI_NEXTHOP_FLAG_EVPN 0x40
/*
* ZAPI Nexthop Group. For use with protocol creation of nexthop groups.
diff --git a/nhrpd/nhrp_nhs.c b/nhrpd/nhrp_nhs.c
index 63eaf1e394..03b4b533bb 100644
--- a/nhrpd/nhrp_nhs.c
+++ b/nhrpd/nhrp_nhs.c
@@ -163,7 +163,6 @@ static void nhrp_reg_send_req(struct thread *t)
{
struct nhrp_registration *r = THREAD_ARG(t);
struct nhrp_nhs *nhs = r->nhs;
- char buf1[SU_ADDRSTRLEN], buf2[SU_ADDRSTRLEN];
struct interface *ifp = nhs->ifp;
struct nhrp_interface *nifp = ifp->info;
struct nhrp_afi_data *if_ad = &nifp->afi[nhs->afi];
@@ -189,10 +188,8 @@ static void nhrp_reg_send_req(struct thread *t)
if (sockunion_family(dst_proto) == AF_UNSPEC)
dst_proto = &if_ad->addr;
- sockunion2str(&if_ad->addr, buf1, sizeof(buf1));
- sockunion2str(dst_proto, buf2, sizeof(buf2));
- debugf(NHRP_DEBUG_COMMON, "NHS: Register %s -> %s (timeout %d)", buf1,
- buf2, r->timeout);
+ debugf(NHRP_DEBUG_COMMON, "NHS: Register %pSU -> %pSU (timeout %d)",
+ &if_ad->addr, dst_proto, r->timeout);
/* No protocol address configured for tunnel interface */
if (sockunion_family(&if_ad->addr) == AF_UNSPEC)
diff --git a/nhrpd/nhrp_peer.c b/nhrpd/nhrp_peer.c
index 67d12cbcf3..4b03032566 100644
--- a/nhrpd/nhrp_peer.c
+++ b/nhrpd/nhrp_peer.c
@@ -1083,7 +1083,6 @@ err:
static void nhrp_packet_debug(struct zbuf *zb, const char *dir)
{
- char buf[2][SU_ADDRSTRLEN];
union sockunion src_nbma, src_proto, dst_proto;
struct nhrp_packet_header *hdr;
struct zbuf zhdr;
@@ -1095,14 +1094,12 @@ static void nhrp_packet_debug(struct zbuf *zb, const char *dir)
zbuf_init(&zhdr, zb->buf, zb->tail - zb->buf, zb->tail - zb->buf);
hdr = nhrp_packet_pull(&zhdr, &src_nbma, &src_proto, &dst_proto);
- sockunion2str(&src_proto, buf[0], sizeof(buf[0]));
- sockunion2str(&dst_proto, buf[1], sizeof(buf[1]));
-
reply = packet_types[hdr->type].type == PACKET_REPLY;
- debugf(NHRP_DEBUG_COMMON, "%s %s(%d) %s -> %s", dir,
+ debugf(NHRP_DEBUG_COMMON, "%s %s(%d) %pSU -> %pSU", dir,
(packet_types[hdr->type].name ? packet_types[hdr->type].name
: "Unknown"),
- hdr->type, reply ? buf[1] : buf[0], reply ? buf[0] : buf[1]);
+ hdr->type, reply ? &dst_proto : &src_proto,
+ reply ? &src_proto : &dst_proto);
}
static int proto2afi(uint16_t proto)
diff --git a/nhrpd/nhrp_shortcut.c b/nhrpd/nhrp_shortcut.c
index 71b6dd8702..4975aca006 100644
--- a/nhrpd/nhrp_shortcut.c
+++ b/nhrpd/nhrp_shortcut.c
@@ -48,23 +48,17 @@ static void nhrp_shortcut_do_expire(struct thread *t)
static void nhrp_shortcut_cache_notify(struct notifier_block *n,
unsigned long cmd)
{
- char buf2[PREFIX_STRLEN];
-
struct nhrp_shortcut *s =
container_of(n, struct nhrp_shortcut, cache_notifier);
struct nhrp_cache *c = s->cache;
- if (c)
- sockunion2str(&c->remote_addr, buf2, sizeof(buf2));
- else
- snprintf(buf2, sizeof(buf2), "(unspec)");
switch (cmd) {
case NOTIFY_CACHE_UP:
if (!s->route_installed) {
debugf(NHRP_DEBUG_ROUTE,
- "Shortcut: route install %pFX nh %s dev %s",
- s->p, buf2, c && c->ifp ?
- c->ifp->name : "<unk>");
+ "Shortcut: route install %pFX nh %pSU dev %s",
+ s->p, &c->remote_addr,
+ c && c->ifp ? c->ifp->name : "<unk>");
nhrp_route_announce(1, s->type, s->p, c ? c->ifp : NULL,
c ? &c->remote_addr : NULL, 0);
diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c
index 4db2d8b89e..3a8baa2342 100644
--- a/nhrpd/nhrp_vty.c
+++ b/nhrpd/nhrp_vty.c
@@ -1001,18 +1001,15 @@ static void show_dmvpn_entry(struct nhrp_vc *vc, void *ctx)
{
struct dmvpn_cfg *ctxt = ctx;
struct vty *vty;
- char buf[2][SU_ADDRSTRLEN];
struct json_object *json = NULL;
if (!ctxt || !ctxt->vty)
return;
vty = ctxt->vty;
- sockunion2str(&vc->local.nbma, buf[0], sizeof(buf[0]));
- sockunion2str(&vc->remote.nbma, buf[1], sizeof(buf[1]));
if (ctxt->json) {
json = json_object_new_object();
- json_object_string_add(json, "src", buf[0]);
- json_object_string_add(json, "dst", buf[1]);
+ json_object_string_addf(json, "src", "%pSU", &vc->local.nbma);
+ json_object_string_addf(json, "dst", "%pSU", &vc->remote.nbma);
if (notifier_active(&vc->notifier_list))
json_object_boolean_true_add(json, "notifierActive");
@@ -1023,9 +1020,10 @@ static void show_dmvpn_entry(struct nhrp_vc *vc, void *ctx)
json_object_string_add(json, "identity", vc->remote.id);
json_object_array_add(ctxt->json, json);
} else {
- vty_out(vty, "%-24s %-24s %c %-4d %-24s\n",
- buf[0], buf[1], notifier_active(&vc->notifier_list) ?
- 'n' : ' ', vc->ipsec, vc->remote.id);
+ vty_out(vty, "%-24pSU %-24pSU %c %-4d %-24s\n",
+ &vc->local.nbma, &vc->remote.nbma,
+ notifier_active(&vc->notifier_list) ? 'n' : ' ',
+ vc->ipsec, vc->remote.id);
}
}
diff --git a/nhrpd/zbuf.c b/nhrpd/zbuf.c
index 9cc2b56245..3d54f4ed50 100644
--- a/nhrpd/zbuf.c
+++ b/nhrpd/zbuf.c
@@ -89,7 +89,7 @@ ssize_t zbuf_read(struct zbuf *zb, int fd, size_t maxlen)
zb->tail += r;
else if (r == 0)
r = -2;
- else if (r < 0 && ERRNO_IO_RETRY(errno))
+ else if (ERRNO_IO_RETRY(errno))
r = 0;
return r;
@@ -109,7 +109,7 @@ ssize_t zbuf_write(struct zbuf *zb, int fd)
zbuf_reset(zb);
} else if (r == 0)
r = -2;
- else if (r < 0 && ERRNO_IO_RETRY(errno))
+ else if (ERRNO_IO_RETRY(errno))
r = 0;
return r;
@@ -128,7 +128,7 @@ ssize_t zbuf_recv(struct zbuf *zb, int fd)
zb->tail += r;
else if (r == 0)
r = -2;
- else if (r < 0 && ERRNO_IO_RETRY(errno))
+ else if (ERRNO_IO_RETRY(errno))
r = 0;
return r;
}
diff --git a/ospf6d/ospf6_auth_trailer.c b/ospf6d/ospf6_auth_trailer.c
index 77ac4a1877..e54f6784e8 100644
--- a/ospf6d/ospf6_auth_trailer.c
+++ b/ospf6d/ospf6_auth_trailer.c
@@ -120,7 +120,13 @@ void ospf6_auth_hdr_dump_recv(struct ospf6_header *ospfh, uint16_t length,
ospf6_at_hdr =
(struct ospf6_auth_hdr *)((uint8_t *)ospfh + oh_len);
at_hdr_len = ntohs(ospf6_at_hdr->length);
- hash_len = at_hdr_len - OSPF6_AUTH_HDR_MIN_SIZE;
+ hash_len = at_hdr_len - (uint16_t)OSPF6_AUTH_HDR_MIN_SIZE;
+ if (hash_len > KEYCHAIN_MAX_HASH_SIZE) {
+ zlog_debug(
+ "Specified value for hash_len %u is greater than expected %u",
+ hash_len, KEYCHAIN_MAX_HASH_SIZE);
+ return;
+ }
memcpy(temp, ospf6_at_hdr->data, hash_len);
temp[hash_len] = '\0';
zlog_debug("OSPF6 Authentication Trailer");
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index 2a503c6233..efa5d2b7ab 100644
--- a/ospf6d/ospf6_interface.c
+++ b/ospf6d/ospf6_interface.c
@@ -165,8 +165,6 @@ static uint32_t ospf6_interface_get_cost(struct ospf6_interface *oi)
cost = (uint32_t)((double)refbw / (double)bw + (double)0.5);
if (cost < 1)
cost = 1;
- else if (cost > UINT32_MAX)
- cost = UINT32_MAX;
}
return cost;
diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c
index 8366716585..f1a4209a8e 100644
--- a/ospf6d/ospf6_zebra.c
+++ b/ospf6d/ospf6_zebra.c
@@ -432,9 +432,12 @@ static void ospf6_zebra_route_update(int type, struct ospf6_route *request,
}
/* If removing is the best path and if there's another path,
- treat this request as add the secondary path */
- if (type == REM && ospf6_route_is_best(request) && request->next
- && ospf6_route_is_same(request, request->next)) {
+ * treat this request as add the secondary path - if there are
+ * nexthops.
+ */
+ if (type == REM && ospf6_route_is_best(request) && request->next &&
+ ospf6_route_is_same(request, request->next) &&
+ ospf6_route_num_nexthops(request->next) > 0) {
if (IS_OSPF6_DEBUG_ZEBRA(SEND))
zlog_debug(
" Best-path removal resulted Secondary addition");
@@ -452,9 +455,12 @@ static void ospf6_zebra_route_update(int type, struct ospf6_route *request,
nhcount = ospf6_route_num_nexthops(request);
if (nhcount == 0) {
- if (IS_OSPF6_DEBUG_ZEBRA(SEND))
- zlog_debug(" No nexthop, ignore");
- return;
+ if (type == ADD) {
+ if (IS_OSPF6_DEBUG_ZEBRA(SEND))
+ zlog_debug(" No nexthop, ignore");
+ return;
+ } else if (IS_OSPF6_DEBUG_ZEBRA(SEND))
+ zlog_debug(" No nexthop, rem ok");
}
dest = &request->prefix;
@@ -464,17 +470,20 @@ static void ospf6_zebra_route_update(int type, struct ospf6_route *request,
api.type = ZEBRA_ROUTE_OSPF6;
api.safi = SAFI_UNICAST;
api.prefix = *dest;
- SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
if (nhcount > ospf6->max_multipath) {
if (IS_OSPF6_DEBUG_ZEBRA(SEND))
zlog_debug(
" Nexthop count is greater than configured maximum-path, hence ignore the extra nexthops");
}
+
api.nexthop_num = MIN(nhcount, ospf6->max_multipath);
+ if (api.nexthop_num > 0) {
+ SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
+ ospf6_route_zebra_copy_nexthops(request, api.nexthops,
+ api.nexthop_num, api.vrf_id);
+ }
- ospf6_route_zebra_copy_nexthops(request, api.nexthops, api.nexthop_num,
- api.vrf_id);
SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
api.metric = (request->path.metric_type == 2 ? request->path.u.cost_e2
: request->path.cost);
diff --git a/ospfclient/README b/ospfclient/README
index 894cd783ca..5f6d050831 100644
--- a/ospfclient/README
+++ b/ospfclient/README
@@ -1,4 +1,3 @@
-For more information about this software check out:
-
-http://www.tik.ee.ethz.ch/~keller/ospfapi/
+For more information check out the developer guide at:
+https://docs.frrouting.org/projects/dev-guide/en/latest/ospf-api.html
diff --git a/ospfclient/ospfclient.py b/ospfclient/ospfclient.py
index be8b51f007..c7cfc88b9c 100755
--- a/ospfclient/ospfclient.py
+++ b/ospfclient/ospfclient.py
@@ -3,7 +3,7 @@
#
# December 22 2021, Christian Hopps <chopps@labn.net>
#
-# Copyright 2021, LabN Consulting, L.L.C.
+# Copyright 2021-2022, LabN Consulting, L.L.C.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -54,6 +54,7 @@ MSG_DELETE_REQUEST = 6
MSG_SYNC_REACHABLE = 7
MSG_SYNC_ISM = 8
MSG_SYNC_NSM = 9
+MSG_SYNC_ROUTER_ID = 19
smsg_info = {
MSG_REGISTER_OPAQUETYPE: ("REGISTER_OPAQUETYPE", "BBxx"),
@@ -65,6 +66,7 @@ smsg_info = {
MSG_SYNC_REACHABLE: ("MSG_SYNC_REACHABLE", ""),
MSG_SYNC_ISM: ("MSG_SYNC_ISM", ""),
MSG_SYNC_NSM: ("MSG_SYNC_NSM", ""),
+ MSG_SYNC_ROUTER_ID: ("MSG_SYNC_ROUTER_ID", ""),
}
# --------------------------
@@ -80,6 +82,7 @@ MSG_DEL_IF = 15
MSG_ISM_CHANGE = 16
MSG_NSM_CHANGE = 17
MSG_REACHABLE_CHANGE = 18
+MSG_ROUTER_ID_CHANGE = 20
amsg_info = {
MSG_REPLY: ("REPLY", "bxxx"),
@@ -91,6 +94,7 @@ amsg_info = {
MSG_ISM_CHANGE: ("ISM_CHANGE", ">IIBxxx"),
MSG_NSM_CHANGE: ("NSM_CHANGE", ">IIIBxxx"),
MSG_REACHABLE_CHANGE: ("REACHABLE_CHANGE", ">HH"),
+ MSG_ROUTER_ID_CHANGE: ("ROUTER_ID_CHANGE", ">I"),
}
OSPF_API_OK = 0
@@ -536,6 +540,11 @@ class OspfApiClient:
logging.debug("SEND: %s: request NSM changes", self)
await self.msg_send_raises(MSG_SYNC_NSM)
+ async def req_router_id_sync(self):
+ "Request the current OSPF router ID from the daemon."
+ logging.debug("SEND: %s: request router ID sync", self)
+ await self.msg_send_raises(MSG_SYNC_ROUTER_ID)
+
class OspfOpaqueClient(OspfApiClient):
"""A client connection to OSPF Daemon for manipulating Opaque LSA data.
@@ -564,6 +573,7 @@ class OspfOpaqueClient(OspfApiClient):
MSG_ISM_CHANGE: self._if_change_msg,
MSG_NSM_CHANGE: self._nbr_change_msg,
MSG_REACHABLE_CHANGE: self._reachable_msg,
+ MSG_ROUTER_ID_CHANGE: self._router_id_msg,
}
super().__init__(server, handlers)
@@ -573,6 +583,9 @@ class OspfOpaqueClient(OspfApiClient):
LSA_TYPE_OPAQUE_AREA: {},
LSA_TYPE_OPAQUE_AS: {},
}
+ self.router_id = ip(0)
+ self.router_id_change_cb = None
+
self.lsid_seq_num = {}
self.lsa_change_cb = None
@@ -775,6 +788,25 @@ class OspfOpaqueClient(OspfApiClient):
logging.info("RECV: %s calling callback", api_msgname(mt))
await self.reachable_change_cb(router_ids[:nadd], router_ids[nadd:])
+ async def _router_id_msg(self, mt, msg, extra, router_id):
+ router_id = ip(router_id)
+ logging.info("RECV: %s router ID %s", api_msgname(mt), router_id)
+ old_router_id = self.router_id
+ if old_router_id == router_id:
+ return
+
+ self.router_id = router_id
+ logging.info(
+ "RECV: %s new router ID %s older router ID %s",
+ api_msgname(mt),
+ router_id,
+ old_router_id,
+ )
+
+ if self.router_id_change_cb:
+ logging.info("RECV: %s calling callback", api_msgname(mt))
+ await self.router_id_change_cb(router_id, old_router_id)
+
async def add_opaque_data(self, addr, lsa_type, otype, oid, data):
"""Add an instance of opaque data.
@@ -1022,6 +1054,25 @@ class OspfOpaqueClient(OspfApiClient):
self.nsm_change_cb = callback
await self.req_nsm_states()
+ async def monitor_router_id(self, callback=None):
+ """Monitor the OSPF router ID.
+
+ The property `router_id` contains the OSPF router ID.
+ This value is updated prior to calling the `callback`
+
+ Args:
+ callback: callback will be called when the router ID changes.
+ The callback signature is:
+
+ `callback(new_router_id, old_router_id)`
+
+ Args:
+ new_router_id: the new router ID
+ old_router_id: the old router ID
+ """
+ self.router_id_change_cb = callback
+ await self.req_router_id_sync()
+
# ================
# CLI/Script Usage
diff --git a/ospfd/ospf_api.c b/ospfd/ospf_api.c
index 99bc6c0b03..8636db450b 100644
--- a/ospfd/ospf_api.c
+++ b/ospfd/ospf_api.c
@@ -1,6 +1,7 @@
/*
* API message handling module for OSPF daemon and client.
* Copyright (C) 2001, 2002 Ralph Keller
+ * Copyright (c) 2022, LabN Consulting, L.L.C.
*
* This file is part of GNU Zebra.
*
@@ -682,4 +683,12 @@ struct msg *new_msg_reachable_change(uint32_t seqnum, uint16_t nadd,
return msg_new(MSG_REACHABLE_CHANGE, nmsg, seqnum, len);
}
+struct msg *new_msg_router_id_change(uint32_t seqnum, struct in_addr router_id)
+{
+ struct msg_router_id_change rmsg = {.router_id = router_id};
+
+ return msg_new(MSG_ROUTER_ID_CHANGE, &rmsg, seqnum,
+ sizeof(struct msg_router_id_change));
+}
+
#endif /* SUPPORT_OSPF_API */
diff --git a/ospfd/ospf_api.h b/ospfd/ospf_api.h
index 50b0c21c77..51c8c52ce5 100644
--- a/ospfd/ospf_api.h
+++ b/ospfd/ospf_api.h
@@ -1,6 +1,7 @@
/*
* API message handling module for OSPF daemon and client.
* Copyright (C) 2001, 2002 Ralph Keller
+ * Copyright (c) 2022, LabN Consulting, L.L.C.
*
* This file is part of GNU Zebra.
*
@@ -118,6 +119,7 @@ extern void msg_fifo_free(struct msg_fifo *fifo);
#define MSG_SYNC_REACHABLE 7
#define MSG_SYNC_ISM 8
#define MSG_SYNC_NSM 9
+#define MSG_SYNC_ROUTER_ID 19
/* Messages from OSPF daemon. */
#define MSG_REPLY 10
@@ -129,6 +131,7 @@ extern void msg_fifo_free(struct msg_fifo *fifo);
#define MSG_ISM_CHANGE 16
#define MSG_NSM_CHANGE 17
#define MSG_REACHABLE_CHANGE 18
+#define MSG_ROUTER_ID_CHANGE 20
struct msg_register_opaque_type {
uint8_t lsatype;
@@ -260,6 +263,10 @@ struct msg_reachable_change {
struct in_addr router_ids[]; /* add followed by remove */
};
+struct msg_router_id_change {
+ struct in_addr router_id; /* this system's router ID */
+};
+
/* We make use of a union to define a structure that covers all
possible API messages. This allows us to find out how much memory
needs to be reserved for the largest API message. */
@@ -279,6 +286,7 @@ struct apimsg {
struct msg_nsm_change nsm_change;
struct msg_lsa_change_notify lsa_change_notify;
struct msg_reachable_change reachable_change;
+ struct msg_router_id_change router_id_change;
} u;
};
@@ -338,6 +346,9 @@ extern struct msg *new_msg_reachable_change(uint32_t seqnum, uint16_t nadd,
struct in_addr *add,
uint16_t nremove,
struct in_addr *remove);
+
+extern struct msg *new_msg_router_id_change(uint32_t seqnr,
+ struct in_addr router_id);
/* string printing functions */
extern const char *ospf_api_errname(int errcode);
extern const char *ospf_api_typename(int msgtype);
diff --git a/ospfd/ospf_apiserver.c b/ospfd/ospf_apiserver.c
index 7c3fef4536..9d73c3dfe6 100644
--- a/ospfd/ospf_apiserver.c
+++ b/ospfd/ospf_apiserver.c
@@ -1,6 +1,7 @@
/*
* Server side of OSPF API.
* Copyright (C) 2001, 2002 Ralph Keller
+ * Copyright (c) 2022, LabN Consulting, L.L.C.
*
* This file is part of GNU Zebra.
*
@@ -727,6 +728,7 @@ static int ospf_apiserver_send_msg(struct ospf_apiserver *apiserv,
case MSG_ISM_CHANGE:
case MSG_NSM_CHANGE:
case MSG_REACHABLE_CHANGE:
+ case MSG_ROUTER_ID_CHANGE:
fifo = apiserv->out_async_fifo;
fd = apiserv->fd_async;
event = OSPF_APISERVER_ASYNC_WRITE;
@@ -809,6 +811,9 @@ int ospf_apiserver_handle_msg(struct ospf_apiserver *apiserv, struct msg *msg)
case MSG_SYNC_NSM:
rc = ospf_apiserver_handle_sync_nsm(apiserv, msg);
break;
+ case MSG_SYNC_ROUTER_ID:
+ rc = ospf_apiserver_handle_sync_router_id(apiserv, msg);
+ break;
default:
zlog_warn("ospf_apiserver_handle_msg: Unknown message type: %d",
msg->hdr.msgtype);
@@ -1479,6 +1484,23 @@ int ospf_apiserver_handle_sync_nsm(struct ospf_apiserver *apiserv,
}
+int ospf_apiserver_handle_sync_router_id(struct ospf_apiserver *apiserv,
+ struct msg *msg)
+{
+ struct ospf *ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
+ uint32_t seqnum = msg_get_seq(msg);
+ struct msg *m;
+ int _rc, rc = 0;
+
+ m = new_msg_router_id_change(seqnum, ospf->router_id);
+ rc = ospf_apiserver_send_msg(apiserv, m);
+ msg_free(m);
+
+ /* Send a reply back to client with return code */
+ _rc = ospf_apiserver_send_reply(apiserv, seqnum, rc);
+ return rc ? rc : _rc;
+}
+
/* -----------------------------------------------------------
* Following are functions to originate or update LSA
* from an application.
@@ -2679,4 +2701,19 @@ void ospf_apiserver_notify_reachable(struct route_table *ort,
}
+void ospf_apiserver_clients_notify_router_id_change(struct in_addr router_id)
+{
+ struct msg *msg;
+
+ msg = new_msg_router_id_change(0, router_id);
+ if (!msg) {
+ zlog_warn("%s: new_msg_router_id_change failed", __func__);
+ return;
+ }
+
+ ospf_apiserver_clients_notify_all(msg);
+ msg_free(msg);
+}
+
+
#endif /* SUPPORT_OSPF_API */
diff --git a/ospfd/ospf_apiserver.h b/ospfd/ospf_apiserver.h
index 7d728ead93..e28202e46f 100644
--- a/ospfd/ospf_apiserver.h
+++ b/ospfd/ospf_apiserver.h
@@ -125,6 +125,8 @@ extern void ospf_apiserver_clients_notify_new_if(struct ospf_interface *oi);
extern void ospf_apiserver_clients_notify_del_if(struct ospf_interface *oi);
extern void ospf_apiserver_clients_notify_ism_change(struct ospf_interface *oi);
extern void ospf_apiserver_clients_notify_nsm_change(struct ospf_neighbor *nbr);
+extern void
+ospf_apiserver_clients_notify_router_id_change(struct in_addr router_id);
extern int ospf_apiserver_is_ready_type9(struct ospf_interface *oi);
extern int ospf_apiserver_is_ready_type10(struct ospf_area *area);
@@ -157,6 +159,8 @@ extern int ospf_apiserver_handle_sync_ism(struct ospf_apiserver *apiserv,
struct msg *msg);
extern int ospf_apiserver_handle_sync_nsm(struct ospf_apiserver *apiserv,
struct msg *msg);
+extern int ospf_apiserver_handle_sync_router_id(struct ospf_apiserver *apiserv,
+ struct msg *msg);
extern void ospf_apiserver_notify_reachable(struct route_table *ort,
struct route_table *nrt);
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index ee665cd25c..5bb65b6abc 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -4335,12 +4335,11 @@ static void show_ip_ospf_neighbour_header(struct vty *vty)
"Address", "Interface", "RXmtL", "RqstL", "DBsmL");
}
-static void show_ip_ospf_neighbor_sub(struct vty *vty,
- struct ospf_interface *oi,
- json_object *json, bool use_json)
+static void show_ip_ospf_neighbour_brief(struct vty *vty,
+ struct ospf_neighbor *nbr,
+ struct ospf_neighbor *prev_nbr,
+ json_object *json, bool use_json)
{
- struct route_node *rn;
- struct ospf_neighbor *nbr, *prev_nbr = NULL;
char msgbuf[16];
char timebuf[OSPF_TIME_DUMP_SIZE];
json_object *json_neighbor = NULL, *json_neigh_array = NULL;
@@ -4348,177 +4347,147 @@ static void show_ip_ospf_neighbor_sub(struct vty *vty,
long time_val = 0;
char uptime[OSPF_TIME_DUMP_SIZE];
- for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
- if ((nbr = rn->info)) {
- /* Do not show myself. */
- if (nbr == oi->nbr_self)
- continue;
- /* Down state is not shown. */
- if (nbr->state == NSM_Down)
- continue;
-
- if (nbr->ts_last_progress.tv_sec
- || nbr->ts_last_progress.tv_usec)
- time_val = monotime_since(
- &nbr->ts_last_progress, &res)
- / 1000LL;
+ if (nbr->ts_last_progress.tv_sec || nbr->ts_last_progress.tv_usec)
+ time_val =
+ monotime_since(&nbr->ts_last_progress, &res) / 1000LL;
- if (use_json) {
- char neigh_str[INET_ADDRSTRLEN];
+ if (use_json) {
+ char neigh_str[INET_ADDRSTRLEN];
- if (prev_nbr
- && !IPV4_ADDR_SAME(&prev_nbr->src,
- &nbr->src)) {
- /* Start new neigh list */
- json_neigh_array = NULL;
- }
+ if (prev_nbr && !IPV4_ADDR_SAME(&prev_nbr->src, &nbr->src)) {
+ /* Start new neigh list */
+ json_neigh_array = NULL;
+ }
- if (nbr->state == NSM_Attempt
- && nbr->router_id.s_addr == INADDR_ANY)
- strlcpy(neigh_str, "neighbor",
- sizeof(neigh_str));
- else
- inet_ntop(AF_INET, &nbr->router_id,
- neigh_str, sizeof(neigh_str));
+ if (nbr->state == NSM_Attempt &&
+ nbr->router_id.s_addr == INADDR_ANY)
+ strlcpy(neigh_str, "neighbor", sizeof(neigh_str));
+ else
+ inet_ntop(AF_INET, &nbr->router_id, neigh_str,
+ sizeof(neigh_str));
- json_object_object_get_ex(json, neigh_str,
- &json_neigh_array);
+ json_object_object_get_ex(json, neigh_str, &json_neigh_array);
- if (!json_neigh_array) {
- json_neigh_array =
- json_object_new_array();
- json_object_object_add(
- json, neigh_str,
- json_neigh_array);
- }
+ if (!json_neigh_array) {
+ json_neigh_array = json_object_new_array();
+ json_object_object_add(json, neigh_str,
+ json_neigh_array);
+ }
- json_neighbor = json_object_new_object();
+ json_neighbor = json_object_new_object();
- ospf_nbr_ism_state_message(nbr, msgbuf,
- sizeof(msgbuf));
+ ospf_nbr_ism_state_message(nbr, msgbuf, sizeof(msgbuf));
#if CONFDATE > 20230321
-CPP_NOTICE("Remove show_ip_ospf_neighbor_sub() JSON keys: priority, state, deadTimeMsecs, address, retransmitCounter, requestCounter, dbSummaryCounter")
+ CPP_NOTICE(
+ "Remove show_ip_ospf_neighbor_sub() JSON keys: priority, state, deadTimeMsecs, address, retransmitCounter, requestCounter, dbSummaryCounter")
#endif
- json_object_int_add(json_neighbor, "priority",
- nbr->priority);
- json_object_string_add(json_neighbor, "state",
- msgbuf);
- json_object_int_add(json_neighbor,
- "nbrPriority",
- nbr->priority);
- json_object_string_add(json_neighbor,
- "nbrState", msgbuf);
+ json_object_int_add(json_neighbor, "priority", nbr->priority);
+ json_object_string_add(json_neighbor, "state", msgbuf);
+ json_object_int_add(json_neighbor, "nbrPriority",
+ nbr->priority);
+ json_object_string_add(json_neighbor, "nbrState", msgbuf);
- json_object_string_add(
- json_neighbor, "converged",
- lookup_msg(ospf_nsm_state_msg,
- nbr->state, NULL));
- json_object_string_add(
- json_neighbor, "role",
- lookup_msg(ospf_ism_state_msg,
- ospf_nbr_ism_state(nbr),
- NULL));
-
- if (nbr->t_inactivity) {
- long time_store;
-
- time_store = monotime_until(
- &nbr->t_inactivity
- ->u.sands,
- NULL)
- / 1000LL;
- json_object_int_add(json_neighbor,
- "upTimeInMsec",
- time_val);
- json_object_int_add(json_neighbor,
- "deadTimeMsecs",
- time_store);
- json_object_int_add(
- json_neighbor,
- "routerDeadIntervalTimerDueMsec",
- time_store);
- json_object_string_add(
- json_neighbor, "upTime",
- ospf_timeval_dump(
- &res, uptime,
- sizeof(uptime)));
- json_object_string_add(
- json_neighbor, "deadTime",
- ospf_timer_dump(
- nbr->t_inactivity,
- timebuf,
- sizeof(timebuf)));
- } else {
- json_object_string_add(json_neighbor,
- "deadTimeMsecs",
- "inactive");
- json_object_string_add(
- json_neighbor,
- "routerDeadIntervalTimerDueMsec",
- "inactive");
- }
- json_object_string_addf(json_neighbor,
- "address", "%pI4",
- &nbr->src);
- json_object_string_addf(json_neighbor,
- "ifaceAddress", "%pI4",
- &nbr->src);
- json_object_string_add(json_neighbor,
- "ifaceName",
- IF_NAME(oi));
- json_object_int_add(
- json_neighbor, "retransmitCounter",
- ospf_ls_retransmit_count(nbr));
- json_object_int_add(
- json_neighbor,
- "linkStateRetransmissionListCounter",
- ospf_ls_retransmit_count(nbr));
- json_object_int_add(json_neighbor,
- "requestCounter",
- ospf_ls_request_count(nbr));
- json_object_int_add(
- json_neighbor,
- "linkStateRequestListCounter",
- ospf_ls_request_count(nbr));
- json_object_int_add(json_neighbor,
- "dbSummaryCounter",
- ospf_db_summary_count(nbr));
- json_object_int_add(
- json_neighbor,
- "databaseSummaryListCounter",
- ospf_db_summary_count(nbr));
+ json_object_string_add(
+ json_neighbor, "converged",
+ lookup_msg(ospf_nsm_state_msg, nbr->state, NULL));
+ json_object_string_add(json_neighbor, "role",
+ lookup_msg(ospf_ism_state_msg,
+ ospf_nbr_ism_state(nbr),
+ NULL));
+ if (nbr->t_inactivity) {
+ long time_store;
- json_object_array_add(json_neigh_array,
- json_neighbor);
- } else {
- ospf_nbr_ism_state_message(nbr, msgbuf,
- sizeof(msgbuf));
+ time_store = monotime_until(&nbr->t_inactivity->u.sands,
+ NULL) /
+ 1000LL;
+ json_object_int_add(json_neighbor, "upTimeInMsec",
+ time_val);
+ json_object_int_add(json_neighbor, "deadTimeMsecs",
+ time_store);
+ json_object_int_add(json_neighbor,
+ "routerDeadIntervalTimerDueMsec",
+ time_store);
+ json_object_string_add(
+ json_neighbor, "upTime",
+ ospf_timeval_dump(&res, uptime,
+ sizeof(uptime)));
+ json_object_string_add(
+ json_neighbor, "deadTime",
+ ospf_timer_dump(nbr->t_inactivity, timebuf,
+ sizeof(timebuf)));
+ } else {
+ json_object_string_add(json_neighbor, "deadTimeMsecs",
+ "inactive");
+ json_object_string_add(json_neighbor,
+ "routerDeadIntervalTimerDueMsec",
+ "inactive");
+ }
+ json_object_string_addf(json_neighbor, "address", "%pI4",
+ &nbr->src);
+ json_object_string_addf(json_neighbor, "ifaceAddress", "%pI4",
+ &nbr->src);
+ json_object_string_add(json_neighbor, "ifaceName",
+ IF_NAME(nbr->oi));
+ json_object_int_add(json_neighbor, "retransmitCounter",
+ ospf_ls_retransmit_count(nbr));
+ json_object_int_add(json_neighbor,
+ "linkStateRetransmissionListCounter",
+ ospf_ls_retransmit_count(nbr));
+ json_object_int_add(json_neighbor, "requestCounter",
+ ospf_ls_request_count(nbr));
+ json_object_int_add(json_neighbor,
+ "linkStateRequestListCounter",
+ ospf_ls_request_count(nbr));
+ json_object_int_add(json_neighbor, "dbSummaryCounter",
+ ospf_db_summary_count(nbr));
+ json_object_int_add(json_neighbor, "databaseSummaryListCounter",
+ ospf_db_summary_count(nbr));
- if (nbr->state == NSM_Attempt
- && nbr->router_id.s_addr == INADDR_ANY)
- vty_out(vty, "%-15s %3d %-15s ", "-",
- nbr->priority, msgbuf);
- else
- vty_out(vty, "%-15pI4 %3d %-15s ",
- &nbr->router_id, nbr->priority,
- msgbuf);
+ json_object_array_add(json_neigh_array, json_neighbor);
+ } else {
+ ospf_nbr_ism_state_message(nbr, msgbuf, sizeof(msgbuf));
- vty_out(vty, "%-15s ",
- ospf_timeval_dump(&res, uptime,
- sizeof(uptime)));
+ if (nbr->state == NSM_Attempt &&
+ nbr->router_id.s_addr == INADDR_ANY)
+ vty_out(vty, "%-15s %3d %-15s ", "-", nbr->priority,
+ msgbuf);
+ else
+ vty_out(vty, "%-15pI4 %3d %-15s ", &nbr->router_id,
+ nbr->priority, msgbuf);
+
+ vty_out(vty, "%-15s ",
+ ospf_timeval_dump(&res, uptime, sizeof(uptime)));
+
+ vty_out(vty, "%9s ",
+ ospf_timer_dump(nbr->t_inactivity, timebuf,
+ sizeof(timebuf)));
+ vty_out(vty, "%-15pI4 ", &nbr->src);
+ vty_out(vty, "%-32s %5ld %5ld %5d\n", IF_NAME(nbr->oi),
+ ospf_ls_retransmit_count(nbr),
+ ospf_ls_request_count(nbr), ospf_db_summary_count(nbr));
+ }
+}
+
+static void show_ip_ospf_neighbor_sub(struct vty *vty,
+ struct ospf_interface *oi,
+ json_object *json, bool use_json)
+{
+ struct route_node *rn;
+ struct ospf_neighbor *nbr, *prev_nbr = NULL;
+
+ for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) {
+ if ((nbr = rn->info)) {
+ /* Do not show myself. */
+ if (nbr == oi->nbr_self)
+ continue;
+ /* Down state is not shown. */
+ if (nbr->state == NSM_Down)
+ continue;
- vty_out(vty, "%9s ",
- ospf_timer_dump(nbr->t_inactivity,
- timebuf,
- sizeof(timebuf)));
- vty_out(vty, "%-15pI4 ", &nbr->src);
- vty_out(vty, "%-32s %5ld %5ld %5d\n",
- IF_NAME(oi),
- ospf_ls_retransmit_count(nbr),
- ospf_ls_request_count(nbr),
- ospf_db_summary_count(nbr));
- }
prev_nbr = nbr;
+
+ show_ip_ospf_neighbour_brief(vty, nbr, prev_nbr, json,
+ use_json);
}
}
}
@@ -5411,7 +5380,8 @@ static void show_ip_ospf_neighbor_detail_sub(struct vty *vty,
static int show_ip_ospf_neighbor_id_common(struct vty *vty, struct ospf *ospf,
struct in_addr *router_id,
- bool use_json, uint8_t use_vrf)
+ bool use_json, uint8_t use_vrf,
+ bool is_detail)
{
struct listnode *node;
struct ospf_neighbor *nbr;
@@ -5433,8 +5403,12 @@ static int show_ip_ospf_neighbor_id_common(struct vty *vty, struct ospf *ospf,
for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, node, oi)) {
if ((nbr = ospf_nbr_lookup_by_routerid(oi->nbrs, router_id))) {
- show_ip_ospf_neighbor_detail_sub(vty, oi, nbr, NULL,
- json, use_json);
+ if (is_detail)
+ show_ip_ospf_neighbor_detail_sub(
+ vty, oi, nbr, NULL, json, use_json);
+ else
+ show_ip_ospf_neighbour_brief(vty, nbr, NULL,
+ json, use_json);
}
}
@@ -5446,15 +5420,13 @@ static int show_ip_ospf_neighbor_id_common(struct vty *vty, struct ospf *ospf,
return CMD_SUCCESS;
}
-DEFPY (show_ip_ospf_neighbor_id,
- show_ip_ospf_neighbor_id_cmd,
- "show ip ospf neighbor A.B.C.D$router_id [json$json]",
- SHOW_STR
- IP_STR
- "OSPF information\n"
- "Neighbor list\n"
- "Neighbor ID\n"
- JSON_STR)
+DEFPY(show_ip_ospf_neighbor_id, show_ip_ospf_neighbor_id_cmd,
+ "show ip ospf neighbor A.B.C.D$router_id [detail$detail] [json$json]",
+ SHOW_STR IP_STR
+ "OSPF information\n"
+ "Neighbor list\n"
+ "Neighbor ID\n"
+ "Detailed output\n" JSON_STR)
{
struct ospf *ospf;
struct listnode *node;
@@ -5464,22 +5436,20 @@ DEFPY (show_ip_ospf_neighbor_id,
if (!ospf->oi_running)
continue;
ret = show_ip_ospf_neighbor_id_common(vty, ospf, &router_id,
- !!json, 0);
+ !!json, 0, !!detail);
}
return ret;
}
-DEFPY (show_ip_ospf_instance_neighbor_id,
- show_ip_ospf_instance_neighbor_id_cmd,
- "show ip ospf (1-65535)$instance neighbor A.B.C.D$router_id [json$json]",
- SHOW_STR
- IP_STR
- "OSPF information\n"
- "Instance ID\n"
- "Neighbor list\n"
- "Neighbor ID\n"
- JSON_STR)
+DEFPY(show_ip_ospf_instance_neighbor_id, show_ip_ospf_instance_neighbor_id_cmd,
+ "show ip ospf (1-65535)$instance neighbor A.B.C.D$router_id [detail$detail] [json$json]",
+ SHOW_STR IP_STR
+ "OSPF information\n"
+ "Instance ID\n"
+ "Neighbor list\n"
+ "Neighbor ID\n"
+ "Detailed output\n" JSON_STR)
{
struct ospf *ospf;
@@ -5490,8 +5460,8 @@ DEFPY (show_ip_ospf_instance_neighbor_id,
if (!ospf || !ospf->oi_running)
return CMD_SUCCESS;
- return show_ip_ospf_neighbor_id_common(vty, ospf, &router_id, !!json,
- 0);
+ return show_ip_ospf_neighbor_id_common(vty, ospf, &router_id, !!json, 0,
+ !!detail);
}
static int show_ip_ospf_neighbor_detail_common(struct vty *vty,
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index 337456ecd8..33872950ac 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -61,6 +61,7 @@
#include "ospfd/ospf_ase.h"
#include "ospfd/ospf_ldp_sync.h"
#include "ospfd/ospf_gr.h"
+#include "ospfd/ospf_apiserver.h"
DEFINE_QOBJ_TYPE(ospf);
@@ -241,6 +242,10 @@ void ospf_process_refresh_data(struct ospf *ospf, bool reset)
}
ospf_external_lsa_rid_change(ospf);
+
+#ifdef SUPPORT_OSPF_API
+ ospf_apiserver_clients_notify_router_id_change(router_id);
+#endif
}
ospf->inst_shutdown = 0;
diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c
index a67d33ca97..255fd62ba7 100644
--- a/pimd/pim6_mld.c
+++ b/pimd/pim6_mld.c
@@ -2198,7 +2198,7 @@ void gm_ifp_teardown(struct interface *ifp)
static void gm_update_ll(struct interface *ifp)
{
struct pim_interface *pim_ifp = ifp->info;
- struct gm_if *gm_ifp = pim_ifp ? pim_ifp->mld : NULL;
+ struct gm_if *gm_ifp = pim_ifp->mld;
bool was_querier;
was_querier =
diff --git a/pimd/pim6_mroute_msg.c b/pimd/pim6_mroute_msg.c
deleted file mode 100644
index 0b4b31fefa..0000000000
--- a/pimd/pim6_mroute_msg.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * PIM for Quagga
- * Copyright (C) 2022 Dell Technologies Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; see the file COPYING; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <zebra.h>
-#include "log.h"
-#include "privs.h"
-#include "if.h"
-#include "prefix.h"
-#include "vty.h"
-#include "plist.h"
-#include "sockopt.h"
-#include "lib_errors.h"
-#include "lib/network.h"
-
-#include "pimd.h"
-#include "pim_instance.h"
-#include "pim_mroute.h"
-#include "pim_oil.h"
-#include "pim_str.h"
-#include "pim_time.h"
-#include "pim_iface.h"
-#include "pim_macro.h"
-#include "pim_rp.h"
-#include "pim_oil.h"
-#include "pim_ssm.h"
-#include "pim_sock.h"
-
-int pim_mroute_set(struct pim_instance *pim, int enable)
-{
- int err;
- int opt, data;
- socklen_t data_len = sizeof(data);
-
- /*
- * We need to create the VRF table for the pim mroute_socket
- */
- if (pim->vrf->vrf_id != VRF_DEFAULT) {
- frr_with_privs (&pimd_privs) {
-
- data = pim->vrf->data.l.table_id;
- err = setsockopt(pim->mroute_socket, PIM_IPPROTO,
- MRT6_TABLE, &data, data_len);
- if (err) {
- zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO, MRT6_TABLE=%d): errno=%d: %s",
- __FILE__, __func__, pim->mroute_socket,
- data, errno, safe_strerror(errno));
- return -1;
- }
- }
- }
-
- frr_with_privs (&pimd_privs) {
- opt = enable ? MRT6_INIT : MRT6_DONE;
- /*
- * *BSD *cares* about what value we pass down
- * here
- */
- data = 1;
- err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &data,
- data_len);
- if (err) {
- zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,%s=%d): errno=%d: %s",
- __FILE__, __func__, pim->mroute_socket,
- enable ? "MRT6_INIT" : "MRT6_DONE", data, errno,
- safe_strerror(errno));
- return -1;
- }
- }
-
- if (enable) {
- /* Linux and Solaris IPV6_PKTINFO */
- data = 1;
- if (setsockopt(pim->mroute_socket, PIM_IPPROTO, IPV6_RECVPKTINFO,
- &data, data_len)) {
- zlog_warn(
- "Could not set IPV6_PKTINFO on socket fd=%d: errno=%d: %s",
- pim->mroute_socket, errno,
- safe_strerror(errno));
- }
- }
-
- setsockopt_so_recvbuf(pim->mroute_socket, 1024 * 1024 * 8);
-
- if (set_nonblocking (pim->mroute_socket) < 0) {
- zlog_warn(
- "Could not set non blocking on socket fd=%d: errno=%d: %s",
- pim->mroute_socket, errno,
- safe_strerror(errno));
- }
-
- if (enable) {
-#if defined linux
- int upcalls = MRT6MSG_WRMIFWHOLE;
- opt = MRT6_PIM;
-
- err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &upcalls,
- sizeof(upcalls));
- if (err) {
- zlog_warn(
- "Failure to register for WHOLE and WRONGMIF upcalls %d %s",
- errno, safe_strerror(errno));
- return -1;
- }
-#else
- zlog_warn(
- "PIM-SM will not work properly on this platform, until the ability to receive the WHOLEPKT upcall");
-#endif
- }
-
- return 0;
-}
-static const char *const mrt6msgtype2str[MRT6MSG_WRMIFWHOLE + 1] = {
- "<unknown_upcall?>", "NOCACHE", "WRONGMIF", "WHOLEPKT", "WRMIFWHOLE"};
-
-int pim_mroute_msg(struct pim_instance *pim, const char *buf,
- size_t buf_size, ifindex_t ifindex)
-{
- struct interface *ifp;
- const struct ip6_hdr *ip6_hdr;
- const struct mrt6msg *msg;
-
- if (buf_size < (int)sizeof(struct ip6_hdr))
- return 0;
-
- ip6_hdr = (const struct ip6_hdr *)buf;
-
- if ((ip6_hdr->ip6_vfc & 0xf) == 0) {
- msg = (const struct mrt6msg *)buf;
-
- ifp = pim_if_find_by_vif_index(pim, msg->im6_mif);
-
- if (!ifp)
- return 0;
- if (PIM_DEBUG_MROUTE) {
- zlog_debug(
- "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI6,%pI6) on %s mifi=%d size=%ld",
- __func__, mrt6msgtype2str[msg->im6_msgtype],
- msg->im6_msgtype, ip6_hdr->ip6_nxt,
- pim->mroute_socket, &msg->im6_src,
- &msg->im6_dst, ifp->name, msg->im6_mif,
- (long int)buf_size);
- }
-
- switch (msg->im6_msgtype) {
- case MRT6MSG_WRONGMIF:
- return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
- msg);
- case MRT6MSG_NOCACHE:
- return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
- msg);
- case MRT6MSG_WHOLEPKT:
- return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
- (const char *)msg,
- buf_size);
- case MRT6MSG_WRMIFWHOLE:
- return pim_mroute_msg_wrvifwhole(pim->mroute_socket,
- ifp, (const char *)msg,
- buf_size);
- default:
- break;
- }
- }
-
- return 0;
-}
-
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 6b132ee6b0..03689bea8d 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -3562,10 +3562,10 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
}
vty_out(vty,
- "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d %8s\n",
- oil_origin(c_oil), oil_mcastgrp(c_oil),
- state_str, proto, in_ifname, out_ifname,
- ttl, mroute_uptime);
+ "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
+ src_str, grp_str, state_str, proto,
+ in_ifname, out_ifname, ttl,
+ mroute_uptime);
if (first) {
src_str[0] = '\0';
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 4e92a2c5dc..ddfa2187de 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -213,8 +213,8 @@ void pim_if_delete(struct interface *ifp)
#if PIM_IPV == 4
igmp_sock_delete_all(ifp);
#endif
-
- pim_neighbor_delete_all(ifp, "Interface removed from configuration");
+ if (pim_ifp->pim_sock_fd >= 0)
+ pim_sock_delete(ifp, "Interface removed from configuration");
pim_if_del_vif(ifp);
diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c
index 8f117033e4..359e3db718 100644
--- a/pimd/pim_instance.c
+++ b/pimd/pim_instance.c
@@ -70,6 +70,8 @@ static void pim_instance_terminate(struct pim_instance *pim)
pim_msdp_exit(pim);
+ pim_mroute_socket_disable(pim);
+
XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist);
XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist);
@@ -238,5 +240,20 @@ void pim_vrf_init(void)
void pim_vrf_terminate(void)
{
+ struct vrf *vrf;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ struct pim_instance *pim;
+
+ pim = vrf->info;
+ if (!pim)
+ continue;
+
+ pim_ssmpingd_destroy(pim);
+ pim_instance_terminate(pim);
+
+ vrf->info = NULL;
+ }
+
vrf_terminate();
}
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
index b19e8208ba..23b9df4aff 100644
--- a/pimd/pim_instance.h
+++ b/pimd/pim_instance.h
@@ -88,6 +88,7 @@ struct pim_router {
uint32_t register_suppress_time;
int packet_process;
uint32_t register_probe_time;
+ uint16_t multipath;
/*
* What is the default vrf that we work in
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
index 1978afa7c0..8f1e6184d0 100644
--- a/pimd/pim_mroute.c
+++ b/pimd/pim_mroute.c
@@ -26,6 +26,7 @@
#include "plist.h"
#include "sockopt.h"
#include "lib_errors.h"
+#include "lib/network.h"
#include "pimd.h"
#include "pim_rpf.h"
@@ -47,6 +48,111 @@
static void mroute_read_on(struct pim_instance *pim);
+int pim_mroute_set(struct pim_instance *pim, int enable)
+{
+ int err;
+ int opt, data;
+ socklen_t data_len = sizeof(data);
+
+ /*
+ * We need to create the VRF table for the pim mroute_socket
+ */
+ if (pim->vrf->vrf_id != VRF_DEFAULT) {
+ frr_with_privs (&pimd_privs) {
+
+ data = pim->vrf->data.l.table_id;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO,
+ MRT_TABLE, &data, data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO, MRT_TABLE=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ data, errno, safe_strerror(errno));
+ return -1;
+ }
+ }
+ }
+
+ frr_with_privs (&pimd_privs) {
+ opt = enable ? MRT_INIT : MRT_DONE;
+ /*
+ * *BSD *cares* about what value we pass down
+ * here
+ */
+ data = 1;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &data,
+ data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,%s=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ enable ? "MRT_INIT" : "MRT_DONE", data, errno,
+ safe_strerror(errno));
+ return -1;
+ }
+ }
+
+#if defined(HAVE_IP_PKTINFO)
+ if (enable) {
+ /* Linux and Solaris IP_PKTINFO */
+ data = 1;
+ if (setsockopt(pim->mroute_socket, PIM_IPPROTO, IP_PKTINFO,
+ &data, data_len)) {
+ zlog_warn(
+ "Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+ }
+#endif
+
+#if PIM_IPV == 6
+ if (enable) {
+ /* Linux and Solaris IPV6_PKTINFO */
+ data = 1;
+ if (setsockopt(pim->mroute_socket, PIM_IPPROTO,
+ IPV6_RECVPKTINFO, &data, data_len)) {
+ zlog_warn(
+ "Could not set IPV6_RECVPKTINFO on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+ }
+#endif
+ setsockopt_so_recvbuf(pim->mroute_socket, 1024 * 1024 * 8);
+
+ if (set_nonblocking(pim->mroute_socket) < 0) {
+ zlog_warn(
+ "Could not set non blocking on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ if (enable) {
+#if defined linux
+ int upcalls = GMMSG_WRVIFWHOLE;
+ opt = MRT_PIM;
+
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &upcalls,
+ sizeof(upcalls));
+ if (err) {
+ zlog_warn(
+ "Failure to register for VIFWHOLE and WRONGVIF upcalls %d %s",
+ errno, safe_strerror(errno));
+ return -1;
+ }
+#else
+ zlog_warn(
+ "PIM-SM will not work properly on this platform, until the ability to receive the WRVIFWHOLE upcall");
+#endif
+ }
+
+ return 0;
+}
+
+static const char *const gmmsgtype2str[GMMSG_WRVIFWHOLE + 1] = {
+ "<unknown_upcall?>", "NOCACHE", "WRONGVIF", "WHOLEPKT", "WRVIFWHOLE"};
+
int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg)
{
@@ -504,6 +610,137 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
return 0;
}
+#if PIM_IPV == 4
+static int process_igmp_packet(struct pim_instance *pim, const char *buf,
+ size_t buf_size, ifindex_t ifindex)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct in_addr ifaddr;
+ struct gm_sock *igmp;
+ const struct prefix *connected_src;
+ const struct ip *ip_hdr = (const struct ip *)buf;
+
+ /* We have the IP packet but we do not know which interface this
+ * packet was
+ * received on. Find the interface that is on the same subnet as
+ * the source
+ * of the IP packet.
+ */
+ ifp = if_lookup_by_index(ifindex, pim->vrf->vrf_id);
+
+ if (!ifp || !ifp->info)
+ return 0;
+
+ connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src);
+
+ if (!connected_src) {
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "Recv IGMP packet on interface: %s from a non-connected source: %pI4",
+ ifp->name, &ip_hdr->ip_src);
+ }
+ return 0;
+ }
+
+ pim_ifp = ifp->info;
+ ifaddr = connected_src->u.prefix4;
+ igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr);
+
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4",
+ __func__, pim->vrf->name, ifp->name, igmp,
+ &ip_hdr->ip_src, &ip_hdr->ip_dst);
+ }
+ if (igmp)
+ pim_igmp_packet(igmp, (char *)buf, buf_size);
+ else if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "No IGMP socket on interface: %s with connected source: %pFX",
+ ifp->name, connected_src);
+ }
+ return 0;
+}
+#endif
+
+int pim_mroute_msg(struct pim_instance *pim, const char *buf, size_t buf_size,
+ ifindex_t ifindex)
+{
+ struct interface *ifp;
+ const ipv_hdr *ip_hdr;
+ const kernmsg *msg;
+
+ if (buf_size < (int)sizeof(ipv_hdr))
+ return 0;
+
+ ip_hdr = (const ipv_hdr *)buf;
+
+#if PIM_IPV == 4
+ if (ip_hdr->ip_p == IPPROTO_IGMP) {
+ process_igmp_packet(pim, buf, buf_size, ifindex);
+ } else if (ip_hdr->ip_p) {
+ if (PIM_DEBUG_MROUTE_DETAIL) {
+ zlog_debug(
+ "%s: no kernel upcall proto=%d src: %pI4 dst: %pI4 msg_size=%ld",
+ __func__, ip_hdr->ip_p, &ip_hdr->ip_src,
+ &ip_hdr->ip_dst, (long int)buf_size);
+ }
+
+ } else {
+#else
+
+ if ((ip_hdr->ip6_vfc & 0xf) == 0) {
+#endif
+ msg = (const kernmsg *)buf;
+
+ ifp = pim_if_find_by_vif_index(pim, msg->msg_im_vif);
+
+ if (!ifp)
+ return 0;
+ if (PIM_DEBUG_MROUTE) {
+#if PIM_IPV == 4
+ zlog_debug(
+ "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI4,%pI4) on %s vifi=%d size=%ld",
+ __func__, gmmsgtype2str[msg->msg_im_msgtype],
+ msg->msg_im_msgtype, ip_hdr->ip_p,
+ pim->mroute_socket, &msg->msg_im_src,
+ &msg->msg_im_dst, ifp->name, msg->msg_im_vif,
+ (long int)buf_size);
+#else
+ zlog_debug(
+ "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI6,%pI6) on %s vifi=%d size=%ld",
+ __func__, gmmsgtype2str[msg->msg_im_msgtype],
+ msg->msg_im_msgtype, ip_hdr->ip6_nxt,
+ pim->mroute_socket, &msg->msg_im_src,
+ &msg->msg_im_dst, ifp->name, msg->msg_im_vif,
+ (long int)buf_size);
+#endif
+ }
+
+ switch (msg->msg_im_msgtype) {
+ case GMMSG_WRONGVIF:
+ return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
+ msg);
+ case GMMSG_NOCACHE:
+ return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
+ msg);
+ case GMMSG_WHOLEPKT:
+ return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
+ (const char *)msg,
+ buf_size);
+ case GMMSG_WRVIFWHOLE:
+ return pim_mroute_msg_wrvifwhole(pim->mroute_socket,
+ ifp, (const char *)msg,
+ buf_size);
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
static void mroute_read(struct thread *t)
{
struct pim_instance *pim;
diff --git a/pimd/pim_mroute.h b/pimd/pim_mroute.h
index 712c916994..d6798c52ad 100644
--- a/pimd/pim_mroute.h
+++ b/pimd/pim_mroute.h
@@ -56,6 +56,7 @@ typedef struct sioc_sg_req pim_sioc_sg_req;
#define vc_lcl_ifindex vifc_lcl_ifindex
#define vc_rmt_addr vifc_rmt_addr
+#define msg_im_msgtype im_msgtype
#define msg_im_vif im_vif
#define msg_im_src im_src
#define msg_im_dst im_dst
@@ -64,6 +65,13 @@ typedef struct sioc_sg_req pim_sioc_sg_req;
#define IGMPMSG_WRVIFWHOLE 4 /* For PIM processing */
#endif
+#ifndef GMMSG_NOCACHE
+#define GMMSG_NOCACHE IGMPMSG_NOCACHE /* For PIM processing */
+#define GMMSG_WHOLEPKT IGMPMSG_WHOLEPKT /* For PIM processing */
+#define GMMSG_WRONGVIF IGMPMSG_WRONGVIF /* For PIM processing */
+#define GMMSG_WRVIFWHOLE IGMPMSG_WRVIFWHOLE /* For PIM processing */
+#endif
+
#ifndef PIM_IPPROTO
#define PIM_IPPROTO IPPROTO_IP
#endif
@@ -94,6 +102,7 @@ typedef struct sioc_sg_req pim_sioc_sg_req;
#define MRT_VERSION MRT6_VERSION
#define MRT_ASSERT MRT6_ASSERT
#define MRT_PIM MRT6_PIM
+#define MRT_TABLE MRT6_TABLE
#endif
#ifndef PIM_IPPROTO
@@ -108,6 +117,13 @@ typedef struct sioc_sg_req pim_sioc_sg_req;
#define MRT6MSG_WRMIFWHOLE 4 /* For PIM processing */
#endif
+#ifndef GMMSG_NOCACHE
+#define GMMSG_NOCACHE MRT6MSG_NOCACHE /* For PIM processing */
+#define GMMSG_WHOLEPKT MRT6MSG_WHOLEPKT /* For PIM processing */
+#define GMMSG_WRONGVIF MRT6MSG_WRONGMIF /* For PIM processing */
+#define GMMSG_WRVIFWHOLE MRT6MSG_WRMIFWHOLE /* For PIM processing */
+#endif
+
typedef struct mif6ctl pim_vifctl;
typedef struct mrt6msg kernmsg;
typedef mifi_t vifi_t;
@@ -119,6 +135,7 @@ typedef struct sioc_sg_req6 pim_sioc_sg_req;
#define vc_pifi mif6c_pifi
#define vc_rate_limit vifc_rate_limit
+#define msg_im_msgtype im6_msgtype
#define msg_im_vif im6_mif
#define msg_im_src im6_src
#define msg_im_dst im6_dst
diff --git a/pimd/pim_mroute_msg.c b/pimd/pim_mroute_msg.c
deleted file mode 100644
index ef12ec7a49..0000000000
--- a/pimd/pim_mroute_msg.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * PIM for Quagga
- * Copyright (C) 2022 Dell Technologies Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; see the file COPYING; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <zebra.h>
-#include "log.h"
-#include "privs.h"
-#include "if.h"
-#include "prefix.h"
-#include "vty.h"
-#include "plist.h"
-#include "sockopt.h"
-#include "lib_errors.h"
-#include "lib/network.h"
-
-#include "pimd.h"
-#include "pim_instance.h"
-#include "pim_mroute.h"
-#include "pim_oil.h"
-#include "pim_str.h"
-#include "pim_iface.h"
-#include "pim_macro.h"
-#include "pim_rp.h"
-#include "pim_oil.h"
-#include "pim_msg.h"
-#include "pim_sock.h"
-
-
-int pim_mroute_set(struct pim_instance *pim, int enable)
-{
- int err;
- int opt, data;
- socklen_t data_len = sizeof(data);
-
- /*
- * We need to create the VRF table for the pim mroute_socket
- */
- if (pim->vrf->vrf_id != VRF_DEFAULT) {
- frr_with_privs(&pimd_privs) {
-
- data = pim->vrf->data.l.table_id;
- err = setsockopt(pim->mroute_socket, PIM_IPPROTO,
- MRT_TABLE, &data, data_len);
- if (err) {
- zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO, MRT_TABLE=%d): errno=%d: %s",
- __FILE__, __func__, pim->mroute_socket,
- data, errno, safe_strerror(errno));
- return -1;
- }
-
- }
- }
-
- frr_with_privs(&pimd_privs) {
- opt = enable ? MRT_INIT : MRT_DONE;
- /*
- * *BSD *cares* about what value we pass down
- * here
- */
- data = 1;
- err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &data,
- data_len);
- if (err) {
- zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,%s=%d): errno=%d: %s",
- __FILE__, __func__, pim->mroute_socket,
- enable ? "MRT_INIT" : "MRT_DONE", data, errno,
- safe_strerror(errno));
- return -1;
- }
- }
-
-#if defined(HAVE_IP_PKTINFO)
- if (enable) {
- /* Linux and Solaris IP_PKTINFO */
- data = 1;
- if (setsockopt(pim->mroute_socket, PIM_IPPROTO, IP_PKTINFO,
- &data, data_len)) {
- zlog_warn(
- "Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
- pim->mroute_socket, errno,
- safe_strerror(errno));
- }
- }
-#endif
-
- setsockopt_so_recvbuf(pim->mroute_socket, 1024 * 1024 * 8);
-
- if (set_nonblocking (pim->mroute_socket) < 0) {
- zlog_warn(
- "Could not set non blocking on socket fd=%d: errno=%d: %s",
- pim->mroute_socket, errno,
- safe_strerror(errno));
- }
-
- if (enable) {
-#if defined linux
- int upcalls = IGMPMSG_WRVIFWHOLE;
- opt = MRT_PIM;
-
- err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &upcalls,
- sizeof(upcalls));
- if (err) {
- zlog_warn(
- "Failure to register for VIFWHOLE and WRONGVIF upcalls %d %s",
- errno, safe_strerror(errno));
- return -1;
- }
-#else
- zlog_warn(
- "PIM-SM will not work properly on this platform, until the ability to receive the WRVIFWHOLE upcall");
-#endif
-
- }
-
- return 0;
-}
-
-static const char *const igmpmsgtype2str[IGMPMSG_WRVIFWHOLE + 1] = {
- "<unknown_upcall?>", "NOCACHE", "WRONGVIF", "WHOLEPKT", "WRVIFWHOLE"};
-
-
-int pim_mroute_msg(struct pim_instance *pim, const char *buf,
- size_t buf_size, ifindex_t ifindex)
-{
- struct interface *ifp;
- const struct ip *ip_hdr;
- const struct igmpmsg *msg;
-
- if (buf_size < (int)sizeof(struct ip))
- return 0;
-
- ip_hdr = (const struct ip *)buf;
-
- if (ip_hdr->ip_p == IPPROTO_IGMP) {
- struct pim_interface *pim_ifp;
- struct in_addr ifaddr;
- struct gm_sock *igmp;
- const struct prefix *connected_src;
-
- /* We have the IP packet but we do not know which interface this
- * packet was
- * received on. Find the interface that is on the same subnet as
- * the source
- * of the IP packet.
- */
- ifp = if_lookup_by_index(ifindex, pim->vrf->vrf_id);
-
- if (!ifp || !ifp->info)
- return 0;
-
- connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src);
-
- if (!connected_src) {
- if (PIM_DEBUG_IGMP_PACKETS) {
- zlog_debug(
- "Recv IGMP packet on interface: %s from a non-connected source: %pI4",
- ifp->name, &ip_hdr->ip_src);
- }
- return 0;
- }
-
- pim_ifp = ifp->info;
- ifaddr = connected_src->u.prefix4;
- igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list,
- ifaddr);
-
- if (PIM_DEBUG_IGMP_PACKETS) {
- zlog_debug(
- "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4",
- __func__, pim->vrf->name, ifp->name, igmp,
- &ip_hdr->ip_src, &ip_hdr->ip_dst);
- }
- if (igmp)
- pim_igmp_packet(igmp, (char *)buf, buf_size);
- else if (PIM_DEBUG_IGMP_PACKETS) {
- zlog_debug(
- "No IGMP socket on interface: %s with connected source: %pFX",
- ifp->name, connected_src);
- }
- } else if (ip_hdr->ip_p) {
- if (PIM_DEBUG_MROUTE_DETAIL) {
- zlog_debug(
- "%s: no kernel upcall proto=%d src: %pI4 dst: %pI4 msg_size=%ld",
- __func__, ip_hdr->ip_p, &ip_hdr->ip_src,
- &ip_hdr->ip_dst, (long int)buf_size);
- }
-
- } else {
- msg = (const struct igmpmsg *)buf;
-
- ifp = pim_if_find_by_vif_index(pim, msg->im_vif);
-
- if (!ifp)
- return 0;
- if (PIM_DEBUG_MROUTE) {
- zlog_debug(
- "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI4,%pI4) on %s vifi=%d size=%ld",
- __func__, igmpmsgtype2str[msg->im_msgtype],
- msg->im_msgtype, ip_hdr->ip_p,
- pim->mroute_socket, &msg->im_src, &msg->im_dst,
- ifp->name, msg->im_vif, (long int)buf_size);
- }
-
- switch (msg->im_msgtype) {
- case IGMPMSG_WRONGVIF:
- return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
- msg);
- case IGMPMSG_NOCACHE:
- return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
- msg);
- case IGMPMSG_WHOLEPKT:
- return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
- (const char *)msg,
- buf_size);
- case IGMPMSG_WRVIFWHOLE:
- return pim_mroute_msg_wrvifwhole(pim->mroute_socket,
- ifp, (const char *)msg,
- buf_size);
- default:
- break;
- }
- }
-
- return 0;
-}
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index 22716c2a92..564d16a60b 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -304,15 +304,15 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
* "check cache or get immediate result." But until that can
* be worked in, here's a copy of the code below :(
*/
- struct pim_zlookup_nexthop nexthop_tab[MULTIPATH_NUM];
+ struct pim_zlookup_nexthop nexthop_tab[router->multipath];
ifindex_t i;
struct interface *ifp = NULL;
int num_ifindex;
memset(nexthop_tab, 0, sizeof(nexthop_tab));
- num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab,
- MULTIPATH_NUM, bsr_addr,
- PIM_NEXTHOP_LOOKUP_MAX);
+ num_ifindex = zclient_lookup_nexthop(
+ pim, nexthop_tab, router->multipath, bsr_addr,
+ PIM_NEXTHOP_LOOKUP_MAX);
if (num_ifindex <= 0)
return false;
@@ -501,8 +501,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
struct prefix *src, struct prefix *grp,
int neighbor_needed)
{
- struct pim_neighbor *nbrs[MULTIPATH_NUM], *nbr = NULL;
- struct interface *ifps[MULTIPATH_NUM];
+ struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
+ struct interface *ifps[router->multipath];
struct nexthop *nh_node = NULL;
ifindex_t first_ifindex;
struct interface *ifp = NULL;
@@ -888,11 +888,11 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
struct prefix *grp, int neighbor_needed)
{
struct pim_nexthop_cache *pnc;
- struct pim_zlookup_nexthop nexthop_tab[MULTIPATH_NUM];
- struct pim_neighbor *nbrs[MULTIPATH_NUM], *nbr = NULL;
+ struct pim_zlookup_nexthop nexthop_tab[router->multipath];
+ struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
struct pim_rpf rpf;
int num_ifindex;
- struct interface *ifps[MULTIPATH_NUM], *ifp;
+ struct interface *ifps[router->multipath], *ifp;
int first_ifindex;
int found = 0;
uint8_t i = 0;
@@ -915,9 +915,10 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
}
memset(nexthop_tab, 0,
- sizeof(struct pim_zlookup_nexthop) * MULTIPATH_NUM);
- num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, MULTIPATH_NUM,
- src_addr, PIM_NEXTHOP_LOOKUP_MAX);
+ sizeof(struct pim_zlookup_nexthop) * router->multipath);
+ num_ifindex =
+ zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
+ src_addr, PIM_NEXTHOP_LOOKUP_MAX);
if (num_ifindex < 1) {
if (PIM_DEBUG_PIM_NHT)
zlog_warn(
diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c
index f0f4a7139b..1dd99d7ecd 100644
--- a/pimd/pim_pim.c
+++ b/pimd/pim_pim.c
@@ -42,6 +42,7 @@
#include "pim_register.h"
#include "pim_errors.h"
#include "pim_bsm.h"
+#include <lib/lib_errors.h>
static void on_pim_hello_send(struct thread *t);
@@ -551,6 +552,7 @@ void pim_sock_reset(struct interface *ifp)
static uint16_t ip_id = 0;
#endif
+#if PIM_IPV == 4
static int pim_msg_send_frame(int fd, char *buf, size_t len,
struct sockaddr *dst, size_t salen,
const char *ifname)
@@ -558,7 +560,6 @@ static int pim_msg_send_frame(int fd, char *buf, size_t len,
if (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) >= 0)
return 0;
-#if PIM_IPV == 4
if (errno == EMSGSIZE) {
struct ip *ip = (struct ip *)buf;
size_t hdrsize = sizeof(struct ip);
@@ -585,7 +586,6 @@ static int pim_msg_send_frame(int fd, char *buf, size_t len,
return pim_msg_send_frame(fd, (char *)ip2, sendlen, dst, salen,
ifname);
}
-#endif
zlog_warn(
"%s: sendto() failure to %pSU: iface=%s fd=%d msg_size=%zd: %m",
@@ -593,16 +593,65 @@ static int pim_msg_send_frame(int fd, char *buf, size_t len,
return -1;
}
+#else
+static int pim_msg_send_frame(pim_addr src, pim_addr dst, ifindex_t ifindex,
+ struct iovec *message, int fd)
+{
+ int retval;
+ struct msghdr smsghdr = {};
+ struct cmsghdr *scmsgp;
+ union cmsgbuf {
+ struct cmsghdr hdr;
+ uint8_t buf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
+ };
+ struct in6_pktinfo *pktinfo;
+ struct sockaddr_in6 dst_sin6 = {};
+
+ union cmsgbuf cmsg_buf = {};
+
+ /* destination address */
+ dst_sin6.sin6_family = AF_INET6;
+#ifdef SIN6_LEN
+ dst_sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif /*SIN6_LEN*/
+ dst_sin6.sin6_addr = dst;
+ dst_sin6.sin6_scope_id = ifindex;
+
+ /* send msg hdr */
+ smsghdr.msg_iov = message;
+ smsghdr.msg_iovlen = 1;
+ smsghdr.msg_name = (caddr_t)&dst_sin6;
+ smsghdr.msg_namelen = sizeof(dst_sin6);
+ smsghdr.msg_control = (caddr_t)&cmsg_buf.buf;
+ smsghdr.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
+ smsghdr.msg_flags = 0;
+
+ scmsgp = CMSG_FIRSTHDR(&smsghdr);
+ scmsgp->cmsg_level = IPPROTO_IPV6;
+ scmsgp->cmsg_type = IPV6_PKTINFO;
+ scmsgp->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
+
+ pktinfo = (struct in6_pktinfo *)(CMSG_DATA(scmsgp));
+ pktinfo->ipi6_ifindex = ifindex;
+ pktinfo->ipi6_addr = src;
+
+ retval = sendmsg(fd, &smsghdr, 0);
+ if (retval < 0)
+ flog_err(
+ EC_LIB_SOCKET,
+ "sendmsg failed: source: %pI6 Dest: %pI6 ifindex: %d: %s (%d)",
+ &src, &dst, ifindex, safe_strerror(errno), errno);
+
+ return retval;
+}
+#endif
+
int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
int pim_msg_size, struct interface *ifp)
{
- socklen_t tolen;
- unsigned char buffer[10000];
- unsigned char *msg_start;
- uint8_t ttl;
- struct pim_msg_header *header;
struct pim_interface *pim_ifp;
+
pim_ifp = ifp->info;
if (pim_ifp->pim_passive_enable) {
@@ -613,9 +662,15 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
return 0;
}
+#if PIM_IPV == 4
+ uint8_t ttl;
+ struct pim_msg_header *header;
+ unsigned char buffer[10000];
+
memset(buffer, 0, 10000);
header = (struct pim_msg_header *)pim_msg;
+
/*
* Omnios apparently doesn't have a #define for IP default
* ttl that is the same as all other platforms.
@@ -643,10 +698,11 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
break;
}
-#if PIM_IPV == 4
struct ip *ip = (struct ip *)buffer;
struct sockaddr_in to = {};
int sendlen = sizeof(*ip) + pim_msg_size;
+ socklen_t tolen;
+ unsigned char *msg_start;
ip->ip_id = htons(++ip_id);
ip->ip_hl = 5;
@@ -661,23 +717,6 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
to.sin_family = AF_INET;
to.sin_addr = dst;
tolen = sizeof(to);
-#else
- struct ip6_hdr *ip = (struct ip6_hdr *)buffer;
- struct sockaddr_in6 to = {};
- int sendlen = sizeof(*ip) + pim_msg_size;
-
- ip->ip6_flow = 0;
- ip->ip6_vfc = (6 << 4) | (IPTOS_PREC_INTERNETCONTROL >> 4);
- ip->ip6_plen = htons(pim_msg_size);
- ip->ip6_nxt = PIM_IP_PROTO_PIM;
- ip->ip6_hlim = ttl;
- ip->ip6_src = src;
- ip->ip6_dst = dst;
-
- to.sin6_family = AF_INET6;
- to.sin6_addr = dst;
- tolen = sizeof(to);
-#endif
msg_start = buffer + sizeof(*ip);
memcpy(msg_start, pim_msg, pim_msg_size);
@@ -694,6 +733,17 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
pim_msg_send_frame(fd, (char *)buffer, sendlen, (struct sockaddr *)&to,
tolen, ifp->name);
return 0;
+
+#else
+ struct iovec iovector[2];
+
+ iovector[0].iov_base = pim_msg;
+ iovector[0].iov_len = pim_msg_size;
+
+ pim_msg_send_frame(src, dst, ifp->ifindex, &iovector[0], fd);
+
+ return 0;
+#endif
}
static int hello_send(struct interface *ifp, uint16_t holdtime)
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index 1945d99d21..8403340d86 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -234,6 +234,48 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
return 0;
}
+#if PIM_IPV == 6
+struct in6_addr pim_register_get_unicast_v6_addr(struct pim_interface *p_ifp)
+{
+ struct listnode *node;
+ struct listnode *nextnode;
+ struct pim_secondary_addr *sec_addr;
+ struct pim_interface *pim_ifp;
+ struct interface *ifp;
+ struct pim_instance *pim = p_ifp->pim;
+
+ /* Trying to get the unicast address from the RPF interface first */
+ for (ALL_LIST_ELEMENTS(p_ifp->sec_addr_list, node, nextnode,
+ sec_addr)) {
+ if (!is_ipv6_global_unicast(&sec_addr->addr.u.prefix6))
+ continue;
+
+ return sec_addr->addr.u.prefix6;
+ }
+
+ /* Loop through all the pim interface and try to return a global
+ * unicast ipv6 address
+ */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ for (ALL_LIST_ELEMENTS(pim_ifp->sec_addr_list, node, nextnode,
+ sec_addr)) {
+ if (!is_ipv6_global_unicast(&sec_addr->addr.u.prefix6))
+ continue;
+
+ return sec_addr->addr.u.prefix6;
+ }
+ }
+
+ zlog_warn("No global address found for use to send register message");
+ return PIMADDR_ANY;
+}
+#endif
+
void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
struct pim_rpf *rpg, int null_register,
struct pim_upstream *up)
@@ -278,6 +320,13 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
memcpy(b1, (const unsigned char *)buf, buf_size);
+#if PIM_IPV == 6
+ /* While sending Register message to RP, we cannot use link-local
+ * address therefore using unicast ipv6 address here, choosing it
+ * from the RPF Interface
+ */
+ src = pim_register_get_unicast_v6_addr(pinfo);
+#endif
pim_msg_build_header(src, dst, buffer, buf_size + PIM_MSG_REGISTER_LEN,
PIM_MSG_TYPE_REGISTER, false);
diff --git a/pimd/pim_register.h b/pimd/pim_register.h
index 79c64d995f..ddb34921ae 100644
--- a/pimd/pim_register.h
+++ b/pimd/pim_register.h
@@ -34,7 +34,9 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size);
int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size);
-
+#if PIM_IPV == 6
+struct in6_addr pim_register_get_unicast_v6_addr(struct pim_interface *p_ifp);
+#endif
void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
struct pim_rpf *rpg, int null_register,
struct pim_upstream *up);
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index c470bcd4d7..bd4dd31a2c 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -54,7 +54,7 @@ void pim_rpf_set_refresh_time(struct pim_instance *pim)
bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
pim_addr addr, int neighbor_needed)
{
- struct pim_zlookup_nexthop nexthop_tab[MULTIPATH_NUM];
+ struct pim_zlookup_nexthop nexthop_tab[router->multipath];
struct pim_neighbor *nbr = NULL;
int num_ifindex;
struct interface *ifp = NULL;
@@ -92,9 +92,10 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
}
memset(nexthop_tab, 0,
- sizeof(struct pim_zlookup_nexthop) * MULTIPATH_NUM);
- num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, MULTIPATH_NUM,
- addr, PIM_NEXTHOP_LOOKUP_MAX);
+ sizeof(struct pim_zlookup_nexthop) * router->multipath);
+ num_ifindex =
+ zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
+ addr, PIM_NEXTHOP_LOOKUP_MAX);
if (num_ifindex < 1) {
zlog_warn(
"%s %s: could not find nexthop ifindex for address %pPAs",
diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c
index 321775cce3..afc5d47118 100644
--- a/pimd/pim_sock.c
+++ b/pimd/pim_sock.c
@@ -70,18 +70,13 @@ int pim_socket_raw(int protocol)
void pim_socket_ip_hdr(int fd)
{
- const int on = 1;
-
frr_with_privs(&pimd_privs) {
#if PIM_IPV == 4
+ const int on = 1;
+
if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)))
zlog_err("%s: Could not turn on IP_HDRINCL option: %m",
__func__);
-#else
- if (setsockopt(fd, IPPROTO_IPV6, IPV6_HDRINCL, &on, sizeof(on)))
- zlog_err(
- "%s: Could not turn on IPV6_HDRINCL option: %m",
- __func__);
#endif
}
}
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index 3817d5d9e1..54a7f59ca4 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -1948,8 +1948,8 @@ void pim_upstream_terminate(struct pim_instance *pim)
struct pim_upstream *up;
while ((up = rb_pim_upstream_first(&pim->upstream_head))) {
- pim_upstream_del(pim, up, __func__);
- pim_upstream_timers_stop(up);
+ if (pim_upstream_del(pim, up, __func__))
+ pim_upstream_timers_stop(up);
}
rb_pim_upstream_fini(&pim->upstream_head);
diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c
index 4135ae0408..7f217d9c2e 100644
--- a/pimd/pim_zebra.c
+++ b/pimd/pim_zebra.c
@@ -452,6 +452,7 @@ static void pim_zebra_connected(struct zclient *zclient)
static void pim_zebra_capabilities(struct zclient_capabilities *cap)
{
router->mlag_role = cap->role;
+ router->multipath = cap->ecmp;
}
static zclient_handler *const pim_handlers[] = {
diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c
index a3978bd0d2..87cf434133 100644
--- a/pimd/pim_zlookup.c
+++ b/pimd/pim_zlookup.c
@@ -211,7 +211,7 @@ static int zclient_read_nexthop(struct pim_instance *pim,
metric = stream_getl(s);
nexthop_num = stream_getc(s);
- if (nexthop_num < 1) {
+ if (nexthop_num < 1 || nexthop_num > router->multipath) {
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("%s: socket %d bad nexthop_num=%d", __func__,
zlookup->sock, nexthop_num);
@@ -258,8 +258,9 @@ static int zclient_read_nexthop(struct pim_instance *pim,
nexthop_tab[num_ifindex].ifindex = nh_ifi;
++num_ifindex;
#else
- zlog_warn("cannot use IPv4 nexthop %pI4 for IPv6 %pPA",
- &nh_ip4, &addr);
+ zlog_warn(
+ "cannot use IPv4 nexthop %pI4(%d) for IPv6 %pPA",
+ &nh_ip4, nh_ifi, &addr);
#endif
break;
case NEXTHOP_TYPE_IPV6_IFINDEX:
diff --git a/pimd/pimd.c b/pimd/pimd.c
index 58e874fd11..a3be3f66e1 100644
--- a/pimd/pimd.c
+++ b/pimd/pimd.c
@@ -100,6 +100,7 @@ void pim_router_init(void)
router->debugs = 0;
router->master = frr_init();
router->t_periodic = PIM_DEFAULT_T_PERIODIC;
+ router->multipath = MULTIPATH_NUM;
/*
RFC 4601: 4.6.3. Assert Metrics
diff --git a/pimd/subdir.am b/pimd/subdir.am
index 4dd63f6b88..1424b2a45b 100644
--- a/pimd/subdir.am
+++ b/pimd/subdir.am
@@ -78,7 +78,6 @@ pimd_pimd_SOURCES = \
pimd/pim_msdp_socket.c \
pimd/pim_signals.c \
pimd/pim_zpthread.c \
- pimd/pim_mroute_msg.c \
# end
nodist_pimd_pimd_SOURCES = \
@@ -93,7 +92,6 @@ pimd_pim6d_SOURCES = \
pimd/pim6_mld.c \
pimd/pim6_stubs.c \
pimd/pim6_cmd.c \
- pimd/pim6_mroute_msg.c \
# end
nodist_pimd_pim6d_SOURCES = \
diff --git a/python/tiabwarfo.py b/python/tiabwarfo.py
index 265173e314..4a6cd6ad77 100644
--- a/python/tiabwarfo.py
+++ b/python/tiabwarfo.py
@@ -21,7 +21,6 @@ import os
import subprocess
import re
import argparse
-import subprocess
import json
structs = ['xref', 'xref_logmsg', 'xref_threadsched', 'xref_install_element', 'xrefdata', 'xrefdata_logmsg', 'cmd_element']
diff --git a/tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json b/tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json
new file mode 100644
index 0000000000..5fae34dd76
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/bgp_default_originate_topo1.json
@@ -0,0 +1,294 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "192.168.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r0": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r0": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r0": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r0": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "500",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"keepalivetimer": 1,
+ "holddowntimer": 3}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
new file mode 100644
index 0000000000..ee71ae16e0
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
@@ -0,0 +1,2537 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+1. Verify BGP default-originate route with IBGP peer
+2. Verify BGP default-originate route with EBGP peer
+3. Verify BGP default route when default-originate configured with route-map over IBGP peer
+4. Verify BGP default route when default-originate configured with route-map over EBGP peer
+
+"""
+import os
+import sys
+import time
+import pytest
+from time import sleep
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ verify_graceful_restart,
+ create_router_bgp,
+ verify_router_id,
+ modify_as_number,
+ verify_as_numbers,
+ clear_bgp_and_verify,
+ clear_bgp,
+ verify_bgp_rib,
+ get_prefix_count_route,
+ get_dut_as_number,
+ verify_rib_default_route,
+ verify_fib_default_route,
+ verify_bgp_advertised_routes_from_neighbor,
+ verify_bgp_received_routes_from_neighbor,
+)
+from lib.common_config import (
+ interface_status,
+ verify_prefix_lists,
+ verify_fib_routes,
+ kill_router_daemons,
+ start_router_daemons,
+ shutdown_bringup_interface,
+ step,
+ required_linux_kernel_version,
+ stop_router,
+ start_router,
+ create_route_maps,
+ create_prefix_lists,
+ get_frr_ipv6_linklocal,
+ start_topology,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ check_router_status,
+ delete_route_maps,
+)
+
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+KEEPALIVETIMER = 1
+HOLDDOWNTIMER = 3
+# Global variables
+NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
+NETWORK1_2 = {"ipv4": "1.1.1.2/32", "ipv6": "1::2/128"}
+NETWORK2_1 = {"ipv4": "2.1.1.1/32", "ipv6": "2::1/128"}
+NETWORK2_2 = {"ipv4": "2.1.1.2/32", "ipv6": "2::2/128"}
+NETWORK3_1 = {"ipv4": "3.1.1.1/32", "ipv6": "3::1/128"}
+NETWORK3_2 = {"ipv4": "3.1.1.2/32", "ipv6": "3::2/128"}
+NETWORK4_1 = {"ipv4": "4.1.1.1/32", "ipv6": "4::1/128"}
+NETWORK4_2 = {"ipv4": "4.1.1.2/32", "ipv6": "4::2/128"}
+NETWORK5_1 = {"ipv4": "5.1.1.1/32", "ipv6": "5::1/128"}
+NETWORK5_2 = {"ipv4": "5.1.1.2/32", "ipv6": "5::2/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+IPV4_RM = "RMVIPV4"
+IPV6_RM = "RMVIPV6"
+
+IPV4_RM1 = "RMVIPV41"
+IPV6_RM1 = "RMVIPV61"
+
+IPV4_RM2 = "RMVIPV42"
+IPV6_RM2 = "RMVIPV62"
+
+IPV4_PL_1 = "PV41"
+IPV4_PL_2 = "PV42"
+
+IPV6_PL_1 = "PV61"
+IPV6_PL_2 = "PV62"
+
+
+r1_ipv4_loopback = "1.0.1.0/24"
+r2_ipv4_loopback = "1.0.2.0/24"
+r3_ipv4_loopback = "1.0.3.0/24"
+r4_ipv4_loopback = "1.0.4.0/24"
+r1_ipv6_loopback = "2001:db8:f::1:0/120"
+r2_ipv6_loopback = "2001:db8:f::2:0/120"
+r3_ipv6_loopback = "2001:db8:f::3:0/120"
+r4_ipv6_loopback = "2001:db8:f::4:0/120"
+
+r0_connected_address_ipv4 = "192.168.0.0/24"
+r0_connected_address_ipv6 = "fd00::/64"
+r1_connected_address_ipv4 = "192.168.1.0/24"
+r1_connected_address_ipv6 = "fd00:0:0:1::/64"
+r3_connected_address_ipv4 = "192.168.2.0/24"
+r3_connected_address_ipv6 = "fd00:0:0:2::/64"
+r4_connected_address_ipv4 = "192.168.3.0/24"
+r4_connected_address_ipv6 = "fd00:0:0:3::/64"
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.15")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+ global R0_NETWORK_LOOPBACK, R0_NETWORK_LOOPBACK_NXTHOP, R1_NETWORK_LOOPBACK, R1_NETWORK_LOOPBACK_NXTHOP
+ global R0_NETWORK_CONNECTED, R0_NETWORK_CONNECTED_NXTHOP, R1_NETWORK_CONNECTED, R1_NETWORK_CONNECTED_NXTHOP
+ global R4_NETWORK_LOOPBACK, R4_NETWORK_LOOPBACK_NXTHOP, R3_NETWORK_LOOPBACK, R3_NETWORK_LOOPBACK_NXTHOP
+ global R4_NETWORK_CONNECTED, R4_NETWORK_CONNECTED_NXTHOP, R3_NETWORK_CONNECTED, R3_NETWORK_CONNECTED_NXTHOP
+
+ ADDR_TYPES = check_address_types()
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+ # These are the global variables used throughout the file; they are populated only after building the topology.
+
+ r0_loopback_address_ipv4 = topo["routers"]["r0"]["links"]["lo"]["ipv4"]
+ r0_loopback_address_ipv4_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv4"
+ ].split("/")[0]
+ r0_loopback_address_ipv6 = topo["routers"]["r0"]["links"]["lo"]["ipv6"]
+ r0_loopback_address_ipv6_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv6"
+ ].split("/")[0]
+
+ r1_loopback_address_ipv4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+ r1_loopback_address_ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r1_loopback_address_ipv6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+ r1_loopback_address_ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ r4_loopback_address_ipv4 = topo["routers"]["r4"]["links"]["lo"]["ipv4"]
+ r4_loopback_address_ipv4_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv4"
+ ].split("/")[0]
+ r4_loopback_address_ipv6 = topo["routers"]["r4"]["links"]["lo"]["ipv6"]
+ r4_loopback_address_ipv6_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv6"
+ ].split("/")[0]
+
+ r3_loopback_address_ipv4 = topo["routers"]["r3"]["links"]["lo"]["ipv4"]
+ r3_loopback_address_ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r3_loopback_address_ipv6 = topo["routers"]["r3"]["links"]["lo"]["ipv6"]
+ r3_loopback_address_ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ R0_NETWORK_LOOPBACK = {
+ "ipv4": r0_loopback_address_ipv4,
+ "ipv6": r0_loopback_address_ipv6,
+ }
+ R0_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_LOOPBACK = {
+ "ipv4": r1_loopback_address_ipv4,
+ "ipv6": r1_loopback_address_ipv6,
+ }
+ R1_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r1_loopback_address_ipv4_nxt_hop,
+ "ipv6": r1_loopback_address_ipv6_nxt_hop,
+ }
+
+ R0_NETWORK_CONNECTED = {
+ "ipv4": r0_connected_address_ipv4,
+ "ipv6": r0_connected_address_ipv6,
+ }
+ R0_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_CONNECTED = {
+ "ipv4": r1_connected_address_ipv4,
+ "ipv6": r1_connected_address_ipv6,
+ }
+ R1_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r1_loopback_address_ipv4_nxt_hop,
+ "ipv6": r1_loopback_address_ipv6_nxt_hop,
+ }
+
+ R4_NETWORK_LOOPBACK = {
+ "ipv4": r4_loopback_address_ipv4,
+ "ipv6": r4_loopback_address_ipv6,
+ }
+ R4_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_LOOPBACK = {
+ "ipv4": r3_loopback_address_ipv4,
+ "ipv6": r3_loopback_address_ipv6,
+ }
+ R3_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r3_loopback_address_ipv4_nxt_hop,
+ "ipv6": r3_loopback_address_ipv6_nxt_hop,
+ }
+
+ R4_NETWORK_CONNECTED = {
+ "ipv4": r4_connected_address_ipv4,
+ "ipv6": r4_connected_address_ipv6,
+ }
+ R4_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_CONNECTED = {
+ "ipv4": r3_connected_address_ipv4,
+ "ipv6": r3_connected_address_ipv6,
+ }
+ R3_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r3_loopback_address_ipv4_nxt_hop,
+ "ipv6": r3_loopback_address_ipv6_nxt_hop,
+ }
+
+ # populating the nexthop for default routes
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+def test_verify_bgp_default_originate_in_IBGP_p0(request):
+ """
+ Verify BGP default-originate route with IBGP peer
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R2")
+ step("Configure IPv4 and IPv6 Loopback interface on R1, R0 and R2")
+ step("Configure IPv4 and IPv6 EBGP neighbor between R0 and R1")
+
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": r3_local_as,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": r4_local_as,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, " Complete Convergence is expected after changing the ASN but failed to converge --> :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure IPv4 and IPv6 static route on R1 next-hop as NULL0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed to configure the static routes {} on router R1 \n Error: {}".format(
+ tc_name,static_routes_input, result
+ )
+ step("verify IPv4 and IPv6 static route are configured and up on R1")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n After configuring the static routes {} , the routes are not found in FIB \n Error: {}".format(
+ tc_name,static_routes_input, result
+ )
+
+ step(
+ "Configure redistribute static and connected on R0 and R1, for IPv4 and IPv6 address family "
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed to configure the redistribute static configuration \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring redistribute command , verify static and connected routes ( loopback connected routes) are advertised on R2"
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : After redistributing static routes the routes {} expected in FIB but NOT FOUND ......! \n Error: {}".format(
+ tc_name, static_routes_input,result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : After redistributing static routes the routes {} expected in RIB but NOT FOUND ......! \n Error: {}".format(
+ tc_name, static_routes_input , result
+ )
+
+ step(
+ "Taking the snapshot of the prefix count before configuring the default originate"
+ )
+ snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r1")
+
+ step(
+ "Configure Default originate on R1 for R1 to R2, for IPv4 and IPv6 BGP address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed Configuring default originate configuration. \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ " R1 static and loopback routes received on R2 BGP and FIB"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : post configuring the BGP Default originate configuration static and connected routes should not be effected but impacted on FIB .......! FAILED \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failedpost configuring the BGP Default originate configuration static and connected routes should not be effected but impacted on RIB......! FAILED \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Verify default route for IPv4 and IPv6 present with path=igp metric =0 , local-preference= 100 "
+ )
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ metric=0,
+ locPrf=100,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ step(
+ "Taking the snapshot2 of the prefix count after configuring the default originate"
+ )
+ snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r1")
+
+ step("verifying the prefix count incrementing or not ")
+ isIPv4prefix_incremented = False
+ isIPv6prefix_incremented = False
+ if snapshot1["ipv4_count"] < snapshot2["ipv4_count"]:
+ isIPv4prefix_incremented = True
+ if snapshot1["ipv6_count"] < snapshot2["ipv6_count"]:
+ isIPv6prefix_incremented = True
+
+ assert (
+ isIPv4prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV4 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+
+ assert (
+ isIPv6prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV6 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+ write_test_footer(tc_name)
+
+
def test_verify_bgp_default_originate_in_EBGP_p0(request):
    """
    Verify BGP default-originate route with EBGP peer.

    R3 and R4 are moved into the same AS (4000) so the R3-R4 session is
    IBGP while R3-R2 stays EBGP.  Static and connected routes are
    redistributed on R3 and R4, then default-originate is enabled on R3
    towards R2.  The test verifies on R2: the redistributed routes, the
    default route with EBGP attributes (AS-path "4000", metric 0), and
    that the out-prefix counter towards R2 increments.
    """
    tgen = get_topogen()
    global BGP_CONVERGENCE
    global topo
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        check_router_status(tgen)
    reset_config_on_routers(tgen)

    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
    step("Configure lPv4 and IPv6 Loopback interface on R3, R4 and R2")
    step("Configure IPv4 and IPv6 IBGP neighbor between R4 and R3")
    r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
    r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
    r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
    r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
    r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]

    # R3 and R4 share AS 4000 (IBGP between them); the other routers keep
    # their original ASNs so R3-R2 remains an EBGP session.
    input_dict = {
        "r0": {
            "bgp": {
                "local_as": r0_local_as,
            }
        },
        "r1": {
            "bgp": {
                "local_as": r1_local_as,
            }
        },
        "r2": {
            "bgp": {
                "local_as": r2_local_as,
            }
        },
        "r3": {
            "bgp": {
                "local_as": 4000,
            }
        },
        "r4": {
            "bgp": {
                "local_as": 4000,
            }
        },
    }
    result = modify_as_number(tgen, topo, input_dict)
    # modify_as_number may legitimately reject invalid ASN combinations;
    # in that case we log and continue rather than failing the test here.
    try:
        assert result is True
    except AssertionError:
        logger.info("Expected behaviour: {}".format(result))
        logger.info("BGP config is not created because of invalid ASNs")
    step("After changing the BGP AS Path Verify the BGP Convergence")

    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "Complete convergence is expected after changing the ASN of the routers ..! :Failed \n Error: {}".format(
        BGP_CONVERGENCE
    )

    step(" Configure IPv4 and IPv6 static route on R3 next-hop on R4 interface")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r3": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    }
                ]
            }
        }
        result = create_static_routes(tgen, static_routes_input)
        assert result is True, "Testcase {} : Failed to configure the static routes ....! Failed \n Error: {}".format(
            tc_name, result
        )
    step("verify IPv4 and IPv6 static route are configured and up on R3")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r3": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    }
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
        assert result is True, "Testcase {} : Route is not found in {} in FIB ......! Failed \n Error: {}".format(
            tc_name, static_routes_input, result
        )

    step(
        "Configure redistribute static and connected on R3 and R4 for IPv4 and IPv6 address family "
    )
    redistribute_static = {
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                }
            }
        },
        "r4": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, redistribute_static)
    assert result is True, "Testcase {} : Failed to configure redistribute configuration \n Error: {}".format(
        tc_name, result
    )

    step(
        "After configuring redistribute command , verify static and connected routes ( loopback connected routes) are advertised on R2"
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [R3_NETWORK_LOOPBACK[addr_type]],
                        "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
                    },
                    {
                        "network": [R3_NETWORK_CONNECTED[addr_type]],
                        "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
                    },
                    {
                        "network": [R4_NETWORK_LOOPBACK[addr_type]],
                        "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
                    },
                    {
                        "network": [R4_NETWORK_CONNECTED[addr_type]],
                        "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
                    },
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : static and connected routes are expected but not found in FIB .... ! \n Error: {}".format(
            tc_name, result
        )
        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : static and connected routes are expected but not found in RIB .... ! \n Error: {}".format(
            tc_name, result
        )
    # Snapshot of the advertised-prefix counters before default-originate,
    # used below to prove the counter increments once the default is sent.
    snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
    step(
        "Configure Default originate on R3 for R3 to R2, on IPv4 and IPv6 BGP address family"
    )
    local_as = get_dut_as_number(tgen, dut="r3")
    default_originate_config = {
        "r3": {
            "bgp": {
                "local_as": local_as,
                "address_family": {
                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
                },
            }
        }
    }
    result = create_router_bgp(tgen, topo, default_originate_config)
    assert result is True, "Testcase {} : Failed to configure the default originate configuration \n Error: {}".format(
        tc_name, result
    )

    step(
        "After configuring default-originate command , verify default routes are advertised on R2 on both BGP RIB and FIB"
    )

    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
                    },
                ]
            }
        }

        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : static route {} and default route from R3 is expected in R2 FIB .....! NOT FOUND \n Error: {}".format(
            tc_name, NETWORK1_1, result
        )

        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : static route {} and default route from R3 is expected in R2 RIB .....! NOT FOUND \n Error: {}".format(
            tc_name, NETWORK1_1, result
        )

    step(
        "Verify default route for IPv4 and IPv6 present with path = ebgp as path, metric =0 "
    )
    # local-preference is not checked: it is not applicable over eBGP.
    result = verify_rib_default_route(
        tgen,
        topo,
        dut="r2",
        routes=DEFAULT_ROUTES,
        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
        metric=0,
        expected_aspath="4000",
    )
    assert result is True, "Testcase {} : Default route from R3 is expected with attributes in R2 RIB .....! NOT FOUND Error: {}".format(
        tc_name, result
    )

    step(
        "Taking the snapshot2 of the prefix count after configuring the default originate"
    )
    snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
    step(
        "Verify out-prefix count is incremented default route on IPv4 and IPv6 neighbor"
    )
    isIPv4prefix_incremented = False
    isIPv6prefix_incremented = False
    if snapshot1["ipv4_count"] < snapshot2["ipv4_count"]:
        isIPv4prefix_incremented = True
    if snapshot1["ipv6_count"] < snapshot2["ipv6_count"]:
        isIPv6prefix_incremented = True

    assert (
        isIPv4prefix_incremented is True
    ), "Testcase {} : Failed Error: IPV4 Prefix is not incremented on receiving ".format(
        tc_name
    )

    assert (
        isIPv6prefix_incremented is True
    ), "Testcase {} : Failed Error: IPV6 Prefix is not incremented on receiving ".format(
        tc_name
    )
    write_test_footer(tc_name)
+
+
def test_verify_bgp_default_originate_in_IBGP_with_route_map_p0(request):
    """
    Verify BGP default-originate with a conditional route-map on an IBGP peer.

    R1 and R2 are placed in the same AS (1000) so R1-R2 is IBGP.  R0
    redistributes two static routes to R1; R1 then originates a default
    route towards R2 conditioned on route-maps (RMv4/RMv6) that match
    prefix-lists (Pv4/Pv6).  The test toggles the route-map and
    prefix-list configuration and verifies that the default route
    appears and disappears on R2 accordingly.
    """
    tgen = get_topogen()
    global BGP_CONVERGENCE
    global topo
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        check_router_status(tgen)
    reset_config_on_routers(tgen)

    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R2")
    step("Configure IPv4 and IPv6 , EBGP neighbor between R1 and R0")
    r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
    r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
    r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
    r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
    r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]

    # R1 and R2 share AS 1000 (IBGP between them); the other routers keep
    # their original ASNs.
    input_dict = {
        "r0": {
            "bgp": {
                "local_as": r0_local_as,
            }
        },
        "r1": {
            "bgp": {
                "local_as": 1000,
            }
        },
        "r2": {
            "bgp": {
                "local_as": 1000,
            }
        },
        "r3": {
            "bgp": {
                "local_as": r3_local_as,
            }
        },
        "r4": {
            "bgp": {
                "local_as": r4_local_as,
            }
        },
    }
    result = modify_as_number(tgen, topo, input_dict)
    # modify_as_number may legitimately reject invalid ASN combinations;
    # in that case we log and continue rather than failing the test here.
    try:
        assert result is True
    except AssertionError:
        logger.info("Expected behaviour: {}".format(result))
        logger.info("BGP config is not created because of invalid ASNs")

    step("After changing the BGP AS Path Verify the BGP Convergence")
    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "Complete convergence is expected after changing ASN ....! ERROR :Failed \n Error: {}".format(
        BGP_CONVERGENCE
    )

    step("Configure 2 IPv4 and 2 IPv6 Static route on R0 with next-hop as Null0")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r0": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = create_static_routes(tgen, static_routes_input)
        assert result is True, "Testcase {} : Static Configuration is Failed \n Error: {}".format(
            tc_name, result
        )

    step("verify IPv4 and IPv6 static route are configured and up on R0")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r0": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
        assert result is True, "Testcase {} : routes {} not found in R0 FIB \n Error: {}".format(
            tc_name, static_routes_input, result
        )

    step(
        "Configure redistribute static on IPv4 and IPv6 address family on R0 for R0 to R1 neighbor "
    )
    redistribute_static = {
        "r0": {
            "bgp": {
                "address_family": {
                    "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, redistribute_static)
    assert result is True, "Testcase {} : Failed to configure redistribute static configuration....! \n Error: {}".format(
        tc_name, result
    )

    step("verify IPv4 and IPv6 static route are configured and up on R1")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r0": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
        assert result is True, "Testcase {} : Failed... Routes {} expected in r1 FIB after configuring the redistribute config \n Error: {}".format(
            tc_name, static_routes_input, result
        )

        result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
        assert result is True, "Testcase {} : Failed... Routes {} expected in r1 RIB after configuring the redistribute config \n Error: {}".format(
            tc_name, static_routes_input, result
        )

    step(
        "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R1 to match BGP route Sv41, Sv42, IPv6 route Sv61 Sv62 permit "
    )
    input_dict_3 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "Pv4": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv4"],
                            "action": "permit",
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv4"],
                            "action": "permit",
                        },
                    ]
                },
                "ipv6": {
                    "Pv6": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv6"],
                            "action": "permit",
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv6"],
                            "action": "permit",
                        },
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed to configure the prefix list \n Error: {}".format(
        tc_name, result
    )

    step(
        "Configure IPV4 and IPv6 route-map (RMv4 and RMv6 ) matching prefix-list (Pv4 and Pv6) respectively on R1"
    )
    input_dict_3 = {
        "r1": {
            "route_maps": {
                "RMv4": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
                    },
                ],
                "RMv6": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
                    },
                ],
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed to configure the route map \n Error: {}".format(
        tc_name, result
    )

    step(
        "Configure default-originate with route-map (RMv4 and RMv6) on R1, on BGP IPv4 and IPv6 address family "
    )
    local_as = get_dut_as_number(tgen, dut="r1")
    default_originate_config = {
        "r1": {
            "bgp": {
                "local_as": local_as,
                "address_family": {
                    "ipv4": {
                        "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
                    },
                    "ipv6": {
                        "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
                    },
                },
            }
        }
    }
    result = create_router_bgp(tgen, topo, default_originate_config)
    assert result is True, "Testcase {} : Failed to configure the default originate \n Error: {}".format(
        tc_name, result
    )

    step("Verify the default route is received in BGP RIB and FIB")
    step(
        "After configuring default-originate command , verify default routes are advertised on R2 "
    )
    # All the checks below look for the IPv4/IPv6 default route advertised
    # by R1 (assigned once here; the value never changes in this test).
    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                    }
                ]
            }
        }
        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
        )
        assert result is True, "Testcase {} : Failed...! Expected default route from R1 not found in FIB \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
        )
        assert result is True, "Testcase {} : Failed...! Expected default route from R1 not found in RIB \n Error: {}".format(
            tc_name, result
        )
    step("Remove route-map RMv4 and RMv6 from default-originate command in R1")
    # NOTE: re-configuring default-originate without a route-map should
    # replace the previously applied conditional (route-map) form.
    local_as = get_dut_as_number(tgen, dut="r1")
    default_originate_config = {
        "r1": {
            "bgp": {
                "local_as": local_as,
                "address_family": {
                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
                },
            }
        }
    }
    result = create_router_bgp(tgen, topo, default_originate_config)
    assert result is True, "Testcase {} : Failed to remove the default originate conditional route-map \n Error: {}".format(
        tc_name, result
    )

    step(
        "Verify BGP RIB and FIB After removing route-map , default route still present on R2"
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
        )
        assert result is True, "Testcase {} : Failed Default route from R1 is not found in FIB \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
        )
        assert result is True, "Testcase {} : Failed Default route from R1 is not found in RIB \n Error: {}".format(
            tc_name, result
        )

    step("Configure default-originate with route-map (RMv4 and RMv6) on R1 ")
    local_as = get_dut_as_number(tgen, dut="r1")
    default_originate_config = {
        "r1": {
            "bgp": {
                "local_as": local_as,
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "default_originate": {
                                "r2": {
                                    "route_map": "RMv4",
                                }
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "default_originate": {
                                "r2": {
                                    "route_map": "RMv6",
                                }
                            }
                        }
                    },
                },
            }
        }
    }
    result = create_router_bgp(tgen, topo, default_originate_config)
    assert result is True, "Testcase {} : Failed to configure the Default originate route-map \n Error: {}".format(
        tc_name, result
    )

    step(
        "After configuring default-originate command , verify default routes are advertised on R2 "
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
        )
        assert result is True, "Testcase {} : Failed Default Route from R1 is not found in FIB \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
        )
        assert result is True, "Testcase {} : Failed Default Route from R1 is not found in RIB \n Error: {}".format(
            tc_name, result
        )

    step("Delete prefix list using no prefix-list")
    input_dict_3 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "Pv4": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv4"],
                            "action": "permit",
                            "delete": True,
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv4"],
                            "action": "permit",
                            "delete": True,
                        },
                    ]
                },
                "ipv6": {
                    "Pv6": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv6"],
                            "action": "permit",
                            "delete": True,
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv6"],
                            "action": "permit",
                            "delete": True,
                        },
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed to delete the prefix list Error: {}".format(
        tc_name, result
    )

    step(
        "Verify BGP RIB and FIB After deleting prefix-list , verify IPv4 and IPv6 default route got removed from DUT "
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                    }
                ]
            }
        }
        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            expected=False,
        )
        assert (
            result is not True
        ), "Testcase {} : Failed\n After deleting prefix default route is not expected in FIB \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            expected=False,
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n After deleting prefix default route is not expected in RIB \n Error: {}".format(
            tc_name, result
        )

    step("Configure prefix-list and delete route-map using no route-map")
    input_dict_3 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "Pv4": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv4"],
                            "action": "permit",
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv4"],
                            "action": "permit",
                        },
                    ]
                },
                "ipv6": {
                    "Pv6": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv6"],
                            "action": "permit",
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv6"],
                            "action": "permit",
                        },
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed to configure the prefix lists Error: {}".format(
        tc_name, result
    )

    step(
        "After configuring the Prefixlist cross checking the BGP Default route is configured again , before deleting the route map"
    )

    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                    }
                ]
            }
        }
        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            expected=True,
        )
        assert result is True, "Testcase {} : Failed Default route from R1 is expected in FIB but not found \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            expected=True,
        )
        assert result is True, "Testcase {} : Failed Default route from R1 is expected in RIB but not found \n Error: {}".format(
            tc_name, result
        )

    step("Deleting the routemap")
    input_dict = {"r1": {"route_maps": ["RMv4", "RMv6"]}}
    result = delete_route_maps(tgen, input_dict)
    assert result is True, "Testcase {} : Failed to delete the Route-map \n Error: {}".format(
        tc_name, result
    )

    step(
        "Verify BGP RIB and FIB ,After deleting route-map , verify IPv4 and IPv6 default route got removed from DUT"
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            expected=False,
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n After deleting route-map default route is not expected in FIB \nError: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            expected=False,
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n After deleting route-map default route is not expected in RIB \n Error: {}".format(
            tc_name, result
        )

    write_test_footer(tc_name)
+
+
+def test_verify_bgp_default_originate_in_EBGP_with_route_map_p0(request):
+ """
+ test_verify_bgp_default_originate_in_EBGP_with_route_map_p0
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
+ step("Configure IPv4 and IPv6 IBGP neighbor between R3 and R4")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": r1_local_as,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": r2_local_as,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step(
+ "Configure 2 IPv4 and 2 IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed to configure the static routes \n Error: {}".format(
+ tc_name, result
+ )
+ step("verify IPv4 and IPv6 static route are configured and up on R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed Static route {} is not found in R4 FIB \n Error: {}".format(
+ tc_name, static_routes_input,result
+ )
+
+ step(
+ "Configure redistribute static on IPv4 and IPv6 address family on R4 for R4 to R3 neighbo"
+ )
+ redistribute_static = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed to configure the redistribute static \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 static route are configured and up on R3")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed static routes from R1 and R3 is not found in FIB \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed static routes from R1 and R3 is not found in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R3 so new route which is not present on R3"
+ )
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK3_1["ipv4"],
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK3_1["ipv6"],
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the prefix lists \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 Prefix list got configured on R3")
+ input_dict = {"r3": {"prefix_lists": ["Pv4", "Pv6"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed ..! configured prefix lists {} are not found \n Error: {}".format(tc_name,input_dict, result)
+
+ step(
+ "Configure IPv4 and IPv6 route-map ( RMv4 and RMv6 ) matching prefix-list (Pv4 and Pv6 ) respectively on R3"
+ )
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the route-map \n Error: {}".format(tc_name, result)
+ step(
+ "Taking the snapshot of the prefix count before configuring the default originate"
+ )
+ snapshot1 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
+ step(
+ "Configure default-originate with IPv4 and IPv6 route-map (RMv4 and RMv6) on R3"
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed to configure default-originate \n Error: {}".format(tc_name, result)
+
+ step("Verify the default route is NOT received in BGP RIB and FIB on R2 ")
+ step(
+ "After configuring default-originate command , verify default routes are not Received on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Default route is not expected due to deny in prefix list \nError: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nDefault route is not expected due to deny in prefix list\n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Add route Sv41, Sv42, IPv6 route Sv61 Sv62 on prefix list Pv4 and Pv6")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to configure the prefix lists Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is received on R2")
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default routes are expected in R2 FIB from R3 but not found ....! \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed Default routes are expected in R2 RIB from R3 but not found ....! \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove route Sv41, Sv42, IPv6 route Sv61 Sv62 on prefix list Pv4 and Pv6")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ "delete": True,
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ "delete": True,
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ "delete": True,
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ "delete": True,
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed to remove prefix-lists from R3 Error: {}".format(tc_name, result)
+
+ step(
+ "After Removing route BGP default route for IPv4 and IPv6 is NOT received on R2"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After Removing route in prefix list the default route is not expected in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After Removing route in prefix list the default route is not expected in RIB\n Error: {}".format(
+ tc_name, result
+ )
+
+ step(" Add route Sv41, Sv42, IPv6 route Sv61 Sv62 on prefix list Pv4 and Pv6")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is received on R2")
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change IPv4 and IPv6 prefix-list permit and deny ")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {"seqid": "1", "network": NETWORK1_1["ipv4"], "action": "deny"},
+ {"seqid": "2", "network": NETWORK2_1["ipv4"], "action": "deny"},
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {"seqid": "1", "network": NETWORK1_1["ipv6"], "action": "deny"},
+ {"seqid": "2", "network": NETWORK2_1["ipv6"], "action": "deny"},
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is not received on R2")
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n after denying the prefix list default route is not expected in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n after denying the prefix list default route is not expected in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change IPv4 and IPv6 prefix-list deny to permit ")
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ {
+ "seqid": "2",
+ "network": NETWORK2_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify BGP default route for IPv4 and IPv6 is received on R2")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Taking the snapshot2 of the prefix count after configuring the default originate"
+ )
+ snapshot2 = get_prefix_count_route(tgen, topo, dut="r2", peer="r3")
+
+ step("verifying the prefix count incrementing or not ")
+ isIPv4prefix_incremented = False
+ isIPv6prefix_incremented = False
+ if snapshot1["ipv4_count"] < snapshot2["ipv4_count"]:
+ isIPv4prefix_incremented = True
+ if snapshot1["ipv6_count"] < snapshot2["ipv6_count"]:
+ isIPv6prefix_incremented = True
+
+ assert (
+ isIPv4prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV4 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+
+ assert (
+ isIPv6prefix_incremented is True
+ ), "Testcase {} : Failed Error: IPV6 Prefix is not incremented on receiveing ".format(
+ tc_name
+ )
+
+ step(
+ "Configure another IPv4 and IPv6 route-map and match same prefix-list (Sv41, Sv42, IPv6 route Sv61 Sv62) with deny statement "
+ )
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv41": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv61": [
+ {
+ "action": "deny",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Attach route-map on IPv4 and IP6 BGP neighbor on fly")
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv41"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv61"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "After attaching route-map verify IPv4 and IPv6 default route is withdrawn from the R2"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change the recently added Routemap from deny to permit")
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv41": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv61": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify IPv4 and IPv6 default route is advertised from the R2")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Delete default-originate route-map command while configuring ( neighbor x.x.x default-originate) for IPv4 and IPv6 BGP neighbor "
+ )
+ """ Configuring the Default originate on neighbor must remove the previously assigned deault-originate with routemap config """
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify in running config from BGP that default-originate with route-map command is removed and default-originate command is still present and default route for IPv4 and IPv6 present in RIB and FIB"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate with conditional route-map command on IPv4 and IPv6 address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv41"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv61"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify in running config from BGP that default-originate with route-map command is present and default route for IPv4 and IPv6 present"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Delete default originate with 'no bgp default-originate' from IPV4 and IPV6 address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ " Verify in running config from BGP that default-originate complete CLI is removed for IPV4 and IPV6 address family and default originate routes got deleted"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Default Route is not expected in FIB \nError: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Default Route is not expected in RIB\nError: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
new file mode 100644
index 0000000000..a9987a8f96
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
@@ -0,0 +1,2437 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+5. Verify BGP default originate route-map with OUT route-map
+6. Verify BGP default originate route-map with IN route-map
+8. Verify BGP default route after removing default-originate
+9. Verify default-originate route with GR
+"""
+import os
+import sys
+import time
+import pytest
+from time import sleep
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ verify_graceful_restart,
+ create_router_bgp,
+ verify_router_id,
+ modify_as_number,
+ verify_as_numbers,
+ clear_bgp_and_verify,
+ clear_bgp,
+ verify_bgp_rib,
+ get_prefix_count_route,
+ get_dut_as_number,
+ verify_rib_default_route,
+ verify_fib_default_route,
+ verify_bgp_advertised_routes_from_neighbor,
+ verify_bgp_received_routes_from_neighbor,
+)
+from lib.common_config import (
+ interface_status,
+ verify_prefix_lists,
+ verify_fib_routes,
+ kill_router_daemons,
+ start_router_daemons,
+ shutdown_bringup_interface,
+ step,
+ required_linux_kernel_version,
+ stop_router,
+ start_router,
+ create_route_maps,
+ create_prefix_lists,
+ get_frr_ipv6_linklocal,
+ start_topology,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ check_router_status,
+ delete_route_maps,
+)
+
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+KEEPALIVETIMER = 1
+HOLDDOWNTIMER = 3
+# Test networks, default-route and next-hop constants
+NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
+NETWORK1_2 = {"ipv4": "1.1.1.2/32", "ipv6": "1::2/128"}
+NETWORK2_1 = {"ipv4": "2.1.1.1/32", "ipv6": "2::1/128"}
+NETWORK2_2 = {"ipv4": "2.1.1.2/32", "ipv6": "2::2/128"}
+NETWORK3_1 = {"ipv4": "3.1.1.1/32", "ipv6": "3::1/128"}
+NETWORK3_2 = {"ipv4": "3.1.1.2/32", "ipv6": "3::2/128"}
+NETWORK4_1 = {"ipv4": "4.1.1.1/32", "ipv6": "4::1/128"}
+NETWORK4_2 = {"ipv4": "4.1.1.2/32", "ipv6": "4::2/128"}
+NETWORK5_1 = {"ipv4": "5.1.1.1/32", "ipv6": "5::1/128"}
+NETWORK5_2 = {"ipv4": "5.1.1.2/32", "ipv6": "5::2/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+IPV4_RM = "RMVIPV4"
+IPV6_RM = "RMVIPV6"
+
+IPV4_RM1 = "RMVIPV41"
+IPV6_RM1 = "RMVIPV61"
+
+IPV4_RM2 = "RMVIPV42"
+IPV6_RM2 = "RMVIPV62"
+
+IPV4_PL_1 = "PV41"
+IPV4_PL_2 = "PV42"
+
+IPV6_PL_1 = "PV61"
+IPV6_PL_2 = "PV62"
+
+
+r1_ipv4_loopback = "1.0.1.0/24"
+r2_ipv4_loopback = "1.0.2.0/24"
+r3_ipv4_loopback = "1.0.3.0/24"
+r4_ipv4_loopback = "1.0.4.0/24"
+r1_ipv6_loopback = "2001:db8:f::1:0/120"
+r2_ipv6_loopback = "2001:db8:f::2:0/120"
+r3_ipv6_loopback = "2001:db8:f::3:0/120"
+r4_ipv6_loopback = "2001:db8:f::4:0/120"
+
+r0_connected_address_ipv4 = "192.168.0.0/24"
+r0_connected_address_ipv6 = "fd00::/64"
+r1_connected_address_ipv4 = "192.168.1.0/24"
+r1_connected_address_ipv6 = "fd00:0:0:1::/64"
+r3_connected_address_ipv4 = "192.168.2.0/24"
+r3_connected_address_ipv6 = "fd00:0:0:2::/64"
+r4_connected_address_ipv4 = "192.168.3.0/24"
+r4_connected_address_ipv6 = "fd00:0:0:3::/64"
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+
+    Builds the topology from bgp_default_originate_topo1.json, starts the
+    routers, applies the JSON-derived configuration, verifies BGP
+    convergence, and then populates module-level globals (loopback /
+    connected prefixes and their next-hops per router, plus the default
+    route next-hops toward R2) that the test cases below rely on.
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.15")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+    start_topology(tgen)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    # Globals shared by every test case in this file; they can only be
+    # filled in after the topology has been built and converged.
+    global ADDR_TYPES
+    global BGP_CONVERGENCE
+    global DEFAULT_ROUTES
+    global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+    global R0_NETWORK_LOOPBACK, R0_NETWORK_LOOPBACK_NXTHOP, R1_NETWORK_LOOPBACK, R1_NETWORK_LOOPBACK_NXTHOP
+    global R0_NETWORK_CONNECTED, R0_NETWORK_CONNECTED_NXTHOP, R1_NETWORK_CONNECTED, R1_NETWORK_CONNECTED_NXTHOP
+    global R4_NETWORK_LOOPBACK, R4_NETWORK_LOOPBACK_NXTHOP, R3_NETWORK_LOOPBACK, R3_NETWORK_LOOPBACK_NXTHOP
+    global R4_NETWORK_CONNECTED, R4_NETWORK_CONNECTED_NXTHOP, R3_NETWORK_CONNECTED, R3_NETWORK_CONNECTED_NXTHOP
+
+    ADDR_TYPES = check_address_types()
+    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+        BGP_CONVERGENCE
+    )
+    # Extract each router's loopback prefix and the interface address of its
+    # BGP peer link; the `.split("/")[0]` strips the prefix length so the
+    # value can be used as a plain next-hop address.
+
+    r0_loopback_address_ipv4 = topo["routers"]["r0"]["links"]["lo"]["ipv4"]
+    r0_loopback_address_ipv4_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+        "ipv4"
+    ].split("/")[0]
+    r0_loopback_address_ipv6 = topo["routers"]["r0"]["links"]["lo"]["ipv6"]
+    r0_loopback_address_ipv6_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+        "ipv6"
+    ].split("/")[0]
+
+    r1_loopback_address_ipv4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+    r1_loopback_address_ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+        "ipv4"
+    ].split("/")[0]
+    r1_loopback_address_ipv6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+    r1_loopback_address_ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+        "ipv6"
+    ].split("/")[0]
+
+    r4_loopback_address_ipv4 = topo["routers"]["r4"]["links"]["lo"]["ipv4"]
+    r4_loopback_address_ipv4_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+        "ipv4"
+    ].split("/")[0]
+    r4_loopback_address_ipv6 = topo["routers"]["r4"]["links"]["lo"]["ipv6"]
+    r4_loopback_address_ipv6_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+        "ipv6"
+    ].split("/")[0]
+
+    r3_loopback_address_ipv4 = topo["routers"]["r3"]["links"]["lo"]["ipv4"]
+    r3_loopback_address_ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+        "ipv4"
+    ].split("/")[0]
+    r3_loopback_address_ipv6 = topo["routers"]["r3"]["links"]["lo"]["ipv6"]
+    r3_loopback_address_ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+        "ipv6"
+    ].split("/")[0]
+
+    # Per-router {ipv4, ipv6} dicts consumed by verify_fib_routes /
+    # verify_bgp_rib calls in the test cases.
+    R0_NETWORK_LOOPBACK = {
+        "ipv4": r0_loopback_address_ipv4,
+        "ipv6": r0_loopback_address_ipv6,
+    }
+    R0_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r0_loopback_address_ipv4_nxt_hop,
+        "ipv6": r0_loopback_address_ipv6_nxt_hop,
+    }
+
+    R1_NETWORK_LOOPBACK = {
+        "ipv4": r1_loopback_address_ipv4,
+        "ipv6": r1_loopback_address_ipv6,
+    }
+    R1_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r1_loopback_address_ipv4_nxt_hop,
+        "ipv6": r1_loopback_address_ipv6_nxt_hop,
+    }
+
+    R0_NETWORK_CONNECTED = {
+        "ipv4": r0_connected_address_ipv4,
+        "ipv6": r0_connected_address_ipv6,
+    }
+    R0_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r0_loopback_address_ipv4_nxt_hop,
+        "ipv6": r0_loopback_address_ipv6_nxt_hop,
+    }
+
+    R1_NETWORK_CONNECTED = {
+        "ipv4": r1_connected_address_ipv4,
+        "ipv6": r1_connected_address_ipv6,
+    }
+    R1_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r1_loopback_address_ipv4_nxt_hop,
+        "ipv6": r1_loopback_address_ipv6_nxt_hop,
+    }
+
+    R4_NETWORK_LOOPBACK = {
+        "ipv4": r4_loopback_address_ipv4,
+        "ipv6": r4_loopback_address_ipv6,
+    }
+    R4_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r4_loopback_address_ipv4_nxt_hop,
+        "ipv6": r4_loopback_address_ipv6_nxt_hop,
+    }
+
+    R3_NETWORK_LOOPBACK = {
+        "ipv4": r3_loopback_address_ipv4,
+        "ipv6": r3_loopback_address_ipv6,
+    }
+    R3_NETWORK_LOOPBACK_NXTHOP = {
+        "ipv4": r3_loopback_address_ipv4_nxt_hop,
+        "ipv6": r3_loopback_address_ipv6_nxt_hop,
+    }
+
+    R4_NETWORK_CONNECTED = {
+        "ipv4": r4_connected_address_ipv4,
+        "ipv6": r4_connected_address_ipv6,
+    }
+    R4_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r4_loopback_address_ipv4_nxt_hop,
+        "ipv6": r4_loopback_address_ipv6_nxt_hop,
+    }
+
+    R3_NETWORK_CONNECTED = {
+        "ipv4": r3_connected_address_ipv4,
+        "ipv6": r3_connected_address_ipv6,
+    }
+    R3_NETWORK_CONNECTED_NXTHOP = {
+        "ipv4": r3_loopback_address_ipv4_nxt_hop,
+        "ipv6": r3_loopback_address_ipv6_nxt_hop,
+    }
+
+    # populating the nexthop for default routes
+
+    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    # NOTE(review): for IPv6 the link-local address is used as the expected
+    # next-hop; ipv6_nxt_hop (the global address) is computed but unused here.
+    interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+    ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+    ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+    ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+    DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local API's
+#
+#####################################################
+
+
+def configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut, peer):
+ """
+ This function groups the repetitive function calls into one function.
+ """
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
def test_verify_bgp_default_originate_route_map_in_OUT_p1(request):
    """
    Verify default-originate driven by route-maps RM4/RM6 together with an
    outbound route-map (RMv42/RMv62) applied on R3 towards R2.

    Outline:
    - R3<->R2 EBGP, R3<->R4 IBGP (both R3 and R4 in AS 4000).
    - Static routes Sv41/Sv42 (IPv4) and Sv61/Sv62 (IPv6) on R4 with
      next-hop Null0, redistributed into BGP.
    - default-originate on R3 (towards R2) conditioned on RM4/RM6, which
      match Sv41/Sv61 via prefix-lists Pv4/Pv6.
    - RMv42/RMv62 (matching Sv42/Sv62 via Pv42/Pv62) applied OUT on R3
      towards R2.
    - Toggle the prefix-lists between permit and deny and verify that the
      default route and the static routes appear/disappear on R2.
    """
    tgen = get_topogen()
    global BGP_CONVERGENCE
    global topo
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        check_router_status(tgen)
    reset_config_on_routers(tgen)

    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
    step("Configure IPv4 and IPv6 IBGP neighbor between R3 and R4")
    r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
    r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
    r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
    # R3 and R4 are moved into the same AS (4000) to form the IBGP pair.
    input_dict = {
        "r0": {
            "bgp": {
                "local_as": r0_local_as,
            }
        },
        "r1": {
            "bgp": {
                "local_as": r1_local_as,
            }
        },
        "r2": {
            "bgp": {
                "local_as": r2_local_as,
            }
        },
        "r3": {
            "bgp": {
                "local_as": 4000,
            }
        },
        "r4": {
            "bgp": {
                "local_as": 4000,
            }
        },
    }
    result = modify_as_number(tgen, topo, input_dict)
    try:
        assert result is True
    except AssertionError:
        logger.info("Expected behaviour: {}".format(result))
        logger.info("BGP config is not created because of invalid ASNs")
    step("After changing the BGP AS Path Verify the BGP Convergence")
    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
        BGP_CONVERGENCE
    )

    step(
        "Configure 2 IPv4 and 2 IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62"
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r4": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = create_static_routes(tgen, static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step("verify IPv4 and IPv6 static route are configured and up on R4")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r4": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step("Configure redistribute static knob on R4 , for R4 to R3 neighbor ")
    redistribute_static = {
        "r4": {
            "bgp": {
                "address_family": {
                    "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, redistribute_static)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # NOTE(review): the ipv6 entries reuse NEXT_HOP_IP["ipv4"] — presumably
    # both keys resolve to the same blackhole next-hop; confirm against the
    # NEXT_HOP_IP definition.
    expected_routes = {
        "ipv4": [
            {"network": NETWORK1_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
            {"network": NETWORK2_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
        ],
        "ipv6": [
            {"network": NETWORK1_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv4"]},
            {"network": NETWORK2_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv4"]},
        ],
    }
    result = verify_bgp_advertised_routes_from_neighbor(
        tgen, topo, dut="r4", peer="r3", expected_routes=expected_routes
    )
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "After redistribute static verify the routes is recevied in router R3 in RIB and FIB"
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r3": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
        result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step(
        "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R3 to match BGP route Sv41, IPv6 route Sv61 with permit option "
    )
    input_dict_3 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "Pv4": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv4"],
                            "action": "permit",
                        }
                    ]
                },
                "ipv6": {
                    "Pv6": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv6"],
                            "action": "permit",
                        }
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("verify IPv4 and IPv6 Prefix list got configured on R3")
    input_dict = {"r3": {"prefix_lists": ["Pv4", "Pv6"]}}
    result = verify_prefix_lists(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "Configure IPv4 and IPv6 route-map RMv4 and RMv6 matching prefix-list Pv4 and Pv6 with permit option "
    )
    input_dict_3 = {
        "r3": {
            "route_maps": {
                "RM4": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
                    },
                ],
                "RM6": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
                    },
                ],
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "Configure IPv4 prefix-list Pv42 and and IPv6 prefix-list Pv62 on R3 to match BGP route Sv42, IPv6 route Sv62 with deny option"
    )
    input_dict_3 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "Pv42": [
                        {"seqid": "1", "network": NETWORK2_1["ipv4"], "action": "deny"}
                    ]
                },
                "ipv6": {
                    "Pv62": [
                        {"seqid": "1", "network": NETWORK2_1["ipv6"], "action": "deny"}
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("verify IPv4 and IPv6 Prefix list got configured on R3")
    input_dict = {"r3": {"prefix_lists": ["Pv42", "Pv62"]}}
    result = verify_prefix_lists(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "Configure IPv4 and IPv6 route-map (RMv42 and RMv62 )matching prefix-list Pv42 and Pv62 with permit option "
    )
    input_dict_3 = {
        "r3": {
            "route_maps": {
                "RMv42": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv4": {"prefix_lists": "Pv42"}},
                    },
                ],
                "RMv62": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv6": {"prefix_lists": "Pv62"}},
                    },
                ],
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "Apply IPv4 and IPv6 route-map RMv4 and RMv6 with default-originate on R3 , for R3 to R2 peers and Apply IPv4 and IPv6 out route-map RMv42 and RMv62 on R3 , for R3 to R2 peers "
    )
    local_as = get_dut_as_number(tgen, "r3")
    default_originate_config = {
        "r3": {
            "bgp": {
                "local_as": local_as,
                "address_family": {
                    "ipv4": {
                        "unicast": {"default_originate": {"r2": {"route_map": "RM4"}}}
                    },
                    "ipv6": {
                        "unicast": {"default_originate": {"r2": {"route_map": "RM6"}}}
                    },
                },
            }
        }
    }
    result = create_router_bgp(tgen, topo, default_originate_config)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Refresh the AS numbers in the topology dict: modify_as_number() above
    # changed them on the routers, so re-read the operational values.
    updated_topo = topo
    updated_topo["routers"]["r0"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r0")
    updated_topo["routers"]["r1"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r1")
    updated_topo["routers"]["r2"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r2")
    updated_topo["routers"]["r3"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r3")
    updated_topo["routers"]["r4"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r4")

    step(
        "Apply IPv4 and IPv6 route-map RMv42 and RMv62 on R3 (OUT Direction), for R3 to R2 peers "
    )
    input_dict_4 = {
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r2": {
                                    "dest_link": {
                                        "r3": {
                                            "route_maps": [
                                                {"name": "RMv42", "direction": "out"}
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r2": {
                                    "dest_link": {
                                        "r3": {
                                            "route_maps": [
                                                {"name": "RMv62", "direction": "out"}
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                }
            }
        }
    }

    result = create_router_bgp(tgen, updated_topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    NOTE = """
    After applying route-map on neighbor verify default BGP route IPv4 IPv6 route populated in R2 BGP and routing table , verify using "show ip bgp json" "show ipv6 bgp json" "show ip route json" "show ip route json"
    Sv42 and Sv62 route should not be present on R2
    """
    step(NOTE)
    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    # Sv41/Sv42 (and the v6 counterparts) must be filtered by the OUT
    # route-map / default-originate condition, so they must NOT reach R2.
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }

        result = verify_fib_routes(
            tgen, addr_type, "r2", static_routes_input, expected=False
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n Static routes are not expected due to conditions \nError: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen, addr_type, "r2", static_routes_input, expected=False
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n Static routes are not expected due to conditions\n Error: {}".format(
            tc_name, result
        )

    step("Change IPv4 prefix-list Pv42 and and IPv6 prefix-list Pv62 deny to permit")
    input_dict_3 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "Pv42": [
                        {
                            "seqid": "1",
                            "network": NETWORK2_1["ipv4"],
                            "action": "permit",
                        }
                    ]
                },
                "ipv6": {
                    "Pv62": [
                        {
                            "seqid": "1",
                            "network": NETWORK2_1["ipv6"],
                            "action": "permit",
                        }
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("verify IPv4 and IPv6 Prefix list got configured on R3")
    input_dict = {"r3": {"prefix_lists": ["Pv42", "Pv62"]}}
    result = verify_prefix_lists(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    NOTE = """Default BGP route and IPv4 ( Sv42) , IPv6 (Sv62) route populated in R2 BGP and routing table"""
    step(NOTE)
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step("IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 permit to deny ")
    input_dict_3 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "Pv4": [
                        {"seqid": "1", "network": NETWORK1_1["ipv4"], "action": "deny"}
                    ]
                },
                "ipv6": {
                    "Pv6": [
                        {"seqid": "1", "network": NETWORK1_1["ipv6"], "action": "deny"}
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    NOTE = """
    Verify default-originate route (IPv4 and IPv6 ) not present on R2
    IPv4 ( Sv42) , IPv6 (Sv62) route populated in R2 BGP
    """
    step(NOTE)
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
            expected=False,
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n default-route in FIB is not expected due to conditions \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R3[addr_type],
            expected=False,
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n default-route in RIB is not expected due to conditions \n Error: {}".format(
            tc_name, result
        )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    }
                ]
            }
        }

        result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    write_test_footer(tc_name)
+
def test_verify_bgp_default_originate_route_map_in_IN_p1(request):
    """Verify BGP default originate route-map with IN route-map.

    Outline:
    - R1<->R2 EBGP, R1<->R0 IBGP (both R0 and R1 in AS 1000).
    - Static routes Sv41/Sv42 (IPv4) and Sv61/Sv62 (IPv6) on R0,
      redistributed into BGP towards R1.
    - Route-maps RMv4/RMv6 (matching the statics via Pv4/Pv6) applied IN
      on R1 from R0; while set to deny, the routes are received but kept
      out of R1's RIB/FIB.
    - default-originate on R1 towards R2; verify the default route on R2.
    - Flip RMv4/RMv6 to permit and verify the statics re-appear on R1.
    - Finally verify a static default route on R2 takes preference over
      the BGP default route.
    """
    tgen = get_topogen()
    global BGP_CONVERGENCE
    global topo
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        check_router_status(tgen)
    reset_config_on_routers(tgen)

    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    step("Configure IPv4 and IPv6 , EBGP neighbor between R1 and R2")
    step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R0")
    r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
    r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
    r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
    # R0 and R1 are moved into the same AS (1000) to form the IBGP pair.
    input_dict = {
        "r0": {
            "bgp": {
                "local_as": 1000,
            }
        },
        "r1": {
            "bgp": {
                "local_as": 1000,
            }
        },
        "r2": {
            "bgp": {
                "local_as": r2_local_as,
            }
        },
        "r3": {
            "bgp": {
                "local_as": r3_local_as,
            }
        },
        "r4": {
            "bgp": {
                "local_as": r4_local_as,
            }
        },
    }
    result = modify_as_number(tgen, topo, input_dict)
    try:
        assert result is True
    except AssertionError:
        logger.info("Expected behaviour: {}".format(result))
        logger.info("BGP config is not created because of invalid ASNs")
    step("After changing the BGP AS Path Verify the BGP Convergence")

    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
        BGP_CONVERGENCE
    )

    step(
        "Configure 2 IPv4 and 2 IPv6, Static route on R0 with next-hop as Null0 IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62"
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r0": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = create_static_routes(tgen, static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step("verifyIPv4 and IPv6 static routes are configure and up on R0 ")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r0": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step(
        "Configure redistribute static knob on R0 , for R0 to R1 IPv4 and IPv6 neighbor"
    )
    redistribute_static = {
        "r0": {
            "bgp": {
                "address_family": {
                    "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
                    "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, redistribute_static)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("Verify IPv4 and IPv6 route received on R1 ")
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r1": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step(
        "Configure IPv4 prefix-list Pv4 and and IPv6 prefix-list Pv6 on R1 to match BGP route Sv41, Sv42, IPv6 route Sv61 Sv62"
    )
    input_dict_3 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "Pv4": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv4"],
                            "action": "permit",
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv4"],
                            "action": "permit",
                        },
                    ]
                },
                "ipv6": {
                    "Pv6": [
                        {
                            "seqid": "1",
                            "network": NETWORK1_1["ipv6"],
                            "action": "permit",
                        },
                        {
                            "seqid": "2",
                            "network": NETWORK2_1["ipv6"],
                            "action": "permit",
                        },
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("verify IPv4 and IPv6 Prefix list got configured on R1")
    input_dict = {"r1": {"prefix_lists": ["Pv4", "Pv6"]}}
    result = verify_prefix_lists(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "Configure IPv4 and IPv6 route-map RMv4 and RMv6 matching prefix-list Pv4 and Pv6 with deny option on R1"
    )
    input_dict_3 = {
        "r1": {
            "route_maps": {
                "RMv4": [
                    {
                        "action": "deny",
                        "seq_id": "1",
                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
                    },
                ],
                "RMv6": [
                    {
                        "action": "deny",
                        "seq_id": "1",
                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
                    },
                ],
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("Apply route-map IN direction in R1 (R1 to R0) IPv4 and IPv6 neighbor")
    # Refresh the AS numbers in the topology dict: modify_as_number() above
    # changed them on the routers, so re-read the operational values.
    updated_topo = topo
    updated_topo["routers"]["r0"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r0")
    updated_topo["routers"]["r1"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r1")
    updated_topo["routers"]["r2"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r2")
    updated_topo["routers"]["r3"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r3")
    updated_topo["routers"]["r4"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r4")

    input_dict_4 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r0": {
                                    "dest_link": {
                                        "r1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMv4",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r0": {
                                    "dest_link": {
                                        "r1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMv6",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                }
            }
        }
    }

    result = create_router_bgp(tgen, updated_topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "After applying route-map verify that IPv4 route Sv41, Sv42, IPv6 route Sv61 Sv62 should not present on R1 "
    )
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r1": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }

        result = verify_fib_routes(
            tgen, addr_type, "r1", static_routes_input, expected=False
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n default-route in FIB is not expected due to conditions \nError: {}".format(
            tc_name, result
        )

        result = verify_bgp_rib(
            tgen, addr_type, "r1", static_routes_input, expected=False
        )
        assert (
            result is not True
        ), "Testcase {} : Failed \n default-route in FIB is not expected due to conditions \nError: {}".format(
            tc_name, result
        )
    # Routes should come to dut but not the shown in RIB thus verifying using show ip bgp nbr xxx received route
    step(
        " Verify the received routes \n using 'show ip bgp nbr xxx received route' in Router R1"
    )
    expected_routes = {
        "ipv4": [
            {"network": NETWORK1_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
            {"network": NETWORK2_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]},
        ],
        "ipv6": [
            {"network": NETWORK1_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv6"]},
            {"network": NETWORK2_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv6"]},
        ],
    }
    result = verify_bgp_received_routes_from_neighbor(
        tgen, topo, dut="r1", peer="r0", expected_routes=expected_routes
    )
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("Configure default-originate on R1 for R1 to R2 IPv4 and IPv6 neighbor ")
    local_as_r1 = get_dut_as_number(tgen, dut="r1")
    default_originate_config = {
        "r1": {
            "bgp": {
                "local_as": local_as_r1,
                "address_family": {
                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
                },
            }
        }
    }
    result = create_router_bgp(tgen, topo, default_originate_config)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "Verify Default originate knob is configured and default route advertised to R2 , verify on R1 "
    )
    expected_routes = {
        "ipv4": [
            {"network": "0.0.0.0/0", "nexthop": ""},
        ],
        "ipv6": [
            {"network": "::/0", "nexthop": ""},
        ],
    }
    result = verify_bgp_advertised_routes_from_neighbor(
        tgen, topo, dut="r1", peer="r2", expected_routes=expected_routes
    )
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step("Verify the Default route Route in FIB in R2")
    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}

    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r1": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                    }
                ]
            }
        }
        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step("Change route-map RMv4 and RMv6 from deny to permit")
    input_dict_3 = {
        "r1": {
            "route_maps": {
                "RMv4": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv4": {"prefix_lists": "Pv4"}},
                    },
                ],
                "RMv6": [
                    {
                        "action": "permit",
                        "seq_id": "1",
                        "match": {"ipv6": {"prefix_lists": "Pv6"}},
                    },
                ],
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    NOTE = """After changing route-map to permit verify that IPv4 routes Sv41, Sv42, IPv6 routes Sv61 Sv62 present on R1 BGP and routing table , using "show ip route " "show ip bgp nbr xxx received route " "show ipv6 route " "show ipv6 bgp nbr xxx receied route """
    step(NOTE)
    # NOTE(review): the ipv6 entry reuses NEXT_HOP_IP["ipv4"] — presumably
    # both keys resolve to the same blackhole next-hop; confirm against the
    # NEXT_HOP_IP definition.
    expected_routes = {
        "ipv4": [{"network": NETWORK1_1["ipv4"], "nexthop": NEXT_HOP_IP["ipv4"]}],
        "ipv6": [{"network": NETWORK1_1["ipv6"], "nexthop": NEXT_HOP_IP["ipv4"]}],
    }
    result = verify_bgp_received_routes_from_neighbor(
        tgen, topo, dut="r1", peer="r0", expected_routes=expected_routes
    )
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r1": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                    {
                        "network": [NETWORK2_1[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                    },
                ]
            }
        }
        result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
        result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    step("Configure default static route (IPv4 and IPv6) on R2 nexthop as R1 ")
    NEXT_HOP_IP_R1 = {}
    r1_r2_ipv4_neighbor = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
    r1_r2_ipv6_neighbor = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
    NEXT_HOP_IP_R1["ipv4"] = r1_r2_ipv4_neighbor
    NEXT_HOP_IP_R1["ipv6"] = r1_r2_ipv6_neighbor
    static_routes_input = {
        "r2": {
            "static_routes": [
                {
                    "network": "0.0.0.0/0",
                    "next_hop": NEXT_HOP_IP_R1["ipv4"],
                },
                {
                    "network": "0::0/0",
                    "next_hop": NEXT_HOP_IP_R1["ipv6"],
                },
            ]
        }
    }
    result = create_static_routes(tgen, static_routes_input)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "Verify Static default route is taking preference over BGP default routes , BGP default route is inactive IN RIB and static is up and installed in RIB and FIB "
    )
    ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
    ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
    DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_nxt_hop}

    for addr_type in ADDR_TYPES:
        static_routes_input = {
            "r2": {
                "static_routes": [
                    {
                        "network": [DEFAULT_ROUTES[addr_type]],
                        "next_hop": DEFAULT_ROUTE_NXT_HOP[addr_type],
                        "protocol": "static",
                    }
                ]
            }
        }
        result = verify_bgp_rib(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP[addr_type],
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
        result = verify_fib_routes(
            tgen,
            addr_type,
            "r2",
            static_routes_input,
            next_hop=DEFAULT_ROUTE_NXT_HOP[addr_type],
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    write_test_footer(tc_name)
+
+def test_verify_default_originate_after_removing_default_originate_p1(request):
+ """Verify BGP default route after removing default-originate"""
+
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure EBGP between R0 to R1 and IBGP between R1 to R2")
+ step("Configure EBGP between R2 to R3 and IBGP between R3 to R4")
+ r0_local_as = topo["routers"]["r0"]["bgp"]["local_as"]
+ r1_local_as = topo["routers"]["r1"]["bgp"]["local_as"]
+ r2_local_as = topo["routers"]["r2"]["bgp"]["local_as"]
+ r3_local_as = topo["routers"]["r3"]["bgp"]["local_as"]
+ r4_local_as = topo["routers"]["r4"]["bgp"]["local_as"]
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 5000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 5000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure IPv4 and IPv6 static route on R0 and R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ },
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ },
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R0 and R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Configure redistribute connected and static on R0 (R0-R1) on R4 ( R4-R3) IPv4 and IPv6 address family "
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ "ipv6": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ "ipv6": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 static route are configured and up on R1 and R3")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R1 and R3")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate on R1 for R1 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "Verify all the static , connected and loopback routes from R0,R1,R3 and R4 is receieved on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify the Default Originate on R2 nexthop as R1")
+
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is expected but found in RIB -> {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is expected but found in FIB -> {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate on R3 for R3 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ STEP = """After configuring the Default Originate From R3 --> R2
+ Both Default routes from R1 and R3 Should present in R2 BGP RIB
+ The Deafult Route from iBGP is prefferedover EBGP thus
+ Default Route From R1->r2 should only present in R2 FIB """
+ step(STEP)
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Only IBGP default originate is expected in FIB over EBGP {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "No change on static and connected routes which got advertised from R0, R1, R3 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ " Remove default-originate on R1 for R1 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify the Default Originate reoute from R1 to r2 is removed in R2 ")
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ NOTE = """ after removing the Default originate from R1-->R2
+ Verify the BGP Default route received from R3 is present in both BGP RIB and FIB on R2
+ """
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "No change on static and connected routes which got advertised from R0, R1, R3 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Remove default-originate on R3 for R3 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"delete": True}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "After removing default originate , verify default IPv4 and IPv6 BGP routes removed on R2 from R1 ( next-hop as R3) "
+ )
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After removing the default originate the route should not be present in RIB \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "No change on static and connected routes which got advertised from R0, R1, R3 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ write_test_footer(tc_name)
+
def test_verify_default_originate_route_with_GR_p1(request):
    """
    Verify default-originate route with Graceful Restart.

    Configure iBGP R1-R2 and eBGP R2-R3, enable per-peer graceful restart
    (R2 as restarting router, R3 as helper), originate a default route from
    R1 towards R2, then restart bgpd on R2 and verify the default route is
    re-learned in both BGP RIB and FIB on R2.
    """
    global BGP_CONVERGENCE
    global topo
    tgen = get_topogen()
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        check_router_status(tgen)
    reset_config_on_routers(tgen)

    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    step("Configure IPV4 and IPV6 IBGP between R1 and R2 ")
    step("Configure IPV4 and IPV6 EBGP between R2 to R3 ")
    # R1 and R2 move into the same AS (1000) to form iBGP; the remaining
    # routers keep the AS numbers from the topology file (eBGP).
    input_dict = {
        "r0": {"bgp": {"local_as": topo["routers"]["r0"]["bgp"]["local_as"]}},
        "r1": {"bgp": {"local_as": 1000}},
        "r2": {"bgp": {"local_as": 1000}},
        "r3": {"bgp": {"local_as": topo["routers"]["r3"]["bgp"]["local_as"]}},
        "r4": {"bgp": {"local_as": topo["routers"]["r4"]["bgp"]["local_as"]}},
    }
    result = modify_as_number(tgen, topo, input_dict)
    try:
        assert result is True
    except AssertionError:
        logger.info("Expected behaviour: {}".format(result))
        logger.info("BGP config is not created because of invalid ASNs")

    step("After changing the BGP AS Path Verify the BGP Convergence")
    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
        BGP_CONVERGENCE
    )

    step(
        "Configure per peer Graceful restart on R2 ( restarting router) and R3 helper router "
    )
    input_dict = {
        "r2": {
            "bgp": {
                "local_as": get_dut_as_number(tgen, "r2"),
                "graceful-restart": {
                    "graceful-restart": True,
                    "preserve-fw-state": True,
                },
            }
        },
        "r3": {
            "bgp": {
                "local_as": get_dut_as_number(tgen, "r3"),
                "graceful-restart": {"graceful-restart-helper": True},
            }
        },
    }

    configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r2", peer="r3")

    step("verify Graceful restart at R2")
    for addr_type in ADDR_TYPES:
        result = verify_graceful_restart(
            tgen, topo, addr_type, input_dict, dut="r2", peer="r3"
        )
        assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)

    step(
        "Configure default-originate on R1 for R1-R2 neighbor for IPv4 and IPv6 BGP peers "
    )
    local_as = get_dut_as_number(tgen, dut="r1")
    default_originate_config = {
        "r1": {
            "bgp": {
                "local_as": local_as,
                "address_family": {
                    "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
                    "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
                },
            }
        }
    }
    result = create_router_bgp(tgen, topo, default_originate_config)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    step(
        "R2 received default-originate routes and advertised it to R3 , verify on R2 and R3"
    )
    DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}

    def _verify_default_route_on_r2():
        """Check the R1-originated default route in R2's FIB and BGP RIB."""
        # NOTE(review): DEFAULT_ROUTE_NXT_HOP_R1 is a module-level global
        # assumed to be populated by an earlier test case — confirm if this
        # test is ever run standalone.
        for addr_type in ADDR_TYPES:
            static_routes_input = {
                "r2": {
                    "static_routes": [
                        {
                            "network": [DEFAULT_ROUTES[addr_type]],
                            "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
                        }
                    ]
                }
            }
            result = verify_fib_routes(
                tgen,
                addr_type,
                "r2",
                static_routes_input,
                next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            )
            assert result is True, "Testcase {} : Failed \n Error: {}".format(
                tc_name, result
            )
            result = verify_bgp_rib(
                tgen,
                addr_type,
                "r2",
                static_routes_input,
                next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
            )
            assert result is True, "Testcase {} : Failed \n Error: {}".format(
                tc_name, result
            )

    step(
        "After configuring default-originate command , verify default routes are advertised on R2 "
    )
    _verify_default_route_on_r2()

    step(" Kill BGPd session on R2")
    kill_router_daemons(tgen, "r2", ["bgpd"])
    start_router_daemons(tgen, "r2", ["bgpd"])

    step("verify default route is relearned after clear bgp on R2 on BGP RIB and FIB")
    _verify_default_route_on_r2()

    write_test_footer(tc_name)
+
if __name__ == "__main__":
    # Run this test file directly through pytest, keeping stdout unbuffered
    # ("-s") and forwarding any extra command-line arguments.
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 4dd44e3e9e..216756f512 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -29,6 +29,7 @@ from lib.common_config import (
create_common_configurations,
FRRCFG_FILE,
InvalidCLIError,
+ apply_raw_config,
check_address_types,
find_interface_with_greater_ip,
generate_ips,
@@ -74,6 +75,12 @@ def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config
"address_family": {
"ipv4": {
"unicast": {
+ "default_originate":{
+ "neighbor":"R2",
+ "add_type":"lo",
+ "route_map":"rm"
+
+ },
"redistribute": [{
"redist_type": "static",
"attribute": {
@@ -498,6 +505,12 @@ def __create_bgp_unicast_neighbor(
topo, input_dict, router, addr_type, add_neigh
)
config_data.extend(neigh_data)
+ # configure default originate
+ if "default_originate" in addr_data:
+ default_originate_config = __create_bgp_default_originate_neighbor(
+ topo, input_dict, router, addr_type, add_neigh
+ )
+ config_data.extend(default_originate_config)
for addr_type, addr_dict in bgp_data.items():
if not addr_dict or not check_address_types(addr_type):
@@ -515,6 +528,78 @@ def __create_bgp_unicast_neighbor(
return config_data
def __create_bgp_default_originate_neighbor(
    topo, input_dict, router, addr_type, add_neigh=True
):
    """
    Helper API to create neighbor default-originate configuration.

    Parameters
    ----------
    * `topo` : json file data
    * `input_dict` : Input dict data, required when configuring from testcase
    * `router` : router id to be configured
    * `addr_type` : address type ("ipv4"/"ipv6") being configured
    * `add_neigh` : unused; kept for interface compatibility with the
      sibling __create_bgp_* helpers

    Returns
    -------
    list of vtysh configuration lines
    """
    config_data = []
    logger.debug("Entering lib API: __create_bgp_default_originate_neighbor()")

    bgp_data = input_dict["address_family"]
    neigh_data = bgp_data[addr_type]["unicast"]["default_originate"]
    for name, peer_dict in neigh_data.items():
        nh_details = topo[name]

        # Pick the link whose address identifies this neighbor: an explicit
        # "dest-link"/"add_type" override wins, otherwise the link back
        # towards the router being configured is used.
        if "dest-link" in peer_dict:
            link = peer_dict["dest-link"]
        elif "add_type" in peer_dict:
            link = peer_dict["add_type"]
        else:
            link = router
        neighbor_ip = nh_details["links"][link][addr_type].split("/")[0]

        config_data.append("address-family {} unicast".format(addr_type))

        # Build the (optionally negated) default-originate statement once,
        # instead of duplicating it per route-map/delete combination.
        cmd = "neighbor {} default-originate".format(neighbor_ip)
        if "route_map" in peer_dict:
            cmd += " route-map {}".format(peer_dict["route_map"])
        if peer_dict.get("delete"):
            cmd = "no " + cmd
        config_data.append(cmd)

    logger.debug("Exiting lib API: __create_bgp_default_originate_neighbor()")
    return config_data
+
+
def __create_l2vpn_evpn_address_family(
tgen, topo, input_dict, router, config_data=None
):
@@ -4574,3 +4659,876 @@ def verify_tcp_mss(tgen, dut, neighbour, configured_tcp_mss, vrf=None):
return "TCP-MSS Mismatch"
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return False
+
+
def get_dut_as_number(tgen, dut):
    """
    API to get the BGP Autonomous System number of the given DUT.

    params:
    =======
    * `tgen` : Topogen object
    * `dut` : Device Under Test

    returns:
    ========
    Success : DUT Autonomous System number
    Fail : Boolean False
    """
    for router, rnode in tgen.routers().items():
        if router != dut:
            continue
        show_bgp_json = run_frr_cmd(rnode, "sh ip bgp summary json ", isjson=True)
        # Guard the lookup so a missing/empty JSON (e.g. bgpd down) does not
        # raise a KeyError.
        as_number = show_bgp_json.get("ipv4Unicast", {}).get("as")
        if as_number:
            logger.info(
                "[dut {}] DUT contains Autonomous number :: {} ".format(
                    dut, as_number
                )
            )
            return as_number
        logger.error(
            "[dut {}] ERROR....! DUT does not contain any Autonomous number ".format(
                dut
            )
        )
        return False
    # Explicitly report an unknown dut instead of silently returning None.
    logger.error("ERROR...! Unknown dut {} in topology".format(dut))
    return False
+
+
def get_prefix_count_route(
    tgen, topo, dut, peer, vrf=None, link=None, sent=None, received=None
):
    """
    API to return the prefix count of default originate for the given DUT.

    params:
    =======
    * `dut` : Device under test
    * `peer` : neighbor on which you are expecting the route to be received
    * `vrf` : optional VRF name; query that VRF's BGP tables instead of the
      default table
    * `link` : optional link name on `peer` used to derive the neighbor
      addresses (defaults to the link towards `dut`)
    * `sent` : when truthy (and `received` is not), return sent counters
    * `received` : when truthy, return received counters (the default)

    returns:
    ========
    dict with "ipv4_count" and "ipv6_count" on success; None if `dut` is
    not present in the topology.
    """
    # The neighbor addresses are taken from the peer's link towards the DUT
    # (or towards an explicitly named link).
    peer_links = topo["routers"][peer]["links"]
    link_name = link if link else dut
    neighbor_ipv4_address = peer_links[link_name]["ipv4"].split("/")[0]
    neighbor_ipv6_address = peer_links[link_name]["ipv6"].split("/")[0]

    prefix_count = {}
    for router, rnode in tgen.routers().items():
        if router != dut:
            continue

        if vrf:
            ipv4_cmd = "sh ip bgp vrf {} summary json".format(vrf)
            ipv6_cmd = "sh ip bgp vrf {} ipv6 unicast summary json".format(vrf)
        else:
            ipv4_cmd = "sh ip bgp summary json "
            ipv6_cmd = "sh ip bgp ipv6 unicast summary json "
        show_bgp_json_ipv4 = run_frr_cmd(rnode, ipv4_cmd, isjson=True)
        show_bgp_json_ipv6 = run_frr_cmd(rnode, ipv6_cmd, isjson=True)

        # "pfxSnt" only when sent counters are explicitly requested for the
        # default table; `received` wins over `sent` when both are set.
        # NOTE(review): the VRF path has always returned received counters
        # regardless of `sent` — behavior preserved, confirm with callers.
        counter = "pfxSnt" if (not vrf and sent and not received) else "pfxRcd"
        prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"]["peers"][
            neighbor_ipv4_address
        ][counter]
        prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
            neighbor_ipv6_address
        ][counter]

        if vrf:
            logger.info(
                "The Prefix Count of the [DUT:{} : vrf [{}] ] towards neighbor ipv4 : {} and ipv6 : {} is : {}".format(
                    dut,
                    vrf,
                    neighbor_ipv4_address,
                    neighbor_ipv6_address,
                    prefix_count,
                )
            )
        else:
            logger.info(
                "The Prefix Count of the DUT:{} towards neighbor ipv4 : {} and ipv6 : {} is : {}".format(
                    dut, neighbor_ipv4_address, neighbor_ipv6_address, prefix_count
                )
            )
        return prefix_count

    # Log the failure once, after the loop, instead of once per
    # non-matching router.
    logger.error("ERROR...! Unknown dut {} in topology".format(dut))
    return None
+
+
+@retry(retry_timeout=5)
+def verify_rib_default_route(
+ tgen,
+ topo,
+ dut,
+ routes,
+ expected_nexthop,
+ metric=None,
+ origin=None,
+ locPrf=None,
+ expected_aspath=None,
+):
+ """
+ API to verify the 'Default route' in BGP RIB with the attributes the route carries (metric, local preference, )
+
+ param
+ =====
+ dut : device under test
+ routes : default route with expected nexthop
+ expected_nexthop : the nexthop that is expected for the default route
+
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ connected_routes = {}
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+
+ ipv4_routes = run_frr_cmd(rnode, "sh ip bgp json", isjson=True)
+ ipv6_routes = run_frr_cmd(rnode, "sh ip bgp ipv6 unicast json", isjson=True)
+ is_ipv4_default_attrib_found = False
+ is_ipv6_default_attrib_found = False
+
+ default_ipv4_route = routes["ipv4"]
+ default_ipv6_route = "::/0"
+ ipv4_route_Origin = False
+ ipv4_route_local_pref = False
+ ipv4_route_metric = False
+
+ if default_ipv4_route in ipv4_routes["routes"].keys():
+ nxt_hop_count = len(ipv4_routes["routes"][default_ipv4_route])
+ rib_next_hops = []
+ for index in range(nxt_hop_count):
+ rib_next_hops.append(
+ ipv4_routes["routes"][default_ipv4_route][index]["nexthops"][0]["ip"]
+ )
+
+ for nxt_hop in expected_nexthop.items():
+ if nxt_hop[0] == "ipv4":
+ if nxt_hop[1] in rib_next_hops:
+ logger.info(
+ "Default routes [{}] obtained from {} .....PASSED".format(
+ default_ipv4_route, nxt_hop[1]
+ )
+ )
+ else:
+ logger.error(
+ "ERROR ...! Default routes [{}] expected is missing {}".format(
+ default_ipv4_route, nxt_hop[1]
+ )
+ )
+ return False
+
+ else:
+ pass
+
+ if "origin" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_Origin = ipv4_routes["routes"][default_ipv4_route][0]["origin"]
+ if "locPrf" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_local_pref = ipv4_routes["routes"][default_ipv4_route][0][
+ "locPrf"
+ ]
+ if "metric" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_metric = ipv4_routes["routes"][default_ipv4_route][0]["metric"]
+ else:
+ logger.error("ERROR [ DUT {}] : The Default Route Not found in RIB".format(dut))
+ return False
+
+ origin_found = False
+ locPrf_found = False
+ metric_found = False
+ as_path_found = False
+
+ if origin:
+ if origin == ipv4_route_Origin:
+ logger.info(
+ "Dafault Route {} expected origin {} Found in RIB....PASSED".format(
+ default_ipv4_route, origin
+ )
+ )
+ origin_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected Origin is {} obtained {}".format(
+ origin, ipv4_route_Origin
+ )
+ )
+ return False
+ else:
+ origin_found = True
+
+ if locPrf:
+ if locPrf == ipv4_route_local_pref:
+ logger.info(
+ "Dafault Route {} expected local preference {} Found in RIB....PASSED".format(
+ default_ipv4_route, locPrf
+ )
+ )
+ locPrf_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected Local preference is {} obtained {}".format(
+ locPrf, ipv4_route_local_pref
+ )
+ )
+ return False
+ else:
+ locPrf_found = True
+
+ if metric:
+ if metric == ipv4_route_metric:
+ logger.info(
+ "Dafault Route {} expected metric {} Found in RIB....PASSED".format(
+ default_ipv4_route, metric
+ )
+ )
+
+ metric_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected metric is {} obtained {}".format(
+ metric, ipv4_route_metric
+ )
+ )
+ return False
+ else:
+ metric_found = True
+
+ if expected_aspath:
+ obtained_aspath = ipv4_routes["routes"]["0.0.0.0/0"][0]["path"]
+ if expected_aspath in obtained_aspath:
+ as_path_found = True
+ logger.info(
+ "Dafault Route {} expected AS path {} Found in RIB....PASSED".format(
+ default_ipv4_route, expected_aspath
+ )
+ )
+ else:
+ logger.error(
+ "ERROR.....! Expected AS path {} obtained {}..... FAILED ".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+ return False
+ else:
+ as_path_found = True
+
+ if origin_found and locPrf_found and metric_found and as_path_found:
+ is_ipv4_default_attrib_found = True
+ logger.info(
+ "IPV4:: Expected origin ['{}'] , Local Preference ['{}'] , Metric ['{}'] and AS path [{}] is found in RIB".format(
+ origin, locPrf, metric, expected_aspath
+ )
+ )
+ else:
+ is_ipv4_default_attrib_found = False
+ logger.error(
+ "IPV4:: Expected origin ['{}'] Obtained [{}]".format(
+ origin, ipv4_route_Origin
+ )
+ )
+ logger.error(
+ "IPV4:: Expected locPrf ['{}'] Obtained [{}]".format(
+ locPrf, ipv4_route_local_pref
+ )
+ )
+ logger.error(
+ "IPV4:: Expected metric ['{}'] Obtained [{}]".format(
+ metric, ipv4_route_metric
+ )
+ )
+ logger.error(
+ "IPV4:: Expected metric ['{}'] Obtained [{}]".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+
+ route_Origin = False
+ route_local_pref = False
+ route_local_metric = False
+ default_ipv6_route = ""
+ try:
+ ipv6_routes["routes"]["0::0/0"]
+ default_ipv6_route = "0::0/0"
+ except:
+ ipv6_routes["routes"]["::/0"]
+ default_ipv6_route = "::/0"
+ if default_ipv6_route in ipv6_routes["routes"].keys():
+ nxt_hop_count = len(ipv6_routes["routes"][default_ipv6_route])
+ rib_next_hops = []
+ for index in range(nxt_hop_count):
+ rib_next_hops.append(
+ ipv6_routes["routes"][default_ipv6_route][index]["nexthops"][0]["ip"]
+ )
+ try:
+ rib_next_hops.append(
+ ipv6_routes["routes"][default_ipv6_route][index]["nexthops"][1][
+ "ip"
+ ]
+ )
+ except (KeyError, IndexError) as e:
+ logger.error("NO impact ..! Global IPV6 Address not found ")
+
+ for nxt_hop in expected_nexthop.items():
+ if nxt_hop[0] == "ipv6":
+ if nxt_hop[1] in rib_next_hops:
+ logger.info(
+ "Default routes [{}] obtained from {} .....PASSED".format(
+ default_ipv6_route, nxt_hop[1]
+ )
+ )
+ else:
+ logger.error(
+ "ERROR ...! Default routes [{}] expected from {} obtained {}".format(
+ default_ipv6_route, nxt_hop[1], rib_next_hops
+ )
+ )
+ return False
+
+ else:
+ pass
+ if "origin" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_Origin = ipv6_routes["routes"][default_ipv6_route][0]["origin"]
+ if "locPrf" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_local_pref = ipv6_routes["routes"][default_ipv6_route][0]["locPrf"]
+ if "metric" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_local_metric = ipv6_routes["routes"][default_ipv6_route][0]["metric"]
+
+ origin_found = False
+ locPrf_found = False
+ metric_found = False
+ as_path_found = False
+
+ if origin:
+ if origin == route_Origin:
+ logger.info(
+ "Dafault Route {} expected origin {} Found in RIB....PASSED".format(
+ default_ipv6_route, route_Origin
+ )
+ )
+ origin_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected Origin is {} obtained {}".format(
+ origin, route_Origin
+ )
+ )
+ return False
+ else:
+ origin_found = True
+
+ if locPrf:
+ if locPrf == route_local_pref:
+ logger.info(
+ "Dafault Route {} expected Local Preference {} Found in RIB....PASSED".format(
+ default_ipv6_route, route_local_pref
+ )
+ )
+ locPrf_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected Local Preference is {} obtained {}".format(
+ locPrf, route_local_pref
+ )
+ )
+ return False
+ else:
+ locPrf_found = True
+
+ if metric:
+ if metric == route_local_metric:
+ logger.info(
+ "Dafault Route {} expected metric {} Found in RIB....PASSED".format(
+ default_ipv4_route, metric
+ )
+ )
+
+ metric_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected metric is {} obtained {}".format(
+ metric, route_local_metric
+ )
+ )
+ return False
+ else:
+ metric_found = True
+
+ if expected_aspath:
+ obtained_aspath = ipv6_routes["routes"]["::/0"][0]["path"]
+ if expected_aspath in obtained_aspath:
+ as_path_found = True
+ logger.info(
+ "Dafault Route {} expected AS path {} Found in RIB....PASSED".format(
+ default_ipv4_route, expected_aspath
+ )
+ )
+ else:
+ logger.error(
+ "ERROR.....! Expected AS path {} obtained {}..... FAILED ".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+ return False
+ else:
+ as_path_found = True
+
+ if origin_found and locPrf_found and metric_found and as_path_found:
+ is_ipv6_default_attrib_found = True
+ logger.info(
+ "IPV6:: Expected origin ['{}'] , Local Preference ['{}'] , Metric ['{}'] and AS path [{}] is found in RIB".format(
+ origin, locPrf, metric, expected_aspath
+ )
+ )
+ else:
+ is_ipv6_default_attrib_found = False
+ logger.error(
+ "IPV6:: Expected origin ['{}'] Obtained [{}]".format(origin, route_Origin)
+ )
+ logger.error(
+ "IPV6:: Expected locPrf ['{}'] Obtained [{}]".format(
+ locPrf, route_local_pref
+ )
+ )
+ logger.error(
+ "IPV6:: Expected metric ['{}'] Obtained [{}]".format(
+ metric, route_local_metric
+ )
+ )
+ logger.error(
+ "IPV6:: Expected metric ['{}'] Obtained [{}]".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+
+ if is_ipv4_default_attrib_found and is_ipv6_default_attrib_found:
+ logger.info("The attributes are found for default route in RIB ")
+ return True
+ else:
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_fib_default_route(tgen, topo, dut, routes, expected_nexthop):
+ """
+ API to verify the the 'Default route" in FIB
+
+ param
+ =====
+ dut : device under test
+ routes : default route with expected nexthop
+ expected_nexthop : the nexthop that is expected the deafult route
+
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ connected_routes = {}
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_routes = run_frr_cmd(rnode, "sh ip route json", isjson=True)
+ ipv6_routes = run_frr_cmd(rnode, "sh ipv6 route json", isjson=True)
+
+ is_ipv4_default_route_found = False
+ is_ipv6_default_route_found = False
+ if routes["ipv4"] in ipv4_routes.keys():
+ rib_ipv4_nxt_hops = []
+ ipv4_default_route = routes["ipv4"]
+ nxt_hop_count = len(ipv4_routes[ipv4_default_route][0]["nexthops"])
+ for index in range(nxt_hop_count):
+ rib_ipv4_nxt_hops.append(
+ ipv4_routes[ipv4_default_route][0]["nexthops"][index]["ip"]
+ )
+
+ if expected_nexthop["ipv4"] in rib_ipv4_nxt_hops:
+ is_ipv4_default_route_found = True
+ logger.info(
+ "{} default route with next hop {} is found in FIB ".format(
+ ipv4_default_route, expected_nexthop
+ )
+ )
+ else:
+ logger.error(
+ "ERROR .. ! {} default route with next hop {} is not found in FIB ".format(
+ ipv4_default_route, expected_nexthop
+ )
+ )
+ return False
+
+ if routes["ipv6"] in ipv6_routes.keys() or "::/0" in ipv6_routes.keys():
+ rib_ipv6_nxt_hops = []
+ if "::/0" in ipv6_routes.keys():
+ ipv6_default_route = "::/0"
+ elif routes["ipv6"] in ipv6_routes.keys():
+ ipv6_default_route = routes["ipv6"]
+
+ nxt_hop_count = len(ipv6_routes[ipv6_default_route][0]["nexthops"])
+ for index in range(nxt_hop_count):
+ rib_ipv6_nxt_hops.append(
+ ipv6_routes[ipv6_default_route][0]["nexthops"][index]["ip"]
+ )
+
+ if expected_nexthop["ipv6"] in rib_ipv6_nxt_hops:
+ is_ipv6_default_route_found = True
+ logger.info(
+ "{} default route with next hop {} is found in FIB ".format(
+ ipv6_default_route, expected_nexthop
+ )
+ )
+ else:
+ logger.error(
+ "ERROR .. ! {} default route with next hop {} is not found in FIB ".format(
+ ipv6_default_route, expected_nexthop
+ )
+ )
+ return False
+
+ if is_ipv4_default_route_found and is_ipv6_default_route_found:
+ return True
+ else:
+ logger.error(
+ "Default Route for ipv4 and ipv6 address family is not found in FIB "
+ )
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_bgp_advertised_routes_from_neighbor(tgen, topo, dut, peer, expected_routes):
+    """
+    API verifies the routes that are advertised from dut to peer
+
+    command used :
+    "sh ip bgp neighbor <x.x.x.x> advertised-routes" and
+    "sh ip bgp ipv6 unicast neighbor<x::x> advertised-routes"
+
+    dut : Device Under Tests
+    Peer : Peer on which the routes are expected
+    expected_routes : dual stack IPV4-and IPv6 routes to be verified
+
+    returns: True / False
+
+    """
+    result = False
+    logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+    tgen = get_topogen()
+
+    # Neighbor addresses are taken from the peer's side of the peer<->dut link.
+    peer_ipv4_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv4"].split("/")[0]
+    peer_ipv6_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv6"].split("/")[0]
+
+    for router, rnode in tgen.routers().items():
+        if router == dut:
+            ipv4_receieved_routes = run_frr_cmd(
+                rnode,
+                "sh ip bgp neighbor {} advertised-routes json".format(
+                    peer_ipv4_neighbor_ip
+                ),
+                isjson=True,
+            )
+            ipv6_receieved_routes = run_frr_cmd(
+                rnode,
+                "sh ip bgp ipv6 unicast neighbor {} advertised-routes json".format(
+                    peer_ipv6_neighbor_ip
+                ),
+                isjson=True,
+            )
+            ipv4_route_count = 0
+            ipv6_route_count = 0
+            if ipv4_receieved_routes:
+                for index in range(len(expected_routes["ipv4"])):
+                    if (
+                        expected_routes["ipv4"][index]["network"]
+                        in ipv4_receieved_routes["advertisedRoutes"].keys()
+                    ):
+                        ipv4_route_count += 1
+                        logger.info(
+                            "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+                                dut, expected_routes["ipv4"][index]["network"], peer
+                            )
+                        )
+
+                    # NOTE(review): assumes "bgpOriginatingDefaultNetwork" is
+                    # present whenever the route is missing from
+                    # advertisedRoutes; a KeyError is possible otherwise —
+                    # confirm against FRR's JSON output.
+                    elif (
+                        expected_routes["ipv4"][index]["network"]
+                        in ipv4_receieved_routes["bgpOriginatingDefaultNetwork"]
+                    ):
+                        ipv4_route_count += 1
+                        logger.info(
+                            "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+                                dut, expected_routes["ipv4"][index]["network"], peer
+                            )
+                        )
+
+                    else:
+                        # Missing route is only logged here; the final count
+                        # comparison below turns it into a failure.
+                        logger.error(
+                            "ERROR....![DUT : {}] The Expected Route {} is not advertised to {} ".format(
+                                dut, expected_routes["ipv4"][index]["network"], peer
+                            )
+                        )
+            else:
+                logger.error(ipv4_receieved_routes)
+                logger.error(
+                    "ERROR...! [DUT : {}] No IPV4 Routes are advertised to the peer {}".format(
+                        dut, peer
+                    )
+                )
+                return False
+
+            if ipv6_receieved_routes:
+                for index in range(len(expected_routes["ipv6"])):
+                    if (
+                        expected_routes["ipv6"][index]["network"]
+                        in ipv6_receieved_routes["advertisedRoutes"].keys()
+                    ):
+                        ipv6_route_count += 1
+                        logger.info(
+                            "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+                                dut, expected_routes["ipv6"][index]["network"], peer
+                            )
+                        )
+                    elif (
+                        expected_routes["ipv6"][index]["network"]
+                        in ipv6_receieved_routes["bgpOriginatingDefaultNetwork"]
+                    ):
+                        ipv6_route_count += 1
+                        logger.info(
+                            "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+                                dut, expected_routes["ipv6"][index]["network"], peer
+                            )
+                        )
+                    else:
+                        logger.error(
+                            "ERROR....![DUT : {}] The Expected Route {} is not advertised to {} ".format(
+                                dut, expected_routes["ipv6"][index]["network"], peer
+                            )
+                        )
+            else:
+                logger.error(ipv6_receieved_routes)
+                logger.error(
+                    "ERROR...! [DUT : {}] No IPV6 Routes are advertised to the peer {}".format(
+                        dut, peer
+                    )
+                )
+                return False
+
+    # Pass only when every expected route (both address families) was seen.
+    if ipv4_route_count == len(expected_routes["ipv4"]) and ipv6_route_count == len(
+        expected_routes["ipv6"]
+    ):
+        return True
+    else:
+        logger.error(
+            "ERROR ....! IPV4 : Expected Routes -> {} obtained ->{} ".format(
+                expected_routes["ipv4"], ipv4_receieved_routes["advertisedRoutes"]
+            )
+        )
+        logger.error(
+            "ERROR ....! IPV6 : Expected Routes -> {} obtained ->{} ".format(
+                expected_routes["ipv6"], ipv6_receieved_routes["advertisedRoutes"]
+            )
+        )
+        return False
+
+
+@retry(retry_timeout=5)
+def verify_bgp_received_routes_from_neighbor(tgen, topo, dut, peer, expected_routes):
+ """
+ API to verify the bgp received routes
+
+ commad used :
+ =============
+ show ip bgp neighbor <x.x.x.x> received-routes
+ show ip bgp ipv6 unicast neighbor <x::x> received-routes
+
+ params
+ =======
+ dut : Device Under Tests
+ Peer : Peer on which the routs is expected
+ expected_routes : dual stack IPV4-and IPv6 routes to be verified
+ expected_routes
+
+ returns:
+ ========
+ True / False
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+
+ peer_ipv4_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv4"].split("/")[0]
+ peer_ipv6_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv6"].split("/")[0]
+
+ logger.info("Enabling Soft configuration to neighbor INBOUND ")
+ neigbor_dict = {"ipv4": peer_ipv4_neighbor_ip, "ipv6": peer_ipv6_neighbor_ip}
+ result = configure_bgp_soft_configuration(
+ tgen, dut, neigbor_dict, direction="inbound"
+ )
+ assert (
+ result is True
+ ), " Failed to configure the soft configuration \n Error: {}".format(result)
+
+ """sleep of 10 sec is required to get the routes on peer after soft configuration"""
+ sleep(10)
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp neighbor {} received-routes json".format(
+ peer_ipv4_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv6_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp ipv6 unicast neighbor {} received-routes json".format(
+ peer_ipv6_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv4_route_count = 0
+ ipv6_route_count = 0
+ if ipv4_receieved_routes:
+ for index in range(len(expected_routes["ipv4"])):
+ if (
+ expected_routes["ipv4"][index]["network"]
+ in ipv4_receieved_routes["receivedRoutes"].keys()
+ ):
+ ipv4_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is received from {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not received from {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv4_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV4 Routes are received from the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv6_receieved_routes:
+ for index in range(len(expected_routes["ipv6"])):
+ if (
+ expected_routes["ipv6"][index]["network"]
+ in ipv6_receieved_routes["receivedRoutes"].keys()
+ ):
+ ipv6_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is received from {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not received from {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv6_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV6 Routes are received from the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv4_route_count == len(expected_routes["ipv4"]) and ipv6_route_count == len(
+ expected_routes["ipv6"]
+ ):
+ return True
+ else:
+ logger.error(
+ "ERROR ....! IPV4 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv4"], ipv4_receieved_routes["advertisedRoutes"]
+ )
+ )
+ logger.error(
+ "ERROR ....! IPV6 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv6"], ipv6_receieved_routes["advertisedRoutes"]
+ )
+ )
+ return False
+
+
+def configure_bgp_soft_configuration(tgen, dut, neighbor_dict, direction):
+ """
+ Api to configure the bgp soft configuration to show the received routes from peer
+ params
+ ======
+ dut : device under test route on which the sonfiguration to be applied
+ neighbor_dict : dict element contains ipv4 and ipv6 neigbor ip
+ direction : Directionon which it should be applied in/out
+
+ returns:
+ ========
+ boolean
+ """
+ logger.info("Enabling Soft configuration to neighbor INBOUND ")
+ local_as = get_dut_as_number(tgen, dut)
+ ipv4_neighbor = neighbor_dict["ipv4"]
+ ipv6_neighbor = neighbor_dict["ipv6"]
+ direction = direction.lower()
+ if ipv4_neighbor and ipv4_neighbor:
+ raw_config = {
+ dut: {
+ "raw_config": [
+ "router bgp {}".format(local_as),
+ "address-family ipv4 unicast",
+ "neighbor {} soft-reconfiguration {} ".format(
+ ipv4_neighbor, direction
+ ),
+ "exit-address-family",
+ "address-family ipv6 unicast",
+ "neighbor {} soft-reconfiguration {} ".format(
+ ipv6_neighbor, direction
+ ),
+ "exit-address-family",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ logger.info(
+ "Success... [DUT : {}] The soft configuration onis applied on neighbors {} ".format(
+ dut, neighbor_dict
+ )
+ )
+ return True
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index 5d623c94e1..cd070e08b9 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -1592,6 +1592,13 @@ def verify_pim_interface(
if pim_interface in show_ip_pim_interface_json:
pim_intf_json = show_ip_pim_interface_json[pim_interface]
+ else:
+ errormsg = (
+ "[DUT %s]: PIM interface: %s "
+ "PIM interface ip: %s, not Found"
+ % (dut, pim_interface, pim_intf_ip)
+ )
+ return errormsg
# Verifying PIM interface
if (
@@ -3556,6 +3563,78 @@ class McastTesterHelper(HostApplicationHelper):
return True
+@retry(retry_timeout=62)
+def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
+    """
+    Verify local IGMP groups are received from an intended interface
+    by running "show ip igmp join json" command
+
+    Parameters
+    ----------
+    * `tgen`: topogen object
+    * `dut`: device under test
+    * `interface`: interface, from which IGMP groups are configured
+    * `group_addresses`: IGMP group address, or a list of them
+
+    Usage
+    -----
+    dut = "r1"
+    interface = "r1-r0-eth0"
+    group_address = "225.1.1.1"
+    result = verify_local_igmp_groups(tgen, dut, interface, group_address)
+
+    Returns
+    -------
+    errormsg(str) or True
+    """
+
+    logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+    if dut not in tgen.routers():
+        return False
+
+    rnode = tgen.routers()[dut]
+
+    logger.info("[DUT: %s]: Verifying local IGMP groups received:", dut)
+    show_ip_local_igmp_json = run_frr_cmd(rnode, "show ip igmp join json", isjson=True)
+
+    # Accept a single group address as well as a list of addresses.
+    if type(group_addresses) is not list:
+        group_addresses = [group_addresses]
+
+    if interface not in show_ip_local_igmp_json:
+
+        errormsg = (
+            "[DUT %s]: Verifying local IGMP group received"
+            " from interface %s [FAILED]!! " % (dut, interface)
+        )
+        return errormsg
+
+    # Every requested group must appear under the interface's "groups" list;
+    # fail fast with an error message on the first missing group.
+    for grp_addr in group_addresses:
+        found = False
+        for index in show_ip_local_igmp_json[interface]["groups"]:
+            if index["group"] == grp_addr:
+                found = True
+                break
+        if not found:
+            errormsg = (
+                "[DUT %s]: Verifying local IGMP group received"
+                " from interface %s [FAILED]!! "
+                " Expected: %s " % (dut, interface, grp_addr)
+            )
+            return errormsg
+
+    # NOTE(review): grp_addr here is the last group from the loop above, so
+    # the PASSED log reports only the final group, not the whole list.
+    logger.info(
+        "[DUT %s]: Verifying local IGMP group %s received "
+        "from interface %s [PASSED]!! ",
+        dut,
+        grp_addr,
+        interface,
+    )
+
+    logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return True
+
+
def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
"""
Verify ip pim interface traffice by running
diff --git a/tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json b/tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json
new file mode 100644
index 0000000000..fa98987620
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo1/multicast_pim_uplink_topo1.json
@@ -0,0 +1,226 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "i1": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "r3": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py b/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py
new file mode 100644
index 0000000000..8a505a86b5
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py
@@ -0,0 +1,3327 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test multicast pim uplink:
+
+1. Verify mroutes OIL and IIF updated correctly when receivers present inside
+ and outside of DUT
+2. Verify mroutes OIL and IIF updated correctly when source present inside
+ and outside of DUT
+3. Verify Mroutes and BSM forwarding when edge is transit node
+4. Verify mroutes updated correctly after source interface shut/no shut
+5. Verify mroutes updated correctly after receiver interface shut/no shut
+6. Verify mroute updated correctly after sending IGMP prune and join
+7. Verify mroute updated correctly after clear mroute
+8. Verify (*,G) mroute entries after changing the RP configuration
+9. Verify mroute entries after FRR service stop and start
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ reset_config_on_routers,
+ shutdown_bringup_interface,
+ start_router,
+ stop_router,
+ create_static_routes,
+ required_linux_kernel_version,
+ topo_daemons,
+)
+from lib.bgp import create_router_bgp
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_igmp_groups,
+ verify_mroutes,
+ clear_pim_interface_traffic,
+ verify_upstream_iif,
+ clear_mroute,
+ verify_multicast_traffic,
+ verify_pim_rp_info,
+ verify_pim_interface_traffic,
+ verify_pim_state,
+ McastTesterHelper,
+)
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+# Global variables
+# Three disjoint sets of five multicast group prefixes (/32, for PIM/RP
+# configuration) with matching plain group addresses (for IGMP joins).
+GROUP_RANGE_1 = [
+    "225.1.1.1/32",
+    "225.1.1.2/32",
+    "225.1.1.3/32",
+    "225.1.1.4/32",
+    "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_RANGE_2 = [
+    "226.1.1.1/32",
+    "226.1.1.2/32",
+    "226.1.1.3/32",
+    "226.1.1.4/32",
+    "226.1.1.5/32",
+]
+IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"]
+GROUP_RANGE_3 = [
+    "227.1.1.1/32",
+    "227.1.1.2/32",
+    "227.1.1.3/32",
+    "227.1.1.4/32",
+    "227.1.1.5/32",
+]
+IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
+
+# Interface-name lists for the four parallel links between each router pair;
+# populated by get_interfaces_names() during setup_module().
+r1_r2_links = []
+r1_r3_links = []
+r2_r1_links = []
+r3_r1_links = []
+r2_r4_links = []
+r4_r2_links = []
+r4_r3_links = []
+# PIM hello/hold timer values in seconds.
+HELLO_TIMER = 1
+HOLD_TIMER = 3
+
+pytestmark = [pytest.mark.pimd]
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment: builds the topology from the JSON file,
+    starts the routers/daemons and applies the generated configuration.
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.19")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    testdir = os.path.dirname(os.path.realpath(__file__))
+    json_file = "{}/multicast_pim_uplink_topo1.json".format(testdir)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # Get list of daemons that need to be started for this suite.
+    daemons = topo_daemons(tgen, tgen.json_topo)
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, tgen.json_topo)
+
+    # Pre-requisite data: fill the module-level r*_r*_links interface lists.
+    get_interfaces_names(topo)
+
+    # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+    global app_helper
+    app_helper = McastTesterHelper(tgen)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment: stop helper apps and the topology."""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Clean up multicast sender/receiver helper applications started by tests.
+    app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
+def get_interfaces_names(topo):
+    """
+    API to fetch interfaces names and create list, which further would be used
+    for verification
+
+    Populates the module-level r*_r*_links lists with the interface names of
+    the four parallel links between each router pair.
+
+    Parameters
+    ----------
+    * `topo` : input JSON data
+    """
+
+    for link in range(1, 5):
+
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(link)]["interface"]
+        r1_r2_links.append(intf)
+
+        intf = topo["routers"]["r1"]["links"]["r3-link{}".format(link)]["interface"]
+        r1_r3_links.append(intf)
+
+        intf = topo["routers"]["r2"]["links"]["r1-link{}".format(link)]["interface"]
+        r2_r1_links.append(intf)
+
+        intf = topo["routers"]["r3"]["links"]["r1-link{}".format(link)]["interface"]
+        r3_r1_links.append(intf)
+
+        intf = topo["routers"]["r2"]["links"]["r4-link{}".format(link)]["interface"]
+        r2_r4_links.append(intf)
+
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(link)]["interface"]
+        r4_r2_links.append(intf)
+
+        intf = topo["routers"]["r4"]["links"]["r3-link{}".format(link)]["interface"]
+        r4_r3_links.append(intf)
+
+
+def configure_static_routes_for_rp_reachability(tgen, topo):
+    """
+    API to configure static routes for rp reachability
+
+    Parameters
+    ----------
+    * `topo` : input JSON data
+    """
+
+    # Install the loopback and host prefixes via each of the four parallel
+    # links so the RP (and sources/receivers) stay reachable over any link.
+    for i in range(1, 5):
+        static_routes = {
+            "r1": {
+                "static_routes": [
+                    {
+                        "network": [
+                            topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r2"]["links"][
+                            "r1-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                    {
+                        "network": [
+                            topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r3"]["links"][
+                            "r1-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                ]
+            },
+            "r2": {
+                "static_routes": [
+                    {
+                        "network": [
+                            topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r4"]["links"][
+                            "r2-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                    {
+                        "network": [
+                            topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+                            topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r1"]["links"][
+                            "r2-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                ]
+            },
+            "r3": {
+                "static_routes": [
+                    {
+                        "network": [
+                            topo["routers"]["r4"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["i6"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["i7"]["links"]["r4"]["ipv4"],
+                            topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r4"]["links"][
+                            "r3-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                    {
+                        "network": [
+                            topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+                            topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+                            topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r1"]["links"][
+                            "r3-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                ]
+            },
+            "r4": {
+                "static_routes": [
+                    {
+                        "network": [
+                            topo["routers"]["r3"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+                            topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+                            topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r3"]["links"][
+                            "r4-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                    {
+                        "network": [
+                            topo["routers"]["r2"]["links"]["lo"]["ipv4"],
+                            topo["routers"]["i1"]["links"]["r1"]["ipv4"],
+                            topo["routers"]["i2"]["links"]["r1"]["ipv4"],
+                            topo["routers"]["r1"]["links"]["lo"]["ipv4"],
+                        ],
+                        "next_hop": topo["routers"]["r2"]["links"][
+                            "r4-link{}".format(i)
+                        ]["ipv4"].split("/")[0],
+                    },
+                ]
+            },
+        }
+
+        result = create_static_routes(tgen, static_routes)
+        assert result is True, "API {} : Failed Error: {}".\
+            format(sys._getframe().f_code.co_name, result)
+
+
+def verify_state_incremented(state_before, state_after):
+    """
+    API to compare interface traffic state incrementing
+
+    Walks every router/counter in `state_before` and checks the matching
+    counter in `state_after` has not decreased (equal counts are accepted).
+
+    Parameters
+    ----------
+    * `state_before` : State dictionary for any particular instance
+    * `state_after` : State dictionary for any particular instance
+
+    Returns
+    -------
+    True when all counters are >= their previous values, otherwise an
+    error-message string describing the first failing counter.
+    """
+
+    for router, state_data in state_before.items():
+        for state, value in state_data.items():
+            # Strict '>' so an unchanged counter is treated as OK; only a
+            # counter that went backwards is a failure.
+            if state_before[router][state] > state_after[router][state]:
+                errormsg = (
+                    "[DUT: %s]: state %s value has not"
+                    " incremented, Initial value: %s, "
+                    "Current value: %s [FAILED!!]"
+                    % (
+                        router,
+                        state,
+                        state_before[router][state],
+                        state_after[router][state],
+                    )
+                )
+                # Return the message (instead of raising) so callers can
+                # assert on it with their own test-case context.
+                return errormsg
+
+            logger.info(
+                "[DUT: %s]: State %s value is "
+                "incremented, Initial value: %s, Current value: %s"
+                " [PASSED!!]",
+                router,
+                state,
+                state_before[router][state],
+                state_after[router][state],
+            )
+
+    return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_mroutes_updated_with_correct_oil_iif_when_receiver_is_in_and_outside_DUT_p0(
+    request,
+):
+    """
+    Verify mroutes OIL and IIF updated correctly when receivers present inside
+    and outside of DUT
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON: start from a clean slate (no traffic
+    # apps, no stale mroutes, base config restored, traffic counters cleared).
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Enable IGMP on DUT and R4 interface")
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+        input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+        result = create_igmp_config(tgen, topo, input_dict)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+    # Receiver i1 is behind the DUT (r1); receiver i7 is behind R4.
+    input_join = {
+        "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+        "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+    }
+
+    for recvr, recvr_intf in input_join.items():
+        result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure RP as R2 for group range 225.1.1.1-5")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        # RP address is r2's loopback without the mask.
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_1,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+    step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+    step("Send traffic from R4 for group range 225.1.1.1-5")
+
+    input_src = {"i6": topo["routers"]["i6"]["links"]["r4"]["interface"]}
+
+    result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+        " and 'show ip igmp groups json'"
+    )
+
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    result = verify_igmp_groups(tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF and OIL updated on both the nodes")
+
+    step(
+        "(S,G) IIF updated towards shortest path to source on both the nodes "
+        ", verify using 'show ip mroute' and 'show ip mroute json'"
+    )
+
+    source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+    # Expected mroute table: (*,G) entries may arrive on any uplink toward
+    # the RP, so IIF is the whole candidate link list; (S,G) on r4 must use
+    # the directly connected source interface.
+    input_dict_star_sg = [
+        {
+            "dut": "r1",
+            "src_address": "*",
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": "*",
+            "iif": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+        {
+            "dut": "r1",
+            "src_address": source_i6,
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i6,
+            "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_star_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "OIL is updated and traffic is received for all the groups on both "
+        "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Random shut of upstream interface from DUT side")
+    # Shut alternate r1->r2 uplinks (link1 and link3 only: step of 2).
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of upstream interface from DUT verify mroutes has moved "
+        "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+        " 'show ip mroute json'"
+    )
+
+    # Only (*,G) entries are re-verified here; IIF lists already include
+    # every acceptable fallback uplink.
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("Random no shut of upstream interface from DUT side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on"
+        "mroute using 'show ip mroute json'; 'show ip upstream json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+            # Upstream IIF must agree with the mroute IIF after recovery.
+            result = verify_upstream_iif(
+                tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("Shut of upstream interface in alternate fashion from R4 side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, False)
+
+    step(
+        "After shut of upstream interface from R4 verify mroutes has moved "
+        "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+        " 'show ip mroute json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("No shut of upstream interface in alternate fashion from R4 side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on"
+        "mroute using 'show ip mroute json'; 'show ip upstream json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+            result = verify_upstream_iif(
+                tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step(
+        "Send different IGMP joins from DUT and R4 for group range (From DUT "
+        "225.1.1.1-5 and from R4 226.1.1.1-5)"
+    )
+
+    result = app_helper.run_join("i7", IGMP_JOIN_RANGE_2, "r4")
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send traffic for all the groups from R4")
+
+    input_src = {"i6": topo["routers"]["i6"]["links"]["r4"]["interface"]}
+    result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2, "r4")
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+        " and 'show ip igmp groups json'"
+    )
+
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    # r4 has receivers for both ranges, so it must show the combined set.
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    result = verify_igmp_groups(
+        tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF and OIL updated on both the nodes")
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step(
+        "(S,G) IIF updated towards shortest path to source on both the nodes "
+        ", verify using 'show ip mroute' and 'show ip mroute json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] != "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step(
+        "OIL is updated and traffic is received for all the groups on both "
+        "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Random shut of upstream interface from DUT side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of upstream interface from DUT verify mroutes has moved "
+        "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+        " 'show ip mroute json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("Random no shut of upstream interface from DUT side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on"
+        "mroute using 'show ip mroute json'; 'show ip upstream json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+            result = verify_upstream_iif(
+                tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    write_test_footer(tc_name)
+
+
+def test_mroutes_updated_with_correct_oil_iif_when_source_is_in_and_outside_DUT_p0(
+    request,
+):
+    """
+    Verify mroutes OIL and IIF updated correctly when source present inside
+    and outside of DUT
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Creating configuration from JSON: clean slate before the scenario.
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Enable IGMP on DUT and R4 interface")
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+        input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+        result = create_igmp_config(tgen, topo, input_dict)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+    input_join = {
+        "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+        "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+    }
+
+    for recvr, recvr_intf in input_join.items():
+        result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure RP as R2 for group range 225.1.1.1-5")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        # RP is r2's loopback address without the mask.
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_1,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+    step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+    step("Send traffic from R4 for group range 225.1.1.1-5")
+
+    result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+        " and 'show ip igmp groups json'"
+    )
+
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    result = verify_igmp_groups(tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF and OIL updated on both the nodes")
+
+    step(
+        "(S,G) IIF updated towards shortest path to source on both the nodes "
+        ", verify using 'show ip mroute' and 'show ip mroute json'"
+    )
+
+    source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+    # Expected mroutes: (*,G) IIF may be any uplink toward the RP; r4's
+    # (S,G) IIF is the directly connected source (i6) interface.
+    input_dict_star_sg = [
+        {
+            "dut": "r1",
+            "src_address": "*",
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": "*",
+            "iif": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+        {
+            "dut": "r1",
+            "src_address": source_i6,
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i6,
+            "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_star_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "OIL is updated and traffic is received for all the groups on both "
+        "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Random shut of upstream interface from DUT side")
+    # Alternate r1->r2 uplinks only (link1 and link3: step of 2).
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of upstream interface from DUT verify mroutes has moved "
+        "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+        " 'show ip mroute json'"
+    )
+
+    # Re-check only (*,G); the expected IIF lists already cover every
+    # acceptable fallback uplink.
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("Random no shut of upstream interface from DUT side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on"
+        "mroute using 'show ip mroute json'; 'show ip upstream json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("Random shut of upstream interface from R4 side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, False)
+
+    step(
+        "After shut of upstream interface from R4 verify mroutes has moved "
+        "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+        " 'show ip mroute json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("Random no shut of upstream interface from R4 side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on"
+        "mroute using 'show ip mroute json'; 'show ip upstream json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step(
+        "Send different IGMP joins from DUT and R4 for group range (From DUT "
+        "225.1.1.1-5 and from R4 226.1.1.1-5)"
+    )
+
+    result = app_helper.run_join("i7", IGMP_JOIN_RANGE_2, "r4")
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send traffic for all the groups from R4")
+
+    result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2, "r4")
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "IGMP groups are received on DUT and R4 verify using 'show ip igmp groups'"
+        " and 'show ip igmp groups json'"
+    )
+
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    result = verify_igmp_groups(tgen, "r1", intf_r1_i1, IGMP_JOIN_RANGE_1)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    # r4 joined both ranges, so the combined group set is expected there.
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    result = verify_igmp_groups(
+        tgen, "r4", intf_r4_i7, IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_2
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF and OIL updated on both the nodes")
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step(
+        "(S,G) IIF updated towards shortest path to source on both the nodes "
+        ", verify using 'show ip mroute' and 'show ip mroute json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] != "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step(
+        "OIL is updated and traffic is received for all the groups on both "
+        "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Random shut and no shut of upstream interface from DUT side")
+
+    step("Random shut of upstream interface from DUT side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of upstream interface from DUT verify mroutes has moved "
+        "to another interface (R2 or R3) and updated with correct OIL/IIF using"
+        " 'show ip mroute json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    step("Random no shut of upstream interface from DUT side")
+    for i in range(1, 5, 2):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After no shut of upstream interface from DUT verify no change on"
+        "mroute using 'show ip mroute json'; 'show ip upstream json'"
+    )
+
+    for data in input_dict_star_sg:
+        if data["src_address"] == "*":
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+    write_test_footer(tc_name)
+
+
+def test_verify_mroutes_forwarding_p0(request):
+ """
+ Verify Mroutes and BSM forwarding when edge is transit node
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("To make DUT as transit node , shut all the links from R3 to R4 nodes")
+ for i in range(1, 5):
+ intf = topo["routers"]["r3"]["links"]["r4-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r3", intf, False)
+
+ intf = topo["routers"]["r4"]["links"]["r3-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf, False)
+
+ step("Enable IGMP on DUT and R3 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r3_i5 = topo["routers"]["r3"]["links"]["i5"]["interface"]
+ for dut, intf in zip(["r1", "r3"], [intf_r1_i1, intf_r3_i5]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 226.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i5": topo["routers"]["i5"]["links"]["r3"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_2, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 226.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_2,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 226.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_2, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "BSR and candidate RP info populated in R3 node verify using "
+ "'show ip pim rp-info json'"
+ )
+
+ rp_addr_r2 = topo["routers"]["r2"]["links"]["lo"]["ipv4"].split("/")[0]
+
+ result = verify_pim_rp_info(
+ tgen, topo, "r2", GROUP_RANGE_2, "lo", rp_addr_r2, "Static"
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ step(
+ "DUT created (*,G) and (S,G) entries as transit node for 226.1.1.1-5 "
+ "mroutes , OIL is local received and toward R3"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ },
+ {
+ "dut": "r3",
+ "src_address": "*",
+ "iif": r3_r1_links,
+ "oil": topo["routers"]["r3"]["links"]["i5"]["interface"],
+ },
+ {
+ "dut": "r3",
+ "src_address": source_i2,
+ "iif": r3_r1_links,
+ "oil": topo["routers"]["r3"]["links"]["i5"]["interface"],
+ },
+ {
+ "dut": "r3",
+ "src_address": source_i6,
+ "iif": r3_r1_links,
+ "oil": topo["routers"]["r3"]["links"]["i5"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r3_i5 = topo["routers"]["r3"]["links"]["i5"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r3": {"traffic_sent": [intf_r3_i5]},
+ "r1": {"traffic_sent": [intf_r1_i2]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Send different join from R3 (232.1.1.1-5) and traffic "
+ "from R4 for same range"
+ )
+
+ input_join = {"i5": topo["routers"]["i5"]["links"]["r3"]["interface"]}
+ result = app_helper.run_join("i5", IGMP_JOIN_RANGE_3, "r3")
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ input_src = {"i6": topo["routers"]["i6"]["links"]["r4"]["interface"]}
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_3, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("For different join (232.1.1.1-5) DUT created mroute OIL toward R3 only")
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {"dut": "r1", "src_address": "*", "iif": r1_r2_links, "oil": r1_r3_links},
+ {"dut": "r1", "src_address": source_i6, "iif": r1_r2_links, "oil": r1_r3_links},
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_3,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_3
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut from DUT to R2 and no shut from DUT")
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ step(
+ "After Shut (R1-R2) link from DUT, verify IIF on DUT changed to "
+ "different uplink interface on DUT 'show ip mroute json' for R4 so "
+ "connected urce"
+ )
+
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+ }
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Traffic is received fine for R4 source 'show ip multicast json' on DUT")
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("No shut from DUT to R2 and no shut from DUT")
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut and no shut DUT to R2 within 30 sec from DUT")
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+ step(
+ "Shut and No shut in 30 sec time , verify on R2 added 2 entries in mroute "
+ ", shut link OIL got timeout after sometime"
+ )
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_2
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_correctly_after_source_interface_shut_noshut_p1(request):
+    """
+    Verify mroutes updated correctly after source interface shut/no shut
+
+    DUT is r1.  Traffic sources are hosts i2 (attached to r1) and i6
+    (attached to r4); IGMP receivers are i1 (on r1) and i7 (on r4).
+    RP for the group range is r2's loopback.
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Restore the base JSON-derived configuration and flush any stale
+    # multicast state (host apps, mroutes, PIM interface counters).
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if any router is already in a failed state.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Enable IGMP on DUT and R4 interface")
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+        input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+        result = create_igmp_config(tgen, topo, input_dict)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+    input_join = {
+        "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+        "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+    }
+
+    for recvr, recvr_intf in input_join.items():
+        result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure RP as R2 for group range 225.1.1.1-5")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_1,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+    step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+    step("Send traffic from R4 for group range 225.1.1.1-5")
+    step("Send traffic from DUT for group range 225.1.1.1-5")
+
+    input_src = {
+        "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+        "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+    }
+
+    for src, src_intf in input_src.items():
+        result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF and OIL updated on both the nodes")
+
+    # Expected (*,G) state: IIF may be any of the r1<->r2 / r1<->r3
+    # (resp. r4<->r2 / r4<->r3) links; OIL is the local receiver port.
+    # (r1_r2_links etc. are interface lists built at module scope.)
+    input_dict_starg = [
+        {
+            "dut": "r1",
+            "src_address": "*",
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": "*",
+            "iif": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_starg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "(S,G) IIF updated towards shortest path to source on both the nodes "
+        ", verify using 'show ip mroute' and 'show ip mroute json'"
+    )
+
+    source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+    source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+    input_dict_sg = [
+        {
+            "dut": "r1",
+            "src_address": source_i6,
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+            "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i6,
+            "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i2,
+            "iif": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "OIL is updated and traffic is received for all the groups on both "
+        "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("On R1 for local IGMP receivers, OIL towards RP is removed")
+
+    # Negative check: an (S,G) whose OIL equals its IIF must NOT exist
+    # (that would mean the packet is forwarded back out its incoming port).
+    input_dict = [
+        {
+            "dut": "r1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+            "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+        }
+    ]
+
+    for data in input_dict:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+            expected=False,
+        )
+        assert result is not True, (
+            "Testcase {} : Failed "
+            "Mroute IIF and OIF are same \n Error: {}".format(tc_name, result)
+        )
+
+    step("Shut and No shut source interface multiple time")
+
+    # Flap the source-facing interfaces twice to confirm (S,G) state
+    # re-converges the same way on every iteration.
+    for i in range(0, 2):
+        step("Shut and no shut the source interface from DUT")
+        intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf_r1_i2, False)
+        shutdown_bringup_interface(tgen, "r1", intf_r1_i2, True)
+
+        step(
+            "After shut/no shut of source interface verify all the (S,G) "
+            "got re-learn and IIF/OIF pointing any of the links from R2 or "
+            "R3 verify using 'show ip mroute json'"
+        )
+
+        step(
+            "(S,G) OIL on R1 has only respective receiver port and uplink port "
+            " , RP side oil is removed"
+        )
+
+        for data in input_dict_sg:
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+        step("No change seen on (*,G) mroutes")
+
+        for data in input_dict_starg:
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+        step(
+            "Traffic is received for all the groups , verify using "
+            "'show ip multicast count json'"
+        )
+
+        result = verify_multicast_traffic(tgen, input_traffic)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+        step("Shut and no shut the source interface from R4")
+
+        intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf_r4_i6, False)
+        shutdown_bringup_interface(tgen, "r4", intf_r4_i6, True)
+
+        step(
+            "After shut/no shut of source interface verify all the (S,G) "
+            "got re-learn and IIF/OIF pointing any of the links from R2 or "
+            "R3 verify using 'show ip mroute json'"
+        )
+
+        step(
+            "(S,G) OIL on R1 has only respective receiver port and uplink port "
+            " , RP side oil is removed"
+        )
+
+        for data in input_dict_sg:
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+        step("No change seen on (*,G) mroutes")
+
+        for data in input_dict_starg:
+            result = verify_mroutes(
+                tgen,
+                data["dut"],
+                data["src_address"],
+                IGMP_JOIN_RANGE_1,
+                data["iif"],
+                data["oil"],
+            )
+            assert result is True, "Testcase {} : Failed Error: {}".format(
+                tc_name, result
+            )
+
+        step(
+            "Traffic is received for all the groups , verify using "
+            "'show ip multicast count json'"
+        )
+
+        result = verify_multicast_traffic(tgen, input_traffic)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Shut source interface from R4 and no shut immediate after the "
+        "same source upstream expires from DUT"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    shutdown_bringup_interface(tgen, "r4", intf_r4_i6, False)
+    shutdown_bringup_interface(tgen, "r4", intf_r4_i6, True)
+
+    step(
+        "After no shut verify mroutes populated and multicast traffic resume ,"
+        " verify using 'show ip mroute json' 'show ip multicast count json'"
+    )
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Shut source interface from DUT and no shut immediate after the "
+        "same source upstream expires from R4"
+    )
+
+    intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+    shutdown_bringup_interface(tgen, "r1", intf_r1_i2, False)
+    shutdown_bringup_interface(tgen, "r1", intf_r1_i2, True)
+
+    step(
+        "After no shut verify mroutes populated and multicast traffic resume ,"
+        " verify using 'show ip mroute json' 'show ip multicast count json'"
+    )
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_mroutes_updated_correctly_after_receiver_interface_shut_noshut_p1(request):
+    """
+    Verify mroutes updated correctly after receiver interface shut/no shut
+
+    DUT is r1.  Sources are i6 (on r4) and i2 (on r1); receivers are
+    i1 (on r1) and i7 (on r4).  RP for the group range is r2's loopback.
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Restore the base JSON-derived configuration and flush any stale
+    # multicast state (host apps, mroutes, PIM interface counters).
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if any router is already in a failed state.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Enable IGMP on DUT and R4 interface")
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+        input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+        result = create_igmp_config(tgen, topo, input_dict)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+    input_join = {
+        "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+        "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+    }
+
+    for recvr, recvr_intf in input_join.items():
+        result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure RP as R2 for group range 225.1.1.1-5")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_1,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+    step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+    step("Send traffic from R4 for group range 225.1.1.1-5")
+
+    result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Send traffic from DUT for group range 225.1.1.1-5")
+
+    result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "r1")
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF and OIL updated on both the nodes")
+
+    # NOTE(review): the key is named "iif_r1_r2" but holds the full set of
+    # candidate uplinks (r2 and r3 sides) for each DUT — name is misleading.
+    input_dict_starg = [
+        {
+            "dut": "r1",
+            "src_address": "*",
+            "iif_r1_r2": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": "*",
+            "iif_r1_r2": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_starg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif_r1_r2"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif_r1_r2"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "(S,G) IIF updated towards shortest path to source on both the nodes "
+        ", verify using 'show ip mroute' and 'show ip mroute json'"
+    )
+
+    source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+    source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+    input_dict_sg = [
+        {
+            "dut": "r1",
+            "src_address": source_i6,
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+            "oil": r1_r3_links + [topo["routers"]["r1"]["links"]["i1"]["interface"]],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i6,
+            "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i2,
+            "iif": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "OIL is updated and traffic is received for all the groups on both "
+        "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    # NOTE(review): step text says "source interface" but the code flaps the
+    # four r1<->r2 uplinks (r2-link1..4) — confirm intended wording.
+    step("Shut and no shut the source interface from DUT")
+    for i in range(1, 5):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    for i in range(1, 5):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    step(
+        "After shut/no shut of source interface verify all the (S,G) "
+        "got re-learn and IIF/OIF pointing any of the links from R2 or "
+        "R3 verify using 'show ip mroute json'"
+    )
+
+    step(
+        "(S,G) OIL on R1 has only respective receiver port and uplink port "
+        " , RP side oil is removed"
+    )
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Traffic is received for all the groups , verify using "
+        "'show ip multicast count json'"
+    )
+
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    # NOTE(review): step text says "receiver interface" but the code flaps the
+    # r4<->r2 uplinks (r2-link1..4) — confirm intended wording.
+    step("Shut the receiver interface from R4")
+    for i in range(1, 5):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, False)
+
+    for i in range(1, 5):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, True)
+
+    step(
+        "After shut/no shut of source interface verify all the (S,G) "
+        "got re-learn and IIF/OIF pointing any of the links from R2 or "
+        "R3 verify using 'show ip mroute json'"
+    )
+
+    step(
+        "(S,G) OIL on R1 has only respective receiver port and uplink port "
+        " , RP side oil is removed"
+    )
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Traffic is received for all the groups , verify using "
+        "'show ip multicast count json'"
+    )
+
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Shut and no shut the receiver interface from DUT after PIM upstream" " timeout"
+    )
+
+    for i in range(1, 5):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    for i in range(1, 5):
+        intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r1", intf, True)
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Traffic is received for all the groups , verify using "
+        "'show ip multicast count json'"
+    )
+
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Shut and no shut the receiver interface from R4 after PIM upstream " "timeout"
+    )
+
+    for i in range(1, 5):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, False)
+
+    for i in range(1, 5):
+        intf = topo["routers"]["r4"]["links"]["r2-link{}".format(i)]["interface"]
+        shutdown_bringup_interface(tgen, "r4", intf, True)
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(
+            tc_name, result
+        )
+
+    step(
+        "Traffic is received for all the groups , verify using "
+        "'show ip multicast count json'"
+    )
+
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_mroutes_updated_after_sending_IGMP_prune_and_join_p1(request):
+    """
+    Verify mroute updated correctly after sending IGMP prune and join
+
+    DUT is r1.  Sources are i6 (on r4) and i2 (on r1); receivers are
+    i1 (on r1) and i7 (on r4).  RP for the group range is r2's loopback.
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    # Restore the base JSON-derived configuration and flush any stale
+    # multicast state (host apps, mroutes, PIM interface counters).
+    app_helper.stop_all_hosts()
+    clear_mroute(tgen)
+    reset_config_on_routers(tgen)
+    clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if any router is already in a failed state.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    step("Enable IGMP on DUT and R4 interface")
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+    for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+        input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+        result = create_igmp_config(tgen, topo, input_dict)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+    input_join = {
+        "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+        "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+    }
+
+    for recvr, recvr_intf in input_join.items():
+        result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure RP as R2 for group range 225.1.1.1-5")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_1,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+    step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+    step("Send traffic from R4 for group range 225.1.1.1-5")
+    step("Send traffic from DUT for group range 225.1.1.1-5")
+
+    input_src = {
+        "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+        "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+    }
+
+    for src, src_intf in input_src.items():
+        result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF and OIL updated on both the nodes")
+
+    # NOTE(review): the key is named "iif_r1_r2" but holds the full set of
+    # candidate uplinks (r2 and r3 sides) for each DUT — name is misleading.
+    input_dict_starg = [
+        {
+            "dut": "r1",
+            "src_address": "*",
+            "iif_r1_r2": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": "*",
+            "iif_r1_r2": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_starg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif_r1_r2"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+        result = verify_upstream_iif(
+            tgen, data["dut"], data["iif_r1_r2"], data["src_address"], IGMP_JOIN_RANGE_1
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "(S,G) IIF updated towards shortest path to source on both the nodes "
+        ", verify using 'show ip mroute' and 'show ip mroute json'"
+    )
+
+    source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+    source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+    input_dict_sg = [
+        {
+            "dut": "r1",
+            "src_address": source_i6,
+            "iif": r1_r2_links + r1_r3_links,
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r1",
+            "src_address": source_i2,
+            "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+            "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i6,
+            "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+        {
+            "dut": "r4",
+            "src_address": source_i2,
+            "iif": r4_r2_links + r4_r3_links,
+            "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+        },
+    ]
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "OIL is updated and traffic is received for all the groups on both "
+        "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+    )
+
+    intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+    intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP prune and join for receivers connected on DUT")
+    step("Send IGMP prune and join for receivers connected on R4")
+
+    # Stopping all host applications withdraws the IGMP memberships
+    # (equivalent to the receivers sending leave/prune).
+    app_helper.stop_all_hosts()
+
+    step(
+        "After sending prune verify (*,G) and (S,G) entries got cleared "
+        "from all the nodes"
+    )
+
+    for data in input_dict_starg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif_r1_r2"],
+            data["oil"],
+            expected=False,
+        )
+        assert result is not True, (
+            "Testcase {} : Failed "
+            " mroute are still present \n Error: {}".format(tc_name, result)
+        )
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+            expected=False,
+        )
+        assert result is not True, (
+            "Testcase {} : Failed "
+            " mroute are still present \n Error: {}".format(tc_name, result)
+        )
+
+    step(
+        "After sending joins verify (*,G) and (S,G) entries got populated "
+        "again correct OIL and IIF info (any of the link of R2 or R3) verify "
+        "using 'show ip mroute json'"
+    )
+
+    for recvr, recvr_intf in input_join.items():
+        result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    for src, src_intf in input_src.items():
+        result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_starg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif_r1_r2"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Multicast traffic receiver for all the groups verify using "
+        "'show ip multicast count'"
+    )
+
+    input_traffic = {
+        "r1": {"traffic_sent": [intf_r1_i1]},
+        "r4": {"traffic_received": [intf_r4_i6]},
+    }
+    result = verify_multicast_traffic(tgen, input_traffic)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_mroutes_updated_after_after_clear_mroute_p1(request):
+ """
+ Verify mroute updated correctly after clear mroute
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif_r1_r2": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif_r1_r2": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif_r1_r2"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Clear ip mroute from DUT")
+ clear_mroute(tgen, "r1")
+
+ step("Clear ip mroute from r4")
+ clear_mroute(tgen, "r4")
+
+ step(
+ "Multicast traffic receiver for all the groups verify using "
+ "'show ip multicast count'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_after_changing_rp_config_p1(request):
+ """
+ Verify (*,G) mroute entries after changing the RP configuration
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Unconfigure BGP from all nodes as using static routes")
+
+ input_dict = {}
+ DUT = ["r1", "r2", "r3", "r4"]
+ ASN = [100, 200, 300, 400]
+ for dut, asn in zip(DUT, ASN):
+ temp = {dut: {"bgp": {}}}
+ input_dict.update(temp)
+
+ temp[dut]["bgp"].update({"local_as": asn, "delete": True})
+
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Configure static routes between nodes for making RP and source " "reachable")
+
+ configure_static_routes_for_rp_reachability(tgen, topo)
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify RP has (S,G) with none OIL or Upstream should be present using 'show ip mroute json'"
+ " 'show ip pim upstream json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {"dut": "r2", "src_address": source_i2, "iif": r2_r1_links, "oil": r2_r4_links},
+ {"dut": "r2", "src_address": source_i6, "iif": r2_r4_links, "oil": r2_r1_links},
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify pim interface traffic before changing RP")
+
+ intf_traffic = topo["routers"]["r4"]["links"]["r3-link1"]["interface"]
+ state_dict = {"r4": {intf_traffic: ["registerStopRx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(state_before, dict), \
+        ("Testcase{} : Failed \n state_before is not dictionary \n "
+         "Error: {}".\
+         format(tc_name, state_before))
+
+ step("Change the RP to R3 loopback for same group range (225.1.1.1-5)")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1 + GROUP_RANGE_2,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After changing the RP to R3 , verify (S,G) with none OIL and "
+ "upstream got cleared from R2 and created on R3 verify using "
+ "'show ip mroute json'; 'show ip pim upstream json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("(*,G) IIF on DUT is changed towards R3, verify using 'show ip mroute json'")
+
+ input_dict_star_g = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_star_g:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "R4 is sending null register packets to R3 'show ip pim multicast traffic json'"
+ )
+ step("Verify pim interface traffic after changing RP")
+
+    state_after = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(state_after, dict), \
+        ("Testcase{} : Failed \n state_after is not dictionary \n "
+         "Error: {}".\
+         format(tc_name, state_after))
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ step("Send new IGMP join for new group range (226.1.1.1-5)")
+
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_2, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic from R4 to same group range")
+
+ input_src = {
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ "i2": topo["routers"]["i2"]["links"]["r1"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_2, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) and (S,G) on LHR for group range (226.1.1.1-5)")
+ step(
+ "(*,G) joins sent towards new RP (R3) , mroute created verify using "
+ "'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_2
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Traffic is received for groups (226.1.1.1-5) , (S,G) mroute updated "
+ "in DUT and R4 node verify using 'show ip multicast json'"
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Delete and Add the RP for group range 225.1.1.1-5 on DUT")
+
+ input_dict = {
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After delete of RP verify mroute got uninstall from DUT IIF updated as "
+ "unknown in PIM state using 'show ip mroute' 'show ip pim state json'"
+ )
+ step(
+ "No impact seen to on data path as RP config removed after SPT switchover "
+ "verify uptime and traffic using 'show ip mroute' 'show ip mroute count json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed "
+ "(*,G) entried are still present \n Error: {}".format(tc_name, result)
+ )
+
+ else:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ iif = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ oil = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ result = verify_pim_state(tgen, "r1", iif, oil, IGMP_JOIN_RANGE_1, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed "
+ "PIM state is not unknown after deleting RP \n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ input_dict = {
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "After Adding the RP verify IIF updated again towards RP , and DUT"
+ " sending register packets towards RP, verify using 'show ip mroute'"
+ " and 'show ip pim int traffic'"
+ )
+ step(
+ "No impact seen to on data path as RP config removed after SPT "
+ "switchover verify uptime and traffic using 'show ip mroute' "
+ "'show ip mroute count json'"
+ )
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_after_restart_frr_services_p2(request):
+ """
+ Verify mroute entries after FRR service stop and start
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r1", "r4"], [intf_r1_i1, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i1": topo["routers"]["i1"]["links"]["r1"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send traffic from DUT for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "r1")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i2 = topo["routers"]["i2"]["links"]["r1"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i2,
+ "iif": r1_r2_links + [topo["routers"]["r1"]["links"]["i2"]["interface"]],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i2,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the FRR services using kill -9 pid(s) from DUT")
+ stop_router(tgen, "r1")
+
+ step("Start the FRR services from DUT")
+ start_router(tgen, "r1")
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ for data in input_dict_star_sg:
+ if data["src_address"] == "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Stop the traffic and do frr services stop/start")
+ app_helper.stop_all_hosts()
+
+ stop_router(tgen, "r1")
+ start_router(tgen, "r1")
+
+ step(
+ "FRR services started with new PID , (S,G) not present "
+ "on DUT and R4 , verify using 'show ip mroute json'"
+ )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {}: Failed "
+ "mroutes are still present \n Error: {}".format(tc_name, result)
+ )
+
+ step("Stop FRR on R4 node")
+
+ stop_router(tgen, "r4")
+
+ step(
+ "After stop of FRR on R4 node verify mroute on DUT should be "
+ "pimreg/prune state"
+ )
+    step("No OIL created toward R2 on R1 node")
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed "
+ " Mroutes are still present \n Error: {}".format(tc_name, result)
+ )
+
+ step("Start FRR on R4 node")
+
+ start_router(tgen, "r4")
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
index c7ee723b3e..21a7d83845 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
@@ -596,93 +596,6 @@ def test_ospf_hello_tc10_p0(request):
ospf_covergence
)
- step(" Configure hello timer = 65535")
- topo1 = {
- "r0": {
- "links": {
- "r1": {
- "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
- "ospf": {"hello_interval": 65535, "dead_interval": 4},
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- topo1 = {
- "r1": {
- "links": {
- "r0": {
- "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
- "ospf": {"hello_interval": 65535, "dead_interval": 4},
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- step("verify that new timer value is configured.")
- input_dict = {
- "r0": {
- "links": {"r1": {"ospf": {"timerMsecs": 65535 * 1000, "timerDeadSecs": 4}}}
- }
- }
- dut = "r0"
- result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- step("verify that ospf neighbours are full")
- ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
- ospf_covergence
- )
-
- step(" Try configuring timer values outside range for example 65536")
- topo1 = {
- "r0": {
- "links": {
- "r1": {
- "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
- "ospf": {"hello_interval": 65536, "dead_interval": 4},
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result
- )
-
- step("Unconfigure the hello timer from the interface from r1 and r2.")
-
- topo1 = {
- "r1": {
- "links": {
- "r0": {
- "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
- "ospf": {"hello_interval": 65535},
- "delete": True,
- }
- }
- }
- }
-
- result = create_interfaces_cfg(tgen, topo1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- step(
- "Verify that timer value is deleted from intf & " "set to default value 40 sec."
- )
- input_dict = {"r1": {"links": {"r0": {"ospf": {"timerMsecs": 10 * 1000}}}}}
- dut = "r1"
- result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
write_test_footer(tc_name)
diff --git a/tests/topotests/ospfapi/ctester.py b/tests/topotests/ospfapi/ctester.py
index 243fc0613f..8fd202a023 100755
--- a/tests/topotests/ospfapi/ctester.py
+++ b/tests/topotests/ospfapi/ctester.py
@@ -42,6 +42,35 @@ sys.path[0:0] = [CLIENTDIR]
import ospfclient as api # pylint: disable=E0401 # noqa: E402
+async def do_monitor(c, args):
+ cv = asyncio.Condition()
+
+ async def cb(new_router_id, _):
+ assert new_router_id == c.router_id
+ logging.info("NEW ROUTER ID: %s", new_router_id)
+ sys.stdout.flush()
+ async with cv:
+ cv.notify_all()
+
+ logging.debug("API using monitor router ID callback")
+ await c.monitor_router_id(callback=cb)
+
+ for check in args.monitor:
+ logging.info("Waiting for %s", check)
+
+ while True:
+ async with cv:
+ got = c.router_id
+ if str(check) == str(got):
+ break
+ logging.debug("expected '%s' != '%s'\nwaiting on notify", check, got)
+ await cv.wait()
+
+ logging.info("SUCCESS: %s", check)
+ print("SUCCESS: {}".format(check))
+ sys.stdout.flush()
+
+
async def do_wait(c, args):
cv = asyncio.Condition()
@@ -51,7 +80,7 @@ async def do_wait(c, args):
async with cv:
cv.notify_all()
- logging.debug("API using callback")
+ logging.debug("API using monitor reachable callback")
await c.monitor_reachable(callback=cb)
for w in args.wait:
@@ -81,6 +110,8 @@ async def async_main(args):
c._handle_msg_loop() # pylint: disable=W0212
)
+ if args.monitor:
+ await do_monitor(c, args)
if args.wait:
await do_wait(c, args)
return 0
@@ -88,6 +119,9 @@ async def async_main(args):
def main(*args):
ap = argparse.ArgumentParser(args)
+ ap.add_argument(
+ "--monitor", action="append", help="monitor and wait for this router ID"
+ )
ap.add_argument("--server", default="localhost", help="OSPF API server")
ap.add_argument(
"--wait", action="append", help="wait for comma-sep set of reachable routers"
diff --git a/tests/topotests/ospfapi/test_ospf_clientapi.py b/tests/topotests/ospfapi/test_ospf_clientapi.py
index dca91412dc..631c8025b5 100644
--- a/tests/topotests/ospfapi/test_ospf_clientapi.py
+++ b/tests/topotests/ospfapi/test_ospf_clientapi.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
#
-# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2021-2022, LabN Consulting, L.L.C.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@@ -61,7 +61,10 @@ assert os.path.exists(
def _tgen(request):
"Setup/Teardown the environment and provide tgen argument to tests"
nrouters = request.param
- topodef = {f"sw{i}": (f"r{i}", f"r{i+1}") for i in range(1, nrouters)}
+ if nrouters == 1:
+        topodef = {"sw1": ("r1",)}
+ else:
+ topodef = {f"sw{i}": (f"r{i}", f"r{i+1}") for i in range(1, nrouters)}
tgen = Topogen(topodef, request.module.__name__)
tgen.start_topology()
@@ -183,6 +186,52 @@ def test_ospf_reachability(tgen):
_test_reachability(tgen, testbin)
+def _test_router_id(tgen, testbin):
+ r1 = tgen.gears["r1"]
+ waitlist = [
+ "192.168.0.1",
+ "1.1.1.1",
+ "192.168.0.1",
+ ]
+
+ mon_args = [f"--monitor={x}" for x in waitlist]
+
+ p = None
+ try:
+ step("router id: check for initial router id")
+ p = r1.popen(
+ ["/usr/bin/timeout", "120", testbin, "-v", *mon_args],
+ encoding=None, # don't buffer
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ _wait_output(p, "SUCCESS: {}".format(waitlist[0]))
+
+ step("router id: check for modified router id")
+ r1.vtysh_multicmd("conf t\nrouter ospf\nospf router-id 1.1.1.1")
+ _wait_output(p, "SUCCESS: {}".format(waitlist[1]))
+
+ step("router id: check for restored router id")
+ r1.vtysh_multicmd("conf t\nrouter ospf\nospf router-id 192.168.0.1")
+ _wait_output(p, "SUCCESS: {}".format(waitlist[2]))
+ except Exception as error:
+ logging.error("ERROR: %s", error)
+ raise
+ finally:
+ if p:
+ p.terminate()
+ p.wait()
+
+
+@pytest.mark.parametrize("tgen", [2], indirect=True)
+def test_ospf_router_id(tgen):
+ testbin = os.path.join(TESTDIR, "ctester.py")
+ rc, o, e = tgen.gears["r1"].net.cmd_status([testbin, "--help"])
+ logging.info("%s --help: rc: %s stdout: '%s' stderr: '%s'", testbin, rc, o, e)
+ _test_router_id(tgen, testbin)
+
+
def _test_add_data(tgen, apibin):
"Test adding opaque data to domain"
diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in
index c168edc50d..3344ff4954 100755
--- a/tools/frrcommon.sh.in
+++ b/tools/frrcommon.sh.in
@@ -73,7 +73,7 @@ chownfrr() {
vtysh_b () {
[ "$1" = "watchfrr" ] && return 0
- if [ -r "$C_PATH/frr.conf" ]; then
+ if [ ! -r "$C_PATH/frr.conf" ]; then
log_warning_msg "$C_PATH/frr.conf does not exist; skipping config apply"
return 0
fi
diff --git a/vtysh/vtysh_user.c b/vtysh/vtysh_user.c
index 665e6ca90d..1ed284809e 100644
--- a/vtysh/vtysh_user.c
+++ b/vtysh/vtysh_user.c
@@ -71,6 +71,10 @@ static int vtysh_pam(const char *user)
fprintf(stderr, "vtysh_pam: Failure to initialize pam: %s(%d)",
pam_strerror(pamh, ret), ret);
+	if ((ret = pam_acct_mgmt(pamh, 0)) != PAM_SUCCESS)
+		fprintf(stderr, "%s: Failed in account validation: %s(%d)",
+			__func__, pam_strerror(pamh, ret), ret);
+
/* close Linux-PAM */
if (pam_end(pamh, ret) != PAM_SUCCESS) {
pamh = NULL;
diff --git a/zebra/debug_nl.c b/zebra/debug_nl.c
index 89ef7a2076..f8b866cd25 100644
--- a/zebra/debug_nl.c
+++ b/zebra/debug_nl.c
@@ -1083,8 +1083,9 @@ next_rta:
plen = RTA_PAYLOAD(rta);
zlog_debug(" rta [len=%d (payload=%zu) type=(%d) %s]", rta->rta_len,
- plen, rta->rta_type, rtm_rta2str(rta->rta_type));
- switch (rta->rta_type) {
+ plen, rta->rta_type & NLA_TYPE_MASK,
+ rtm_rta2str(rta->rta_type & NLA_TYPE_MASK));
+ switch (rta->rta_type & NLA_TYPE_MASK) {
case RTA_IIF:
case RTA_OIF:
case RTA_PRIORITY:
diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c
index ffd52da8d8..7a4e1304bd 100644
--- a/zebra/if_netlink.c
+++ b/zebra/if_netlink.c
@@ -1182,6 +1182,14 @@ int interface_lookup_netlink(struct zebra_ns *zns)
if (ret < 0)
return ret;
+ ret = netlink_tunneldump_read(zns);
+ if (ret < 0)
+ return ret;
+ ret = netlink_parse_info(netlink_interface, netlink_cmd, &dp_info, 0,
+ true);
+ if (ret < 0)
+ return ret;
+
/* fixup linkages */
zebra_if_update_all_links(zns);
return 0;
@@ -2274,4 +2282,62 @@ uint8_t if_netlink_get_frr_protodown_r_bit(void)
return frr_protodown_r_bit;
}
+/**
+ * netlink_request_tunneldump() - Request all tunnels from the linux kernel
+ *
+ * @zns: Zebra namespace
+ * @family: AF_* netlink family
+ * @ifindex: Interface index of the tunnel interface to dump
+ *
+ * Return: Result status
+ */
+static int netlink_request_tunneldump(struct zebra_ns *zns, int family,
+ int ifindex)
+{
+ struct {
+ struct nlmsghdr n;
+ struct tunnel_msg tmsg;
+ char buf[256];
+ } req;
+
+ /* Form the request */
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tunnel_msg));
+ req.n.nlmsg_type = RTM_GETTUNNEL;
+ req.n.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
+ req.tmsg.family = family;
+ req.tmsg.ifindex = ifindex;
+
+ return netlink_request(&zns->netlink_cmd, &req);
+}
+
+/*
+ * Currently we only ask for vxlan l3svd vni information.
+ * In the future this can be expanded.
+ */
+int netlink_tunneldump_read(struct zebra_ns *zns)
+{
+ int ret = 0;
+ struct zebra_dplane_info dp_info;
+ struct route_node *rn;
+ struct interface *tmp_if = NULL;
+ struct zebra_if *zif;
+
+ zebra_dplane_info_from_zns(&dp_info, zns, true /*is_cmd*/);
+
+ for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) {
+ tmp_if = (struct interface *)rn->info;
+ if (!tmp_if)
+ continue;
+ zif = tmp_if->info;
+ if (!zif || zif->zif_type != ZEBRA_IF_VXLAN)
+ continue;
+
+ ret = netlink_request_tunneldump(zns, PF_BRIDGE,
+ tmp_if->ifindex);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
#endif /* GNU_LINUX */
diff --git a/zebra/if_netlink.h b/zebra/if_netlink.h
index 46eac25377..21ae1713be 100644
--- a/zebra/if_netlink.h
+++ b/zebra/if_netlink.h
@@ -50,6 +50,7 @@ extern enum netlink_msg_status
netlink_put_address_update_msg(struct nl_batch *bth,
struct zebra_dplane_ctx *ctx);
+extern int netlink_tunneldump_read(struct zebra_ns *zns);
extern enum netlink_msg_status
netlink_put_intf_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx);
diff --git a/zebra/interface.c b/zebra/interface.c
index 96e378444d..5f36b88a1c 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -184,13 +184,6 @@ static int if_zebra_new_hook(struct interface *ifp)
static void if_nhg_dependents_check_valid(struct nhg_hash_entry *nhe)
{
zebra_nhg_check_valid(nhe);
- if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) {
- /* If we're in shutdown, this interface event needs to clean
- * up installed NHGs, so don't clear that flag directly.
- */
- if (!zrouter.in_shutdown)
- UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
- }
}
static void if_down_nhg_dependents(const struct interface *ifp)
@@ -1414,7 +1407,7 @@ static void zebra_if_netconf_update_ctx(struct zebra_dplane_ctx *ctx,
struct interface *ifp)
{
struct zebra_if *zif;
- enum dplane_netconf_status_e mpls;
+ enum dplane_netconf_status_e mpls, linkdown;
zif = ifp->info;
if (!zif) {
@@ -1431,10 +1424,17 @@ static void zebra_if_netconf_update_ctx(struct zebra_dplane_ctx *ctx,
else if (mpls == DPLANE_NETCONF_STATUS_DISABLED)
zif->mpls = false;
+ linkdown = dplane_ctx_get_netconf_linkdown(ctx);
+ if (linkdown == DPLANE_NETCONF_STATUS_ENABLED)
+ zif->linkdown = true;
+ else if (linkdown == DPLANE_NETCONF_STATUS_DISABLED)
+ zif->linkdown = false;
+
if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("%s: if %s, ifindex %d, mpls %s",
+ zlog_debug("%s: if %s, ifindex %d, mpls %s linkdown %s",
__func__, ifp->name, ifp->ifindex,
- (zif->mpls ? "ON" : "OFF"));
+ (zif->mpls ? "ON" : "OFF"),
+ (zif->linkdown ? "ON" : "OFF"));
}
void zebra_if_dplane_result(struct zebra_dplane_ctx *ctx)
@@ -1897,6 +1897,9 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
if (zebra_if->mpls)
vty_out(vty, " MPLS enabled\n");
+ if (zebra_if->linkdown)
+ vty_out(vty, " Ignore all routes with linkdown\n");
+
/* Hardware address. */
vty_out(vty, " Type: %s\n", if_link_type_str(ifp->ll_type));
if (ifp->hw_addr_len != 0) {
@@ -2218,6 +2221,7 @@ static void if_dump_vty_json(struct vty *vty, struct interface *ifp,
zebra_if->desc);
json_object_boolean_add(json_if, "mplsEnabled", zebra_if->mpls);
+ json_object_boolean_add(json_if, "linkDown", zebra_if->linkdown);
if (ifp->ifindex == IFINDEX_INTERNAL) {
json_object_boolean_add(json_if, "pseudoInterface", true);
diff --git a/zebra/interface.h b/zebra/interface.h
index 5569711aa7..54ad91a0b2 100644
--- a/zebra/interface.h
+++ b/zebra/interface.h
@@ -129,6 +129,9 @@ struct zebra_if {
/* MPLS status. */
bool mpls;
+ /* Linkdown status */
+ bool linkdown;
+
/* Router advertise configuration. */
uint8_t rtadv_enable;
diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c
index 7e47822a2c..4bd0ac27f6 100644
--- a/zebra/kernel_netlink.c
+++ b/zebra/kernel_netlink.c
@@ -111,6 +111,9 @@ static const struct message nlmsg_str[] = {{RTM_NEWROUTE, "RTM_NEWROUTE"},
{RTM_GETNEXTHOP, "RTM_GETNEXTHOP"},
{RTM_NEWNETCONF, "RTM_NEWNETCONF"},
{RTM_DELNETCONF, "RTM_DELNETCONF"},
+ {RTM_NEWTUNNEL, "RTM_NEWTUNNEL"},
+ {RTM_DELTUNNEL, "RTM_DELTUNNEL"},
+ {RTM_GETTUNNEL, "RTM_GETTUNNEL"},
{0}};
static const struct message rtproto_str[] = {
@@ -393,8 +396,10 @@ static int netlink_information_fetch(struct nlmsghdr *h, ns_id_t ns_id,
case RTM_DELADDR:
case RTM_NEWNETCONF:
case RTM_DELNETCONF:
+ case RTM_NEWTUNNEL:
+ case RTM_DELTUNNEL:
+ case RTM_GETTUNNEL:
return 0;
-
default:
/*
* If we have received this message then
diff --git a/zebra/netconf_netlink.c b/zebra/netconf_netlink.c
index 587f6c749e..cc6a1201a5 100644
--- a/zebra/netconf_netlink.c
+++ b/zebra/netconf_netlink.c
@@ -45,19 +45,22 @@ static struct rtattr *netconf_rta(struct netconfmsg *ncm)
* Handle netconf update about a single interface: create dplane
* context, and enqueue for processing in the main zebra pthread.
*/
-static int netlink_netconf_dplane_update(ns_id_t ns_id, ifindex_t ifindex,
- enum dplane_netconf_status_e mpls_on,
- enum dplane_netconf_status_e mcast_on)
+static int
+netlink_netconf_dplane_update(ns_id_t ns_id, ifindex_t ifindex,
+ enum dplane_netconf_status_e mpls_on,
+ enum dplane_netconf_status_e mcast_on,
+ enum dplane_netconf_status_e linkdown_on)
{
struct zebra_dplane_ctx *ctx;
ctx = dplane_ctx_alloc();
dplane_ctx_set_op(ctx, DPLANE_OP_INTF_NETCONFIG);
- dplane_ctx_set_netconf_ns_id(ctx, ns_id);
- dplane_ctx_set_netconf_ifindex(ctx, ifindex);
+ dplane_ctx_set_ns_id(ctx, ns_id);
+ dplane_ctx_set_ifindex(ctx, ifindex);
dplane_ctx_set_netconf_mpls(ctx, mpls_on);
dplane_ctx_set_netconf_mcast(ctx, mcast_on);
+ dplane_ctx_set_netconf_linkdown(ctx, linkdown_on);
/* Enqueue ctx for main pthread to process */
dplane_provider_enqueue_to_zebra(ctx);
@@ -77,6 +80,8 @@ int netlink_netconf_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
uint32_t ival;
enum dplane_netconf_status_e mpls_on = DPLANE_NETCONF_STATUS_UNKNOWN;
enum dplane_netconf_status_e mcast_on = DPLANE_NETCONF_STATUS_UNKNOWN;
+ enum dplane_netconf_status_e linkdown_on =
+ DPLANE_NETCONF_STATUS_UNKNOWN;
if (h->nlmsg_type != RTM_NEWNETCONF && h->nlmsg_type != RTM_DELNETCONF)
return 0;
@@ -133,12 +138,23 @@ int netlink_netconf_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
mcast_on = DPLANE_NETCONF_STATUS_DISABLED;
}
+ if (tb[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]) {
+ ival = *(uint32_t *)RTA_DATA(
+ tb[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]);
+ if (ival != 0)
+ linkdown_on = DPLANE_NETCONF_STATUS_ENABLED;
+ else
+ linkdown_on = DPLANE_NETCONF_STATUS_DISABLED;
+ }
+
if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("%s: interface %u is mpls on: %d multicast on: %d",
- __func__, ifindex, mpls_on, mcast_on);
+ zlog_debug(
+ "%s: interface %u is mpls on: %d multicast on: %d linkdown: %d",
+ __func__, ifindex, mpls_on, mcast_on, linkdown_on);
/* Create a dplane context and pass it along for processing */
- netlink_netconf_dplane_update(ns_id, ifindex, mpls_on, mcast_on);
+ netlink_netconf_dplane_update(ns_id, ifindex, mpls_on, mcast_on,
+ linkdown_on);
return 0;
}
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 93b2d94671..ad9e13a0f8 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -535,6 +535,9 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
if (rtm->rtm_flags & RTNH_F_ONLINK)
SET_FLAG(nh.flags, NEXTHOP_FLAG_ONLINK);
+ if (rtm->rtm_flags & RTNH_F_LINKDOWN)
+ SET_FLAG(nh.flags, NEXTHOP_FLAG_LINKDOWN);
+
if (num_labels)
nexthop_add_labels(&nh, ZEBRA_LSP_STATIC, num_labels, labels);
@@ -1638,6 +1641,27 @@ static bool _netlink_route_build_singlepath(const struct prefix *p,
return true;
}
+/* This function appends tag value as rtnl flow attribute
+ * to the given netlink msg only if value is less than 256.
+ * Used only if SUPPORT_REALMS enabled.
+ *
+ * @param nlmsg: nlmsghdr structure to fill in.
+ * @param maxlen: The size allocated for the message.
+ * @param tag: The route tag.
+ *
+ * The function returns true if the flow attribute could
+ * be added to the message, otherwise false is returned.
+ */
+static inline bool _netlink_set_tag(struct nlmsghdr *n, unsigned int maxlen,
+ route_tag_t tag)
+{
+ if (tag > 0 && tag <= 255) {
+ if (!nl_attr_put32(n, maxlen, RTA_FLOW, tag))
+ return false;
+ }
+ return true;
+}
+
/* This function takes a nexthop as argument and
* appends to the given netlink msg. If the nexthop
* defines a preferred source, the src parameter
@@ -1656,12 +1680,10 @@ static bool _netlink_route_build_singlepath(const struct prefix *p,
* The function returns true if the nexthop could be added
* to the message, otherwise false is returned.
*/
-static bool _netlink_route_build_multipath(const struct prefix *p,
- const char *routedesc, int bytelen,
- const struct nexthop *nexthop,
- struct nlmsghdr *nlmsg,
- size_t req_size, struct rtmsg *rtmsg,
- const union g_addr **src)
+static bool _netlink_route_build_multipath(
+ const struct prefix *p, const char *routedesc, int bytelen,
+ const struct nexthop *nexthop, struct nlmsghdr *nlmsg, size_t req_size,
+ struct rtmsg *rtmsg, const union g_addr **src, route_tag_t tag)
{
char label_buf[256];
struct vrf *vrf;
@@ -1767,6 +1789,9 @@ static bool _netlink_route_build_multipath(const struct prefix *p,
if (nexthop->weight)
rtnh->rtnh_hops = nexthop->weight - 1;
+ if (!_netlink_set_tag(nlmsg, req_size, tag))
+ return false;
+
nl_attr_rtnh_end(nlmsg, rtnh);
return true;
}
@@ -1801,7 +1826,7 @@ _netlink_mpls_build_multipath(const struct prefix *p, const char *routedesc,
bytelen = (family == AF_INET ? 4 : 16);
return _netlink_route_build_multipath(p, routedesc, bytelen,
nhlfe->nexthop, nlmsg, req_size,
- rtmsg, src);
+ rtmsg, src, 0);
}
static void _netlink_mpls_debug(int cmd, uint32_t label, const char *routedesc)
@@ -1925,6 +1950,7 @@ ssize_t netlink_route_multipath_msg_encode(int cmd,
const struct prefix *p, *src_p;
uint32_t table_id;
struct nlsock *nl;
+ route_tag_t tag = 0;
struct {
struct nlmsghdr n;
@@ -1996,20 +2022,12 @@ ssize_t netlink_route_multipath_msg_encode(int cmd,
return 0;
#if defined(SUPPORT_REALMS)
- {
- route_tag_t tag;
-
- if (cmd == RTM_DELROUTE)
- tag = dplane_ctx_get_old_tag(ctx);
- else
- tag = dplane_ctx_get_tag(ctx);
-
- if (tag > 0 && tag <= 255) {
- if (!nl_attr_put32(&req->n, datalen, RTA_FLOW, tag))
- return 0;
- }
- }
+ if (cmd == RTM_DELROUTE)
+ tag = dplane_ctx_get_old_tag(ctx);
+ else
+ tag = dplane_ctx_get_tag(ctx);
#endif
+
/* Table corresponding to this route. */
table_id = dplane_ctx_get_table(ctx);
if (table_id < 256)
@@ -2032,8 +2050,11 @@ ssize_t netlink_route_multipath_msg_encode(int cmd,
* prefix information to tell the kernel to schwack
* it.
*/
- if (cmd == RTM_DELROUTE)
+ if (cmd == RTM_DELROUTE) {
+ if (!_netlink_set_tag(&req->n, datalen, tag))
+ return 0;
return NLMSG_ALIGN(req->n.nlmsg_len);
+ }
if (dplane_ctx_get_mtu(ctx) || dplane_ctx_get_nh_mtu(ctx)) {
struct rtattr *nest;
@@ -2148,6 +2169,9 @@ ssize_t netlink_route_multipath_msg_encode(int cmd,
? "recursive, single-path"
: "single-path";
+ if (!_netlink_set_tag(&req->n, datalen, tag))
+ return 0;
+
if (!_netlink_route_build_singlepath(
p, routedesc, bytelen, nexthop,
&req->n, &req->r, datalen, cmd))
@@ -2207,7 +2231,8 @@ ssize_t netlink_route_multipath_msg_encode(int cmd,
if (!_netlink_route_build_multipath(
p, routedesc, bytelen, nexthop,
- &req->n, datalen, &req->r, &src1))
+ &req->n, datalen, &req->r, &src1,
+ tag))
return 0;
if (!setsrc && src1) {
diff --git a/zebra/rtadv.c b/zebra/rtadv.c
index b24dc89a68..5d4ed1e424 100644
--- a/zebra/rtadv.c
+++ b/zebra/rtadv.c
@@ -1205,6 +1205,29 @@ void rtadv_delete_prefix(struct zebra_if *zif, const struct prefix *p)
rtadv_prefix_reset(zif, &rp);
}
+static void rtadv_start_interface_events(struct zebra_vrf *zvrf,
+ struct zebra_if *zif)
+{
+ struct adv_if *adv_if = NULL;
+
+ if (zif->ifp->ifindex == IFINDEX_INTERNAL) {
+ if (IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug(
+ "%s(%s) has not configured an ifindex yet, delaying until we have one",
+ zif->ifp->name, zvrf->vrf->name);
+ return;
+ }
+
+ adv_if = adv_if_add(zvrf, zif->ifp->name);
+ if (adv_if != NULL)
+ return; /* Already added */
+
+ if_join_all_router(zvrf->rtadv.sock, zif->ifp);
+
+ if (adv_if_list_count(&zvrf->rtadv.adv_if) == 1)
+ rtadv_event(zvrf, RTADV_START, 0);
+}
+
static void ipv6_nd_suppress_ra_set(struct interface *ifp,
enum ipv6_nd_suppress_ra_status status)
{
@@ -1249,14 +1272,7 @@ static void ipv6_nd_suppress_ra_set(struct interface *ifp,
RTADV_NUM_FAST_REXMITS;
}
- adv_if = adv_if_add(zvrf, ifp->name);
- if (adv_if != NULL)
- return; /* Alread added */
-
- if_join_all_router(zvrf->rtadv.sock, ifp);
-
- if (adv_if_list_count(&zvrf->rtadv.adv_if) == 1)
- rtadv_event(zvrf, RTADV_START, 0);
+ rtadv_start_interface_events(zvrf, zif);
}
}
}
@@ -2780,6 +2796,8 @@ static void rtadv_event(struct zebra_vrf *zvrf, enum rtadv_event event, int val)
void rtadv_if_up(struct zebra_if *zif)
{
+ struct zebra_vrf *zvrf = rtadv_interface_get_zvrf(zif->ifp);
+
/* Enable fast tx of RA if enabled && RA interval is not in msecs */
if (zif->rtadv.AdvSendAdvertisements &&
(zif->rtadv.MaxRtrAdvInterval >= 1000) &&
@@ -2787,6 +2805,13 @@ void rtadv_if_up(struct zebra_if *zif)
zif->rtadv.inFastRexmit = 1;
zif->rtadv.NumFastReXmitsRemain = RTADV_NUM_FAST_REXMITS;
}
+
+ /*
+ * startup the state machine, if it hasn't been already
+ * due to a delayed ifindex on startup ordering
+ */
+ if (zif->rtadv.AdvSendAdvertisements)
+ rtadv_start_interface_events(zvrf, zif);
}
void rtadv_if_init(struct zebra_if *zif)
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 9a30c2b78f..9895943016 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -1606,13 +1606,14 @@ static struct nexthop *nexthop_from_zapi(const struct zapi_nexthop *api_nh,
/* Special handling for IPv4 routes sourced from EVPN:
* the nexthop and associated MAC need to be installed.
*/
- if (CHECK_FLAG(flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN)) {
memset(&vtep_ip, 0, sizeof(vtep_ip));
vtep_ip.ipa_type = IPADDR_V4;
memcpy(&(vtep_ip.ipaddr_v4), &(api_nh->gate.ipv4),
sizeof(struct in_addr));
zebra_rib_queue_evpn_route_add(
api_nh->vrf_id, &api_nh->rmac, &vtep_ip, p);
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN);
}
break;
case NEXTHOP_TYPE_IPV6:
@@ -1639,13 +1640,14 @@ static struct nexthop *nexthop_from_zapi(const struct zapi_nexthop *api_nh,
/* Special handling for IPv6 routes sourced from EVPN:
* the nexthop and associated MAC need to be installed.
*/
- if (CHECK_FLAG(flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_EVPN)) {
memset(&vtep_ip, 0, sizeof(vtep_ip));
vtep_ip.ipa_type = IPADDR_V6;
memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6),
sizeof(struct in6_addr));
zebra_rib_queue_evpn_route_add(
api_nh->vrf_id, &api_nh->rmac, &vtep_ip, p);
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN);
}
break;
case NEXTHOP_TYPE_BLACKHOLE:
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 0da44e3c4e..3a3bac6c74 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -301,10 +301,9 @@ struct dplane_gre_ctx {
* info. The flags values are public, in the dplane.h file...
*/
struct dplane_netconf_info {
- ns_id_t ns_id;
- ifindex_t ifindex;
enum dplane_netconf_status_e mpls_val;
enum dplane_netconf_status_e mcast_val;
+ enum dplane_netconf_status_e linkdown_val;
};
/*
@@ -2334,49 +2333,28 @@ dplane_ctx_neightable_get_mcast_probes(const struct zebra_dplane_ctx *ctx)
return ctx->u.neightable.mcast_probes;
}
-ifindex_t dplane_ctx_get_netconf_ifindex(const struct zebra_dplane_ctx *ctx)
-{
- DPLANE_CTX_VALID(ctx);
-
- return ctx->u.netconf.ifindex;
-}
-
-ns_id_t dplane_ctx_get_netconf_ns_id(const struct zebra_dplane_ctx *ctx)
-{
- DPLANE_CTX_VALID(ctx);
-
- return ctx->u.netconf.ns_id;
-}
-
-void dplane_ctx_set_netconf_ifindex(struct zebra_dplane_ctx *ctx,
- ifindex_t ifindex)
-{
- DPLANE_CTX_VALID(ctx);
-
- ctx->u.netconf.ifindex = ifindex;
-}
-
-void dplane_ctx_set_netconf_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t ns_id)
+enum dplane_netconf_status_e
+dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- ctx->u.netconf.ns_id = ns_id;
+ return ctx->u.netconf.mpls_val;
}
enum dplane_netconf_status_e
-dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx *ctx)
+dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->u.netconf.mpls_val;
+ return ctx->u.netconf.mcast_val;
}
enum dplane_netconf_status_e
-dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx *ctx)
+dplane_ctx_get_netconf_linkdown(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->u.netconf.mcast_val;
+ return ctx->u.netconf.linkdown_val;
}
void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx *ctx,
@@ -2395,6 +2373,15 @@ void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx *ctx,
ctx->u.netconf.mcast_val = val;
}
+void dplane_ctx_set_netconf_linkdown(struct zebra_dplane_ctx *ctx,
+ enum dplane_netconf_status_e val)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.netconf.linkdown_val = val;
+}
+
+
/*
* Retrieve the limit on the number of pending, unprocessed updates.
*/
@@ -2562,7 +2549,7 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
}
/* Check for available evpn encapsulations. */
- if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE))
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN))
continue;
zl3vni = zl3vni_from_vrf(nexthop->vrf_id);
@@ -5439,7 +5426,7 @@ static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
case DPLANE_OP_INTF_NETCONFIG:
zlog_debug("%s: ifindex %d, mpls %d, mcast %d",
dplane_op2str(dplane_ctx_get_op(ctx)),
- dplane_ctx_get_netconf_ifindex(ctx),
+ dplane_ctx_get_ifindex(ctx),
dplane_ctx_get_netconf_mpls(ctx),
dplane_ctx_get_netconf_mcast(ctx));
break;
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index 334d440a2f..d147a3e21c 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -592,19 +592,19 @@ const struct zebra_l2info_gre *
dplane_ctx_gre_get_info(const struct zebra_dplane_ctx *ctx);
/* Interface netconf info */
-ifindex_t dplane_ctx_get_netconf_ifindex(const struct zebra_dplane_ctx *ctx);
-ns_id_t dplane_ctx_get_netconf_ns_id(const struct zebra_dplane_ctx *ctx);
-void dplane_ctx_set_netconf_ifindex(struct zebra_dplane_ctx *ctx,
- ifindex_t ifindex);
-void dplane_ctx_set_netconf_ns_id(struct zebra_dplane_ctx *ctx, ns_id_t ns_id);
enum dplane_netconf_status_e
dplane_ctx_get_netconf_mpls(const struct zebra_dplane_ctx *ctx);
enum dplane_netconf_status_e
dplane_ctx_get_netconf_mcast(const struct zebra_dplane_ctx *ctx);
+enum dplane_netconf_status_e
+dplane_ctx_get_netconf_linkdown(const struct zebra_dplane_ctx *ctx);
+
void dplane_ctx_set_netconf_mpls(struct zebra_dplane_ctx *ctx,
enum dplane_netconf_status_e val);
void dplane_ctx_set_netconf_mcast(struct zebra_dplane_ctx *ctx,
enum dplane_netconf_status_e val);
+void dplane_ctx_set_netconf_linkdown(struct zebra_dplane_ctx *ctx,
+ enum dplane_netconf_status_e val);
/* Namespace fd info - esp. for netlink communication */
const struct zebra_dplane_info *dplane_ctx_get_ns(
diff --git a/zebra/zebra_fpm_netlink.c b/zebra/zebra_fpm_netlink.c
index d4aced47f9..ca897251e2 100644
--- a/zebra/zebra_fpm_netlink.c
+++ b/zebra/zebra_fpm_netlink.c
@@ -208,7 +208,7 @@ static int netlink_route_info_add_nh(struct netlink_route_info *ri,
if (!nhi.gateway && nhi.if_index == 0)
return 0;
- if (re && CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN)) {
nhi.encap_info.encap_type = FPM_NH_ENCAP_VXLAN;
/* Extract VNI id for the nexthop SVI interface */
diff --git a/zebra/zebra_netns_id.c b/zebra/zebra_netns_id.c
index 739ba33036..73d585c1a3 100644
--- a/zebra/zebra_netns_id.c
+++ b/zebra/zebra_netns_id.c
@@ -255,66 +255,59 @@ ns_id_t zebra_ns_id_get(const char *netnspath, int fd_param)
}
}
- if (ret <= 0) {
- if (errno != EEXIST && ret != 0) {
- flog_err(
- EC_LIB_SOCKET,
- "netlink( %u) recvfrom() error 2 when reading: %s",
- fd, safe_strerror(errno));
- close(sock);
- if (netnspath)
- close(fd);
- if (errno == ENOTSUP) {
- zlog_debug("NEWNSID locally generated");
- return zebra_ns_id_get_fallback(netnspath);
- }
- return NS_UNKNOWN;
- }
- /* message to send to netlink : GETNSID */
- memset(buf, 0, NETLINK_SOCKET_BUFFER_SIZE);
- nlh = initiate_nlh(buf, &seq, RTM_GETNSID);
- rt = (struct rtgenmsg *)(buf + nlh->nlmsg_len);
- nlh->nlmsg_len += NETLINK_ALIGN(sizeof(struct rtgenmsg));
- rt->rtgen_family = AF_UNSPEC;
-
- nl_attr_put32(nlh, NETLINK_SOCKET_BUFFER_SIZE, NETNSA_FD, fd);
- nl_attr_put32(nlh, NETLINK_SOCKET_BUFFER_SIZE, NETNSA_NSID,
- ns_id);
-
- ret = send_receive(sock, nlh, seq, buf);
- if (ret < 0) {
- close(sock);
- if (netnspath)
- close(fd);
- return NS_UNKNOWN;
+ if (errno != EEXIST && ret != 0) {
+ flog_err(EC_LIB_SOCKET,
+ "netlink( %u) recvfrom() error 2 when reading: %s", fd,
+ safe_strerror(errno));
+ close(sock);
+ if (netnspath)
+ close(fd);
+ if (errno == ENOTSUP) {
+ zlog_debug("NEWNSID locally generated");
+ return zebra_ns_id_get_fallback(netnspath);
}
- nlh = (struct nlmsghdr *)buf;
- len = ret;
- ret = 0;
- do {
- if (nlh->nlmsg_type >= NLMSG_MIN_TYPE) {
- return_nsid = extract_nsid(nlh, buf);
- if (return_nsid != NS_UNKNOWN)
- break;
- } else if (nlh->nlmsg_type == NLMSG_ERROR) {
- struct nlmsgerr *err =
- (struct nlmsgerr
- *)((char *)nlh
- + NETLINK_ALIGN(sizeof(
- struct
- nlmsghdr)));
- if (err->error < 0)
- errno = -err->error;
- else
- errno = err->error;
- break;
- }
- len = len - NETLINK_ALIGN(nlh->nlmsg_len);
- nlh = (struct nlmsghdr *)((char *)nlh
- + NETLINK_ALIGN(
- nlh->nlmsg_len));
- } while (len != 0 && ret == 0);
+ return NS_UNKNOWN;
}
+ /* message to send to netlink : GETNSID */
+ memset(buf, 0, NETLINK_SOCKET_BUFFER_SIZE);
+ nlh = initiate_nlh(buf, &seq, RTM_GETNSID);
+ rt = (struct rtgenmsg *)(buf + nlh->nlmsg_len);
+ nlh->nlmsg_len += NETLINK_ALIGN(sizeof(struct rtgenmsg));
+ rt->rtgen_family = AF_UNSPEC;
+
+ nl_attr_put32(nlh, NETLINK_SOCKET_BUFFER_SIZE, NETNSA_FD, fd);
+ nl_attr_put32(nlh, NETLINK_SOCKET_BUFFER_SIZE, NETNSA_NSID, ns_id);
+
+ ret = send_receive(sock, nlh, seq, buf);
+ if (ret < 0) {
+ close(sock);
+ if (netnspath)
+ close(fd);
+ return NS_UNKNOWN;
+ }
+ nlh = (struct nlmsghdr *)buf;
+ len = ret;
+ ret = 0;
+ do {
+ if (nlh->nlmsg_type >= NLMSG_MIN_TYPE) {
+ return_nsid = extract_nsid(nlh, buf);
+ if (return_nsid != NS_UNKNOWN)
+ break;
+ } else if (nlh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err =
+ (struct nlmsgerr *)((char *)nlh +
+ NETLINK_ALIGN(sizeof(
+ struct nlmsghdr)));
+ if (err->error < 0)
+ errno = -err->error;
+ else
+ errno = err->error;
+ break;
+ }
+ len = len - NETLINK_ALIGN(nlh->nlmsg_len);
+ nlh = (struct nlmsghdr *)((char *)nlh +
+ NETLINK_ALIGN(nlh->nlmsg_len));
+ } while (len != 0 && ret == 0);
if (netnspath)
close(fd);
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 500f4b0f1b..9a0f48158f 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -1055,6 +1055,12 @@ static void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
+ /* If we're in shutdown, this interface event needs to clean
+ * up installed NHGs, so don't clear that flag directly.
+ */
+ if (!zrouter.in_shutdown)
+ UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
+
/* Update validity of nexthops depending on it */
frr_each(nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep)
zebra_nhg_check_valid(rb_node_dep->nhe);
@@ -1619,6 +1625,17 @@ void zebra_nhg_hash_free(void *p)
zebra_nhg_free((struct nhg_hash_entry *)p);
}
+static void zebra_nhg_timer(struct thread *thread)
+{
+ struct nhg_hash_entry *nhe = THREAD_ARG(thread);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("Nexthop Timer for nhe: %pNG", nhe);
+
+ if (nhe->refcnt == 1)
+ zebra_nhg_decrement_ref(nhe);
+}
+
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
{
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
@@ -1627,6 +1644,15 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
nhe->refcnt--;
+ if (!zrouter.in_shutdown && nhe->refcnt <= 0 &&
+ CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) &&
+ !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) {
+ nhe->refcnt = 1;
+ SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
+ thread_add_timer(zrouter.master, zebra_nhg_timer, nhe,
+ zrouter.nhg_keep, &nhe->timer);
+ }
+
if (!zebra_nhg_depends_is_empty(nhe))
nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
@@ -1642,6 +1668,12 @@ void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
nhe->refcnt++;
+ if (thread_is_scheduled(nhe->timer)) {
+ THREAD_OFF(nhe->timer);
+ nhe->refcnt--;
+ UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
+ }
+
if (!zebra_nhg_depends_is_empty(nhe))
nhg_connected_tree_increment_ref(&nhe->nhg_depends);
}
@@ -2052,11 +2084,7 @@ static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe,
* route and interface is up, its active. We trust kernel routes
* to be good.
*/
- if (ifp
- && (if_is_operative(ifp)
- || (if_is_up(ifp)
- && (type == ZEBRA_ROUTE_KERNEL
- || type == ZEBRA_ROUTE_SYSTEM))))
+ if (ifp && (if_is_operative(ifp)))
return 1;
else
return 0;
@@ -2425,20 +2453,19 @@ static unsigned nexthop_active_check(struct route_node *rn,
zlog_debug("%s: re %p, nexthop %pNHv", __func__, re, nexthop);
/*
- * If the kernel has sent us a NEW route, then
+ * If this is a kernel route, then if the interface is *up* then
* by golly gee whiz it's a good route.
- *
- * If its an already INSTALLED route we have already handled, then the
- * kernel route's nexthop might have became unreachable
- * and we have to handle that.
*/
- if (!CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) &&
- (re->type == ZEBRA_ROUTE_KERNEL ||
- re->type == ZEBRA_ROUTE_SYSTEM)) {
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- goto skip_check;
- }
+ if (re->type == ZEBRA_ROUTE_KERNEL || re->type == ZEBRA_ROUTE_SYSTEM) {
+ struct interface *ifp;
+ ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
+
+ if (ifp && (if_is_operative(ifp) || if_is_up(ifp))) {
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ goto skip_check;
+ }
+ }
vrf_id = zvrf_id(rib_dest_vrf(rib_dest_from_rnode(rn)));
switch (nexthop->type) {
@@ -3290,9 +3317,6 @@ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
rib_handle_nhg_replace(old, new);
- /* if this != 1 at this point, we have a bug */
- assert(old->refcnt == 1);
-
/* We have to decrement its singletons
* because some might not exist in NEW.
*/
@@ -3304,6 +3328,7 @@ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
/* Dont call the dec API, we dont want to uninstall the ID */
old->refcnt = 0;
+ THREAD_OFF(old->timer);
zebra_nhg_free(old);
old = NULL;
}
diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h
index 0863d90a7e..6d2ab248f9 100644
--- a/zebra/zebra_nhg.h
+++ b/zebra/zebra_nhg.h
@@ -79,16 +79,34 @@ struct nhg_hash_entry {
uint32_t flags;
- /* Dependency tree for other entries.
+ /* Dependency trees for other entries.
* For instance a group with two
* nexthops will have two dependencies
* pointing to those nhg_hash_entries.
*
* Using a rb tree here to make lookups
* faster with ID's.
+ *
+ * nhg_depends the RB tree of entries that this
+ * group contains.
+ *
+ * nhg_dependents the RB tree of entries that
+ * this group is being used by
+ *
+ * NHG id 3 with nexthops id 1/2
+ * nhg(3)->nhg_depends has 1 and 2 in the tree
+ * nhg(3)->nhg_dependents is empty
+ *
+ * nhg(1)->nhg_depends is empty
+ * nhg(1)->nhg_dependents is 3 in the tree
+ *
+ * nhg(2)->nhg_depends is empty
+ * nhg(2)->nhg_dependents is 3 in the tree
*/
struct nhg_connected_tree_head nhg_depends, nhg_dependents;
+ struct thread *timer;
+
/*
* Is this nexthop group valid, ie all nexthops are fully resolved.
* What is fully resolved? It's a nexthop that is either self contained
@@ -130,6 +148,15 @@ struct nhg_hash_entry {
#define NEXTHOP_GROUP_PROTO_RELEASED (1 << 5)
/*
+ * When deleting a NHG notice that it is still installed
+ * and if it is, slightly delay the actual removal to
+ * the future. So that upper level protocols might
+ * be able to take advantage of some NHG's that
+ * are there
+ */
+#define NEXTHOP_GROUP_KEEP_AROUND (1 << 6)
+
+/*
* Track FPM installation status..
*/
#define NEXTHOP_GROUP_FPM (1 << 6)
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 6801280012..63f15b0f20 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -3302,7 +3302,7 @@ static void _route_entry_dump_nh(const struct route_entry *re,
if (nexthop->weight)
snprintf(wgt_str, sizeof(wgt_str), "wgt %d,", nexthop->weight);
- zlog_debug("%s: %s %s[%u] %svrf %s(%u) %s%s with flags %s%s%s%s%s%s%s%s",
+ zlog_debug("%s: %s %s[%u] %svrf %s(%u) %s%s with flags %s%s%s%s%s%s%s%s%s",
straddr, (nexthop->rparent ? " NH" : "NH"), nhname,
nexthop->ifindex, label_str, vrf ? vrf->name : "Unknown",
nexthop->vrf_id,
@@ -3327,7 +3327,9 @@ static void _route_entry_dump_nh(const struct route_entry *re,
(CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)
? "BACKUP " : ""),
(CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_SRTE)
- ? "SRTE " : ""));
+ ? "SRTE " : ""),
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN)
+ ? "EVPN " : ""));
}
@@ -3764,6 +3766,8 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
}
if (same) {
+ struct nexthop *tmp_nh;
+
if (fromkernel && CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE)
&& !allow_delete) {
rib_install_kernel(rn, same, NULL);
@@ -3776,12 +3780,10 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
* EVPN - the nexthop (and associated MAC) need to be
* uninstalled if no more refs.
*/
- if (CHECK_FLAG(flags, ZEBRA_FLAG_EVPN_ROUTE)) {
- struct nexthop *tmp_nh;
-
- for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) {
- struct ipaddr vtep_ip;
+ for (ALL_NEXTHOPS(re->nhe->nhg, tmp_nh)) {
+ struct ipaddr vtep_ip;
+ if (CHECK_FLAG(tmp_nh->flags, NEXTHOP_FLAG_EVPN)) {
memset(&vtep_ip, 0, sizeof(struct ipaddr));
if (afi == AFI_IP) {
vtep_ip.ipa_type = IPADDR_V4;
@@ -4171,21 +4173,15 @@ unsigned long rib_score_proto(uint8_t proto, unsigned short instance)
void rib_close_table(struct route_table *table)
{
struct route_node *rn;
- struct rib_table_info *info;
rib_dest_t *dest;
if (!table)
return;
- info = route_table_get_info(table);
-
for (rn = route_top(table); rn; rn = srcdest_route_next(rn)) {
dest = rib_dest_from_rnode(rn);
if (dest && dest->selected_fib) {
- if (info->safi == SAFI_UNICAST)
- hook_call(rib_update, rn, NULL);
-
rib_uninstall_kernel(rn, dest->selected_fib);
dest->selected_fib = NULL;
}
diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c
index 6b4a7543cd..92d519bad1 100644
--- a/zebra/zebra_router.c
+++ b/zebra/zebra_router.c
@@ -278,6 +278,8 @@ void zebra_router_init(bool asic_offload, bool notify_on_ack)
zrouter.packets_to_process = ZEBRA_ZAPI_PACKETS_TO_PROCESS;
+ zrouter.nhg_keep = ZEBRA_DEFAULT_NHG_KEEP_TIMER;
+
zebra_vxlan_init();
zebra_mlag_init();
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index 7aca91959c..c96c8e5f46 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -219,6 +219,9 @@ struct zebra_router {
bool notify_on_ack;
bool supports_nhgs;
+
+#define ZEBRA_DEFAULT_NHG_KEEP_TIMER 180
+ uint32_t nhg_keep;
};
#define GRACEFUL_RESTART_TIME 60
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index be19b07d9d..011fa2a1e5 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -376,6 +376,9 @@ static void show_nexthop_detail_helper(struct vty *vty,
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
vty_out(vty, " onlink");
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
+ vty_out(vty, " linkdown");
+
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
vty_out(vty, " (recursive)");
@@ -657,6 +660,9 @@ static void show_route_nexthop_helper(struct vty *vty,
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
vty_out(vty, " onlink");
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
+ vty_out(vty, " linkdown");
+
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
vty_out(vty, " (recursive)");
@@ -837,6 +843,9 @@ static void show_nexthop_json_helper(json_object *json_nexthop,
json_object_boolean_true_add(json_nexthop,
"onLink");
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
+ json_object_boolean_true_add(json_nexthop, "linkDown");
+
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
json_object_boolean_true_add(json_nexthop,
"recursive");
@@ -1433,14 +1442,22 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe)
struct nhg_connected *rb_node_dep = NULL;
struct nexthop_group *backup_nhg;
char up_str[MONOTIME_STRLEN];
+ char time_left[MONOTIME_STRLEN];
uptime2str(nhe->uptime, up_str, sizeof(up_str));
vty_out(vty, "ID: %u (%s)\n", nhe->id, zebra_route_string(nhe->type));
- vty_out(vty, " RefCnt: %u\n", nhe->refcnt);
+ vty_out(vty, " RefCnt: %u", nhe->refcnt);
+ if (thread_is_scheduled(nhe->timer))
+ vty_out(vty, " Time to Deletion: %s",
+ thread_timer_to_hhmmss(time_left, sizeof(time_left),
+ nhe->timer));
+ vty_out(vty, "\n");
+
vty_out(vty, " Uptime: %s\n", up_str);
vty_out(vty, " VRF: %s\n", vrf_id_to_name(nhe->vrf_id));
+
if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) {
vty_out(vty, " Valid");
if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED))
@@ -3842,11 +3859,31 @@ DEFUN (no_ip_zebra_import_table,
return (zebra_import_table(AFI_IP, VRF_DEFAULT, table_id, 0, NULL, 0));
}
+DEFPY (zebra_nexthop_group_keep,
+ zebra_nexthop_group_keep_cmd,
+ "[no] zebra nexthop-group keep (1-3600)",
+ NO_STR
+ ZEBRA_STR
+ "Nexthop-Group\n"
+ "How long to keep\n"
+ "Time in seconds from 1-3600\n")
+{
+ if (no)
+ zrouter.nhg_keep = ZEBRA_DEFAULT_NHG_KEEP_TIMER;
+ else
+ zrouter.nhg_keep = keep;
+
+ return CMD_SUCCESS;
+}
+
static int config_write_protocol(struct vty *vty)
{
if (allow_delete)
vty_out(vty, "allow-external-route-update\n");
+ if (zrouter.nhg_keep != ZEBRA_DEFAULT_NHG_KEEP_TIMER)
+ vty_out(vty, "zebra nexthop-group keep %u\n", zrouter.nhg_keep);
+
if (zrouter.ribq->spec.hold != ZEBRA_RIB_PROCESS_HOLD_TIME)
vty_out(vty, "zebra work-queue %u\n", zrouter.ribq->spec.hold);
@@ -4425,6 +4462,7 @@ void zebra_vty_init(void)
install_element(CONFIG_NODE, &ip_multicast_mode_cmd);
install_element(CONFIG_NODE, &no_ip_multicast_mode_cmd);
+ install_element(CONFIG_NODE, &zebra_nexthop_group_keep_cmd);
install_element(CONFIG_NODE, &ip_zebra_import_table_distance_cmd);
install_element(CONFIG_NODE, &no_ip_zebra_import_table_cmd);
install_element(CONFIG_NODE, &zebra_workqueue_timer_cmd);
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index 927df14fbe..4cf309f7fc 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -5273,10 +5273,6 @@ int zebra_vxlan_process_vrf_vni_cmd(struct zebra_vrf *zvrf, vni_t vni,
add ? "ADD" : "DEL");
if (add) {
-
- /* Remove L2VNI if present */
- zebra_vxlan_handle_vni_transition(zvrf, vni, add);
-
/* check if the vni is already present under zvrf */
if (zvrf->l3vni) {
snprintf(err, err_str_sz,
@@ -5292,6 +5288,9 @@ int zebra_vxlan_process_vrf_vni_cmd(struct zebra_vrf *zvrf, vni_t vni,
return -1;
}
+ /* Remove L2VNI if present */
+ zebra_vxlan_handle_vni_transition(zvrf, vni, add);
+
/* add the L3-VNI to the global table */
zl3vni = zl3vni_add(vni, zvrf_id(zvrf));