-rw-r--r--  bgpd/bgp_evpn_mh.c | 28
-rw-r--r--  bgpd/bgp_fsm.h | 8
-rw-r--r--  bgpd/bgp_route.c | 29
-rw-r--r--  bgpd/bgp_table.c | 10
-rw-r--r--  bgpd/bgp_vty.c | 52
-rw-r--r--  doc/developer/building-frr-for-ubuntu2004.rst | 2
-rw-r--r--  doc/user/isisd.rst | 24
-rw-r--r--  doc/user/ospf6d.rst | 9
-rw-r--r--  isisd/isis_lfa.c | 23
-rw-r--r--  lib/nexthop.c | 10
-rw-r--r--  lib/northbound_cli.c | 2
-rw-r--r--  lib/prefix.c | 54
-rw-r--r--  lib/sockunion.c | 59
-rw-r--r--  lib/srcdest_table.c | 9
-rw-r--r--  lib/thread.c | 203
-rw-r--r--  lib/thread.h | 15
-rw-r--r--  ospf6d/ospf6_area.c | 48
-rw-r--r--  ospf6d/ospf6_spf.c | 53
-rw-r--r--  ospf6d/ospf6_spf.h | 4
-rw-r--r--  pimd/pim_nb_config.c | 8
-rw-r--r--  tests/isisd/test_isis_spf.refout | 86
-rw-r--r--  tests/topotests/all-protocol-startup/test_all_protocol_startup.py | 4
-rw-r--r--  tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py | 4
-rw-r--r--  tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py | 1
-rwxr-xr-x  tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py | 1
-rw-r--r--  tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py | 3
-rw-r--r--  tests/topotests/bfd-topo1/test_bfd_topo1.py | 3
-rw-r--r--  tests/topotests/bfd-topo2/test_bfd_topo2.py | 3
-rw-r--r--  tests/topotests/bfd-topo3/test_bfd_topo3.py | 2
-rw-r--r--  tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py | 3
-rw-r--r--  tests/topotests/bgp-aggregator-zero/test_bgp_aggregator_zero.py | 2
-rw-r--r--  tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py | 3
-rw-r--r--  tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py | 5
-rw-r--r--  tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py | 5
-rw-r--r--  tests/topotests/bgp-evpn-mh/test_evpn_mh.py | 1
-rwxr-xr-x  tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py | 1
-rwxr-xr-x  tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py | 24
-rw-r--r--  tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py | 2
-rw-r--r--  tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py | 2
-rw-r--r--  tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py | 3
-rw-r--r--  tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py | 2
-rw-r--r--  tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py | 2
-rw-r--r--  tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py | 2
-rw-r--r--  tests/topotests/bgp_communities_topo1/test_bgp_communities.py | 3
-rw-r--r--  tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py | 3
-rw-r--r--  tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py | 2
-rw-r--r--  tests/topotests/bgp_features/peer1/exa_readpipe.py | 2
-rw-r--r--  tests/topotests/bgp_features/peer2/exa_readpipe.py | 2
-rw-r--r--  tests/topotests/bgp_features/peer3/exa_readpipe.py | 2
-rw-r--r--  tests/topotests/bgp_features/peer4/exa_readpipe.py | 2
-rwxr-xr-x  tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py | 1
-rwxr-xr-x  tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py | 1
-rwxr-xr-x  tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py | 2
-rw-r--r--  tests/topotests/bgp_lu_topo1/test_bgp_lu.py | 34
-rwxr-xr-x  tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py | 1
-rw-r--r--  tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py | 24
-rw-r--r--  tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py | 8
-rw-r--r--  tests/topotests/example-test/test_template.py | 2
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py | 2
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py | 2
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py | 2
-rwxr-xr-x  tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py | 1
-rwxr-xr-x  tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py | 135
-rwxr-xr-x  tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py | 1
-rwxr-xr-x  tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py | 463
-rw-r--r--  tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py | 1
-rwxr-xr-x  tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py | 1
-rw-r--r--  tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py | 1
-rw-r--r--  tests/topotests/isis-topo1/test_isis_topo1.py | 1
-rw-r--r--  tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py | 1
-rw-r--r--  tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py | 1
-rw-r--r--  tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py | 1
-rw-r--r--  tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py | 1
-rw-r--r--  tests/topotests/lib/ospf.py | 4
-rw-r--r--  tests/topotests/lib/pim.py | 106
-rw-r--r--  tests/topotests/lib/snmptest.py | 13
-rw-r--r--  tests/topotests/lib/topogen.py | 2
-rw-r--r--  tests/topotests/lib/topotest.py | 8
-rw-r--r--  tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py | 23
-rw-r--r--  tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py | 3
-rwxr-xr-x  tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py | 2
-rwxr-xr-x  tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py | 16
-rwxr-xr-x  tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py | 218
-rwxr-xr-x  tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py | 18
-rwxr-xr-x  tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py | 214
-rw-r--r--  tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py | 1
-rw-r--r--  tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py | 8
-rw-r--r--  tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py | 1
-rw-r--r--  tests/topotests/ospf-topo1/test_ospf_topo1.py | 1
-rw-r--r--  tests/topotests/ospf6-topo1/test_ospf6_topo1.py | 5
-rw-r--r--  tests/topotests/ospf_basic_functionality/test_ospf_chaos.py | 332
-rw-r--r--  tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py | 6
-rw-r--r--  tests/topotests/pbr-topo1/test_pbr_topo1.py | 1
-rw-r--r--  tests/topotests/pim-basic/test_pim.py | 1
-rw-r--r--  tests/topotests/pytest.ini | 1
-rwxr-xr-x  tests/topotests/simple-snmp-test/test_simple_snmp.py | 2
-rw-r--r--  tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py | 10
-rw-r--r--  tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py | 40
-rw-r--r--  tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py | 9
-rw-r--r--  tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py | 8
-rw-r--r--  tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py | 67
-rw-r--r--  tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py | 8
-rw-r--r--  tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py | 10
-rw-r--r--  tests/topotests/zebra_rib/test_zebra_rib.py | 62
-rw-r--r--  vtysh/vtysh.c | 19
-rw-r--r--  zebra/rt_netlink.c | 56
106 files changed, 1787 insertions, 1005 deletions
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index 0b02669211..175514f639 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -74,6 +74,7 @@ bgp_evpn_es_path_update_on_vtep_chg(struct bgp_evpn_es_vtep *es_vtep,
bool active);
esi_t zero_esi_buf, *zero_esi = &zero_esi_buf;
+static int bgp_evpn_run_consistency_checks(struct thread *t);
/******************************************************************************
* per-ES (Ethernet Segment) routing table
@@ -1701,7 +1702,7 @@ static void bgp_evpn_local_es_activate(struct bgp *bgp, struct bgp_evpn_es *es,
/* generate EAD-ES */
build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG, &es->esi,
es->originator_ip);
- bgp_evpn_type1_route_update(bgp, es, NULL, &p);
+ (void)bgp_evpn_type1_route_update(bgp, es, NULL, &p);
}
}
@@ -3578,6 +3579,19 @@ void bgp_evpn_es_evi_show_vni(struct vty *vty, vni_t vni,
* show commands) at this point. A more drastic action can be executed (based
* on user config) in the future.
*/
+static void bgp_evpn_es_cons_checks_timer_start(void)
+{
+ if (!bgp_mh_info->consistency_checking || bgp_mh_info->t_cons_check)
+ return;
+
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
+ zlog_debug("periodic consistency checking started");
+
+ thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
+ BGP_EVPN_CONS_CHECK_INTERVAL,
+ &bgp_mh_info->t_cons_check);
+}
+
/* queue up the es for background consistency checks */
static void bgp_evpn_es_cons_checks_pend_add(struct bgp_evpn_es *es)
{
@@ -3589,6 +3603,10 @@ static void bgp_evpn_es_cons_checks_pend_add(struct bgp_evpn_es *es)
/* already queued for consistency checking */
return;
+ /* start the periodic timer for consistency checks if it is not
+ * already running */
+ bgp_evpn_es_cons_checks_timer_start();
+
SET_FLAG(es->flags, BGP_EVPNES_CONS_CHECK_PEND);
listnode_init(&es->pend_es_listnode, es);
listnode_add_after(bgp_mh_info->pend_es_list,
@@ -3807,11 +3825,6 @@ void bgp_evpn_mh_init(void)
bgp_mh_info->install_l3nhg = false;
bgp_mh_info->host_routes_use_l3nhg = BGP_EVPN_MH_USE_ES_L3NHG_DEF;
- if (bgp_mh_info->consistency_checking)
- thread_add_timer(bm->master, bgp_evpn_run_consistency_checks,
- NULL, BGP_EVPN_CONS_CHECK_INTERVAL,
- &bgp_mh_info->t_cons_check);
-
memset(&zero_esi_buf, 0, sizeof(esi_t));
}
@@ -3827,7 +3840,8 @@ void bgp_evpn_mh_finish(void)
es_next) {
bgp_evpn_es_local_info_clear(es);
}
- thread_cancel(&bgp_mh_info->t_cons_check);
+ if (bgp_mh_info->t_cons_check)
+ thread_cancel(&bgp_mh_info->t_cons_check);
list_delete(&bgp_mh_info->local_es_list);
list_delete(&bgp_mh_info->pend_es_list);
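
For reference, a minimal sketch of the lazy timer-arm pattern the bgp_evpn_mh.c hunks above introduce: the consistency-check timer is now started the first time an ES is queued instead of unconditionally at init, and shutdown only cancels it if it was ever armed. The example_* names and the 60-second interval are hypothetical; the thread_add_timer()/thread_cancel() calls mirror the hunks.

/* Illustrative sketch only, not part of the diff. */
#include "thread.h"

#define EXAMPLE_CHECK_INTERVAL 60		/* placeholder, in seconds */

static struct thread_master *example_master;	/* assumed set at startup */
static struct thread *example_t_check;

static int example_run_checks(struct thread *t)
{
	/* ... do the periodic work, then re-arm for the next interval ... */
	thread_add_timer(example_master, example_run_checks, NULL,
			 EXAMPLE_CHECK_INTERVAL, &example_t_check);
	return 0;
}

static void example_queue_work(void)
{
	/* Arm the timer only when the first piece of work is queued. */
	if (!example_t_check)
		thread_add_timer(example_master, example_run_checks, NULL,
				 EXAMPLE_CHECK_INTERVAL, &example_t_check);
}

static void example_shutdown(void)
{
	/* Safe whether or not the timer was ever armed. */
	if (example_t_check)
		thread_cancel(&example_t_check);
}
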
diff --git a/bgpd/bgp_fsm.h b/bgpd/bgp_fsm.h
index cd464d8c58..bf4966c839 100644
--- a/bgpd/bgp_fsm.h
+++ b/bgpd/bgp_fsm.h
@@ -31,7 +31,7 @@
#define BGP_TIMER_OFF(T) \
do { \
- THREAD_OFF(T); \
+ THREAD_OFF((T)); \
} while (0)
#define BGP_EVENT_ADD(P, E) \
@@ -44,7 +44,7 @@
#define BGP_EVENT_FLUSH(P) \
do { \
assert(peer); \
- thread_cancel_event(bm->master, (P)); \
+ thread_cancel_event_ready(bm->master, (P)); \
} while (0)
#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
@@ -53,10 +53,10 @@
PEER_ROUTE_ADV_DELAY(peer)) \
thread_add_timer_msec(bm->master, (F), peer, \
(BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * 1000),\
- T); \
+ (T)); \
else \
thread_add_timer_msec(bm->master, (F), peer, \
- 0, T); \
+ 0, (T)); \
} while (0) \
#define BGP_MSEC_JITTER 10
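
The bgp_fsm.h hunks above simply wrap the macro parameters in parentheses. A standalone illustration (not FRR code) of why an unparenthesized macro parameter can expand with the wrong precedence:

#include <stdio.h>

#define DOUBLE_BAD(x)  (x * 2)		/* unparenthesized parameter */
#define DOUBLE_GOOD(x) ((x) * 2)	/* parenthesized, as in the fix above */

int main(void)
{
	printf("%d\n", DOUBLE_BAD(1 + 2));	/* expands to (1 + 2 * 2) == 5 */
	printf("%d\n", DOUBLE_GOOD(1 + 2));	/* expands to ((1 + 2) * 2) == 6 */
	return 0;
}
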
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 18a0b3fb7d..ea6bf95d14 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -3597,19 +3597,6 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
if (has_valid_label)
assert(label != NULL);
- /* The flag BGP_NODE_FIB_INSTALL_PENDING is for the following
- * condition :
- * Suppress fib is enabled
- * BGP_OPT_NO_FIB is not enabled
- * Route type is BGP_ROUTE_NORMAL (peer learnt routes)
- * Route is being installed first time (BGP_NODE_FIB_INSTALLED not set)
- */
- if (BGP_SUPPRESS_FIB_ENABLED(bgp) &&
- (sub_type == BGP_ROUTE_NORMAL) &&
- (!bgp_option_check(BGP_OPT_NO_FIB)) &&
- (!CHECK_FLAG(dest->flags, BGP_NODE_FIB_INSTALLED)))
- SET_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING);
-
/* When peer's soft reconfiguration enabled. Record input packet in
Adj-RIBs-In. */
if (!soft_reconfig
@@ -3791,6 +3778,19 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
evpn == NULL ? NULL : &evpn->gw_ip);
}
+ /* The flag BGP_NODE_FIB_INSTALL_PENDING is for the following
+ * condition :
+ * Suppress fib is enabled
+ * BGP_OPT_NO_FIB is not enabled
+ * Route type is BGP_ROUTE_NORMAL (peer learnt routes)
+ * Route is being installed first time (BGP_NODE_FIB_INSTALLED not set)
+ */
+ if (bgp_fibupd_safi(safi) && BGP_SUPPRESS_FIB_ENABLED(bgp)
+ && (sub_type == BGP_ROUTE_NORMAL)
+ && (!bgp_option_check(BGP_OPT_NO_FIB))
+ && (!CHECK_FLAG(dest->flags, BGP_NODE_FIB_INSTALLED)))
+ SET_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING);
+
attr_new = bgp_attr_intern(&new_attr);
/* If maximum prefix count is configured and current prefix
@@ -12385,6 +12385,9 @@ static int bgp_table_stats_walker(struct thread *t)
case AFI_IP6:
space = IPV6_MAX_BITLEN;
break;
+ case AFI_L2VPN:
+ space = EVPN_ROUTE_PREFIXLEN;
+ break;
default:
return 0;
}
diff --git a/bgpd/bgp_table.c b/bgpd/bgp_table.c
index 022a6413e2..7e3aa2a48a 100644
--- a/bgpd/bgp_table.c
+++ b/bgpd/bgp_table.c
@@ -205,8 +205,14 @@ static ssize_t printfrr_bd(char *buf, size_t bsz, const char *fmt,
int prec, const void *ptr)
{
const struct bgp_dest *dest = ptr;
- const struct prefix *p = bgp_dest_get_prefix(dest);
+ const struct prefix *p;
+
+ if (dest) {
+ p = bgp_dest_get_prefix(dest);
+ prefix2str(p, buf, bsz);
+ } else {
+ strlcpy(buf, "NULL", bsz);
+ }
- prefix2str(p, buf, bsz);
return 2;
}
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 9e8065691e..6a76237410 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -19060,7 +19060,7 @@ static void community_list_perror(struct vty *vty, int ret)
/*community-list standard */
DEFUN (community_list_standard,
bgp_community_list_standard_cmd,
- "bgp community-list <(1-99)|standard WORD> [seq (1-4294967295)] <deny|permit> AA:NN...",
+ "bgp community-list <(1-99)|standard WORD> [seq (0-4294967295)] <deny|permit> AA:NN...",
BGP_STR
COMMUNITY_LIST_STR
"Community list number (standard)\n"
@@ -19078,7 +19078,7 @@ DEFUN (community_list_standard,
int style = COMMUNITY_LIST_STANDARD;
int idx = 0;
- argv_find(argv, argc, "(1-4294967295)", &idx);
+ argv_find(argv, argc, "(0-4294967295)", &idx);
if (idx)
seq = argv[idx]->arg;
@@ -19107,7 +19107,7 @@ DEFUN (community_list_standard,
DEFUN (no_community_list_standard_all,
no_bgp_community_list_standard_all_cmd,
- "no bgp community-list <(1-99)|standard WORD> [seq (1-4294967295)] <deny|permit> AA:NN...",
+ "no bgp community-list <(1-99)|standard WORD> [seq (0-4294967295)] <deny|permit> AA:NN...",
NO_STR
BGP_STR
COMMUNITY_LIST_STR
@@ -19127,7 +19127,7 @@ DEFUN (no_community_list_standard_all,
char *seq = NULL;
int idx = 0;
- argv_find(argv, argc, "(1-4294967295)", &idx);
+ argv_find(argv, argc, "(0-4294967295)", &idx);
if (idx)
seq = argv[idx]->arg;
@@ -19173,7 +19173,7 @@ ALIAS(no_community_list_standard_all, no_bgp_community_list_standard_all_list_cm
/*community-list expanded */
DEFUN (community_list_expanded_all,
bgp_community_list_expanded_all_cmd,
- "bgp community-list <(100-500)|expanded WORD> [seq (1-4294967295)] <deny|permit> AA:NN...",
+ "bgp community-list <(100-500)|expanded WORD> [seq (0-4294967295)] <deny|permit> AA:NN...",
BGP_STR
COMMUNITY_LIST_STR
"Community list number (expanded)\n"
@@ -19191,7 +19191,7 @@ DEFUN (community_list_expanded_all,
int style = COMMUNITY_LIST_EXPANDED;
int idx = 0;
- argv_find(argv, argc, "(1-4294967295)", &idx);
+ argv_find(argv, argc, "(0-4294967295)", &idx);
if (idx)
seq = argv[idx]->arg;
@@ -19221,7 +19221,7 @@ DEFUN (community_list_expanded_all,
DEFUN (no_community_list_expanded_all,
no_bgp_community_list_expanded_all_cmd,
- "no bgp community-list <(100-500)|expanded WORD> [seq (1-4294967295)] <deny|permit> AA:NN...",
+ "no bgp community-list <(100-500)|expanded WORD> [seq (0-4294967295)] <deny|permit> AA:NN...",
NO_STR
BGP_STR
COMMUNITY_LIST_STR
@@ -19241,7 +19241,7 @@ DEFUN (no_community_list_expanded_all,
int style = COMMUNITY_LIST_EXPANDED;
int idx = 0;
- argv_find(argv, argc, "(1-4294967295)", &idx);
+ argv_find(argv, argc, "(0-4294967295)", &idx);
if (idx)
seq = argv[idx]->arg;
@@ -19394,7 +19394,7 @@ static int lcommunity_list_set_vty(struct vty *vty, int argc,
char *cl_name;
char *seq = NULL;
- if (argv_find(argv, argc, "(1-4294967295)", &idx))
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
@@ -19443,7 +19443,7 @@ static int lcommunity_list_unset_vty(struct vty *vty, int argc,
int idx = 0;
char *seq = NULL;
- if (argv_find(argv, argc, "(1-4294967295)", &idx))
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
@@ -19491,7 +19491,7 @@ static int lcommunity_list_unset_vty(struct vty *vty, int argc,
DEFUN (lcommunity_list_standard,
bgp_lcommunity_list_standard_cmd,
- "bgp large-community-list (1-99) [seq (1-4294967295)] <deny|permit> AA:BB:CC...",
+ "bgp large-community-list (1-99) [seq (0-4294967295)] <deny|permit> AA:BB:CC...",
BGP_STR
LCOMMUNITY_LIST_STR
"Large Community list number (standard)\n"
@@ -19507,7 +19507,7 @@ DEFUN (lcommunity_list_standard,
DEFUN (lcommunity_list_expanded,
bgp_lcommunity_list_expanded_cmd,
- "bgp large-community-list (100-500) [seq (1-4294967295)] <deny|permit> LINE...",
+ "bgp large-community-list (100-500) [seq (0-4294967295)] <deny|permit> LINE...",
BGP_STR
LCOMMUNITY_LIST_STR
"Large Community list number (expanded)\n"
@@ -19523,7 +19523,7 @@ DEFUN (lcommunity_list_expanded,
DEFUN (lcommunity_list_name_standard,
bgp_lcommunity_list_name_standard_cmd,
- "bgp large-community-list standard WORD [seq (1-4294967295)] <deny|permit> AA:BB:CC...",
+ "bgp large-community-list standard WORD [seq (0-4294967295)] <deny|permit> AA:BB:CC...",
BGP_STR
LCOMMUNITY_LIST_STR
"Specify standard large-community-list\n"
@@ -19540,7 +19540,7 @@ DEFUN (lcommunity_list_name_standard,
DEFUN (lcommunity_list_name_expanded,
bgp_lcommunity_list_name_expanded_cmd,
- "bgp large-community-list expanded WORD [seq (1-4294967295)] <deny|permit> LINE...",
+ "bgp large-community-list expanded WORD [seq (0-4294967295)] <deny|permit> LINE...",
BGP_STR
LCOMMUNITY_LIST_STR
"Specify expanded large-community-list\n"
@@ -19597,7 +19597,7 @@ DEFUN (no_lcommunity_list_name_expanded_all,
DEFUN (no_lcommunity_list_standard,
no_bgp_lcommunity_list_standard_cmd,
- "no bgp large-community-list (1-99) [seq (1-4294967295)] <deny|permit> AA:AA:NN...",
+ "no bgp large-community-list (1-99) [seq (0-4294967295)] <deny|permit> AA:AA:NN...",
NO_STR
BGP_STR
LCOMMUNITY_LIST_STR
@@ -19614,7 +19614,7 @@ DEFUN (no_lcommunity_list_standard,
DEFUN (no_lcommunity_list_expanded,
no_bgp_lcommunity_list_expanded_cmd,
- "no bgp large-community-list (100-500) [seq (1-4294967295)] <deny|permit> LINE...",
+ "no bgp large-community-list (100-500) [seq (0-4294967295)] <deny|permit> LINE...",
NO_STR
BGP_STR
LCOMMUNITY_LIST_STR
@@ -19631,7 +19631,7 @@ DEFUN (no_lcommunity_list_expanded,
DEFUN (no_lcommunity_list_name_standard,
no_bgp_lcommunity_list_name_standard_cmd,
- "no bgp large-community-list standard WORD [seq (1-4294967295)] <deny|permit> AA:AA:NN...",
+ "no bgp large-community-list standard WORD [seq (0-4294967295)] <deny|permit> AA:AA:NN...",
NO_STR
BGP_STR
LCOMMUNITY_LIST_STR
@@ -19649,7 +19649,7 @@ DEFUN (no_lcommunity_list_name_standard,
DEFUN (no_lcommunity_list_name_expanded,
no_bgp_lcommunity_list_name_expanded_cmd,
- "no bgp large-community-list expanded WORD [seq (1-4294967295)] <deny|permit> LINE...",
+ "no bgp large-community-list expanded WORD [seq (0-4294967295)] <deny|permit> LINE...",
NO_STR
BGP_STR
LCOMMUNITY_LIST_STR
@@ -19751,7 +19751,7 @@ DEFUN (show_lcommunity_list_arg,
DEFUN (extcommunity_list_standard,
bgp_extcommunity_list_standard_cmd,
- "bgp extcommunity-list <(1-99)|standard WORD> [seq (1-4294967295)] <deny|permit> AA:NN...",
+ "bgp extcommunity-list <(1-99)|standard WORD> [seq (0-4294967295)] <deny|permit> AA:NN...",
BGP_STR
EXTCOMMUNITY_LIST_STR
"Extended Community list number (standard)\n"
@@ -19774,7 +19774,7 @@ DEFUN (extcommunity_list_standard,
argv_find(argv, argc, "WORD", &idx);
cl_number_or_name = argv[idx]->arg;
- if (argv_find(argv, argc, "(1-4294967295)", &idx))
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
direct = argv_find(argv, argc, "permit", &idx) ? COMMUNITY_PERMIT
@@ -19797,7 +19797,7 @@ DEFUN (extcommunity_list_standard,
DEFUN (extcommunity_list_name_expanded,
bgp_extcommunity_list_name_expanded_cmd,
- "bgp extcommunity-list <(100-500)|expanded WORD> [seq (1-4294967295)] <deny|permit> LINE...",
+ "bgp extcommunity-list <(100-500)|expanded WORD> [seq (0-4294967295)] <deny|permit> LINE...",
BGP_STR
EXTCOMMUNITY_LIST_STR
"Extended Community list number (expanded)\n"
@@ -19819,7 +19819,7 @@ DEFUN (extcommunity_list_name_expanded,
argv_find(argv, argc, "WORD", &idx);
cl_number_or_name = argv[idx]->arg;
- if (argv_find(argv, argc, "(1-4294967295)", &idx))
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
direct = argv_find(argv, argc, "permit", &idx) ? COMMUNITY_PERMIT
@@ -19842,7 +19842,7 @@ DEFUN (extcommunity_list_name_expanded,
DEFUN (no_extcommunity_list_standard_all,
no_bgp_extcommunity_list_standard_all_cmd,
- "no bgp extcommunity-list <(1-99)|standard WORD> [seq (1-4294967295)] <deny|permit> AA:NN...",
+ "no bgp extcommunity-list <(1-99)|standard WORD> [seq (0-4294967295)] <deny|permit> AA:NN...",
NO_STR
BGP_STR
EXTCOMMUNITY_LIST_STR
@@ -19862,7 +19862,7 @@ DEFUN (no_extcommunity_list_standard_all,
char *seq = NULL;
int idx = 0;
- if (argv_find(argv, argc, "(1-4294967295)", &idx))
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
@@ -19906,7 +19906,7 @@ ALIAS(no_extcommunity_list_standard_all,
DEFUN (no_extcommunity_list_expanded_all,
no_bgp_extcommunity_list_expanded_all_cmd,
- "no bgp extcommunity-list <(100-500)|expanded WORD> [seq (1-4294967295)] <deny|permit> LINE...",
+ "no bgp extcommunity-list <(100-500)|expanded WORD> [seq (0-4294967295)] <deny|permit> LINE...",
NO_STR
BGP_STR
EXTCOMMUNITY_LIST_STR
@@ -19926,7 +19926,7 @@ DEFUN (no_extcommunity_list_expanded_all,
char *seq = NULL;
int idx = 0;
- if (argv_find(argv, argc, "(1-4294967295)", &idx))
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
diff --git a/doc/developer/building-frr-for-ubuntu2004.rst b/doc/developer/building-frr-for-ubuntu2004.rst
index f7f8c63e5b..ef5d8da551 100644
--- a/doc/developer/building-frr-for-ubuntu2004.rst
+++ b/doc/developer/building-frr-for-ubuntu2004.rst
@@ -27,7 +27,7 @@ ubuntu apt repositories; in order to install it:
.. code-block:: shell
- curl https://bootstrap.pypa.io/get-pip.py --output get-pip.py
+ curl https://bootstrap.pypa.io/2.7/get-pip.py --output get-pip.py
sudo python2 ./get-pip.py
# And verify the installation
diff --git a/doc/user/isisd.rst b/doc/user/isisd.rst
index 457f841118..ebcf21b04c 100644
--- a/doc/user/isisd.rst
+++ b/doc/user/isisd.rst
@@ -121,17 +121,23 @@ ISIS Timer
ISIS Fast-Reroute
=================
+Unless stated otherwise, commands in this section apply to all LFA
+flavors (local LFA, Remote LFA and TI-LFA).
+
.. clicmd:: spf prefix-priority [critical | high | medium] WORD
Assign a priority to the prefixes that match the specified access-list.
+ By default loopback prefixes have medium priority and non-loopback prefixes
+ have low priority.
+
.. clicmd:: fast-reroute priority-limit [critical | high | medium] [level-1 | level-2]
Limit LFA backup computation up to the specified prefix priority.
.. clicmd:: fast-reroute lfa tiebreaker [downstream | lowest-backup-metric | node-protecting] index (1-255) [level-1 | level-2]
- Configure a tie-breaker for multiple LFA backups. Lower indexes are
+ Configure a tie-breaker for multiple local LFA backups. Lower indexes are
processed first.
.. clicmd:: fast-reroute load-sharing disable [level-1 | level-2]
@@ -140,8 +146,8 @@ ISIS Fast-Reroute
.. clicmd:: fast-reroute remote-lfa prefix-list [WORD] [level-1 | level-2]
- Configure a prefix-list to select eligible PQ nodes (valid for all protected
- interfaces).
+ Configure a prefix-list to select eligible PQ nodes for remote LFA
+ backups (valid for all protected interfaces).
.. _isis-region:
@@ -239,15 +245,11 @@ ISIS interface
.. clicmd:: isis fast-reroute lfa [level-1 | level-2]
- Enable per-prefix LFA fast reroute link protection.
+ Enable per-prefix local LFA fast reroute link protection.
.. clicmd:: isis fast-reroute lfa [level-1 | level-2] exclude interface IFNAME
- Exclude an interface from the LFA backup nexthop computation.
-
-.. clicmd:: isis fast-reroute ti-lfa [level-1|level-2] [node-protection]
-
- Enable per-prefix TI-LFA fast reroute link or node protection.
+ Exclude an interface from the local LFA backup nexthop computation.
.. clicmd:: isis fast-reroute remote-lfa tunnel mpls-ldp [level-1 | level-2]
@@ -259,6 +261,10 @@ ISIS interface
Limit Remote LFA PQ node selection within the specified metric.
+.. clicmd:: isis fast-reroute ti-lfa [level-1|level-2] [node-protection]
+
+ Enable per-prefix TI-LFA fast reroute link or node protection.
+
.. _showing-isis-information:
diff --git a/doc/user/ospf6d.rst b/doc/user/ospf6d.rst
index 6f8ac978de..00571487d7 100644
--- a/doc/user/ospf6d.rst
+++ b/doc/user/ospf6d.rst
@@ -229,6 +229,15 @@ Showing OSPF6 information
Interface name can also be given. JSON output can be obtained by appending
'json' to the end of command.
+.. index:: show ipv6 ospf6 spf tree [json]
+.. clicmd:: show ipv6 ospf6 spf tree [json]
+
+ This command shows the SPF tree from the most recent SPF calculation with
+ the calling router as the root. If 'json' is appended at the end, the tree
+ is displayed in JSON format. Each area that the router belongs to has its
+ own JSON object, with each router carrying "cost", "isLeafNode" and
+ "children" as fields.
+
OSPF6 Configuration Examples
============================
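
The new 'show ipv6 ospf6 spf tree json' text above describes per-vertex objects carrying "cost", "isLeafNode" and "children". A small self-contained json-c sketch of that shape, with made-up router names and costs (the json-c calls are the same family the ospf6d hunks later in this diff use); compile with -ljson-c:

#include <stdio.h>
#include <json-c/json.h>

int main(void)
{
	struct json_object *root = json_object_new_object();
	struct json_object *children = json_object_new_object();
	struct json_object *child = json_object_new_object();

	/* a leaf child vertex */
	json_object_object_add(child, "cost", json_object_new_int(10));
	json_object_object_add(child, "isLeafNode", json_object_new_boolean(1));
	json_object_object_add(children, "2.2.2.2", child);

	/* the root vertex with its children */
	json_object_object_add(root, "cost", json_object_new_int(0));
	json_object_object_add(root, "isLeafNode", json_object_new_boolean(0));
	json_object_object_add(root, "children", children);

	printf("%s\n", json_object_to_json_string_ext(root,
						       JSON_C_TO_STRING_PRETTY));
	json_object_put(root);	/* frees the whole tree */
	return 0;
}
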
diff --git a/isisd/isis_lfa.c b/isisd/isis_lfa.c
index 5b3a3827a2..3ebac8aaa9 100644
--- a/isisd/isis_lfa.c
+++ b/isisd/isis_lfa.c
@@ -541,10 +541,16 @@ static int tilfa_repair_list_apply(struct isis_spftree *spftree,
struct isis_spf_adj *sadj = vadj->sadj;
struct mpls_label_stack *label_stack;
+ /*
+ * Don't try to apply the repair list if one was already applied
+ * before (can't have ECMP past the P-node).
+ */
+ if (vadj->label_stack)
+ continue;
+
if (!isis_vertex_adj_exists(spftree, vertex_pnode, sadj))
continue;
- assert(!vadj->label_stack);
label_stack = tilfa_compute_label_stack(spftree->lspdb, sadj,
repair_list);
if (!label_stack) {
@@ -663,6 +669,21 @@ static int tilfa_build_repair_list(struct isis_spftree *spftree_pc,
if ((!is_qnode
|| spftree_pc->lfa.protected_resource.type == LFA_NODE_PROTECTION)
&& vertex_child) {
+ /*
+ * If vertex is the penultimate hop router, then pushing an
+ * Adj-SID towards the final hop means that the No-PHP flag of
+ * the original Prefix-SID must be honored. We do that by
+ * removing the previously added Prefix-SID from the repair list
+ * when those conditions are met.
+ */
+ if (vertex->depth == (vertex_dest->depth - 2)
+ && VTYPE_IP(vertex_dest->type)
+ && vertex_dest->N.ip.sr.present
+ && !CHECK_FLAG(vertex_dest->N.ip.sr.sid.flags,
+ ISIS_PREFIX_SID_NO_PHP)) {
+ list_delete_all_node(repair_list);
+ }
+
label_qnode = tilfa_find_qnode_adj_sid(spftree_pc, vertex->N.id,
vertex_child->N.id);
if (label_qnode == MPLS_INVALID_LABEL) {
diff --git a/lib/nexthop.c b/lib/nexthop.c
index b2fa945690..dd8c108205 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -739,6 +739,16 @@ static ssize_t printfrr_nh(char *buf, size_t bsz, const char *fmt,
const char *s, *v_is = "", *v_via = "", *v_viaif = "via ";
ssize_t ret = 3;
+ /* NULL-check */
+ if (nexthop == NULL) {
+ if (fmt[2] == 'v' && fmt[3] == 'v')
+ ret++;
+
+ strlcpy(buf, "NULL", bsz);
+
+ return ret;
+ }
+
switch (fmt[2]) {
case 'v':
if (fmt[3] == 'v') {
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index 1416b758d8..a2c8bc8633 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -1906,7 +1906,6 @@ void nb_cli_init(struct thread_master *tm)
if (frr_get_cli_mode() == FRR_CLI_TRANSACTIONAL) {
install_element(ENABLE_NODE, &config_exclusive_cmd);
install_element(ENABLE_NODE, &config_private_cmd);
- install_element(ENABLE_NODE, &show_config_running_cmd);
install_element(ENABLE_NODE,
&show_config_compare_without_candidate_cmd);
install_element(ENABLE_NODE, &show_config_transaction_cmd);
@@ -1919,6 +1918,7 @@ void nb_cli_init(struct thread_master *tm)
}
/* Other commands. */
+ install_element(ENABLE_NODE, &show_config_running_cmd);
install_element(CONFIG_NODE, &yang_module_translator_load_cmd);
install_element(CONFIG_NODE, &yang_module_translator_unload_cmd);
install_element(ENABLE_NODE, &show_yang_operational_data_cmd);
diff --git a/lib/prefix.c b/lib/prefix.c
index c98e0c1c72..5e5c2d89a8 100644
--- a/lib/prefix.c
+++ b/lib/prefix.c
@@ -1366,7 +1366,11 @@ static ssize_t printfrr_ea(char *buf, size_t bsz, const char *fmt,
{
const struct ethaddr *mac = ptr;
- prefix_mac2str(mac, buf, bsz);
+ if (mac)
+ prefix_mac2str(mac, buf, bsz);
+ else
+ strlcpy(buf, "NULL", bsz);
+
return 2;
}
@@ -1376,7 +1380,11 @@ static ssize_t printfrr_ia(char *buf, size_t bsz, const char *fmt,
{
const struct ipaddr *ipa = ptr;
- ipaddr2str(ipa, buf, bsz);
+ if (ipa)
+ ipaddr2str(ipa, buf, bsz);
+ else
+ strlcpy(buf, "NULL", bsz);
+
return 2;
}
@@ -1384,7 +1392,11 @@ printfrr_ext_autoreg_p("I4", printfrr_i4)
static ssize_t printfrr_i4(char *buf, size_t bsz, const char *fmt,
int prec, const void *ptr)
{
- inet_ntop(AF_INET, ptr, buf, bsz);
+ if (ptr)
+ inet_ntop(AF_INET, ptr, buf, bsz);
+ else
+ strlcpy(buf, "NULL", bsz);
+
return 2;
}
@@ -1392,7 +1404,11 @@ printfrr_ext_autoreg_p("I6", printfrr_i6)
static ssize_t printfrr_i6(char *buf, size_t bsz, const char *fmt,
int prec, const void *ptr)
{
- inet_ntop(AF_INET6, ptr, buf, bsz);
+ if (ptr)
+ inet_ntop(AF_INET6, ptr, buf, bsz);
+ else
+ strlcpy(buf, "NULL", bsz);
+
return 2;
}
@@ -1400,7 +1416,11 @@ printfrr_ext_autoreg_p("FX", printfrr_pfx)
static ssize_t printfrr_pfx(char *buf, size_t bsz, const char *fmt,
int prec, const void *ptr)
{
- prefix2str(ptr, buf, bsz);
+ if (ptr)
+ prefix2str(ptr, buf, bsz);
+ else
+ strlcpy(buf, "NULL", bsz);
+
return 2;
}
@@ -1411,16 +1431,22 @@ static ssize_t printfrr_psg(char *buf, size_t bsz, const char *fmt,
const struct prefix_sg *sg = ptr;
struct fbuf fb = { .buf = buf, .pos = buf, .len = bsz - 1 };
- if (sg->src.s_addr == INADDR_ANY)
- bprintfrr(&fb, "(*,");
- else
- bprintfrr(&fb, "(%pI4,", &sg->src);
+ if (sg) {
+ if (sg->src.s_addr == INADDR_ANY)
+ bprintfrr(&fb, "(*,");
+ else
+ bprintfrr(&fb, "(%pI4,", &sg->src);
- if (sg->grp.s_addr == INADDR_ANY)
- bprintfrr(&fb, "*)");
- else
- bprintfrr(&fb, "%pI4)", &sg->grp);
+ if (sg->grp.s_addr == INADDR_ANY)
+ bprintfrr(&fb, "*)");
+ else
+ bprintfrr(&fb, "%pI4)", &sg->grp);
+
+ fb.pos[0] = '\0';
+
+ } else {
+ strlcpy(buf, "NULL", bsz);
+ }
- fb.pos[0] = '\0';
return 3;
}
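
The printfrr extension hunks in bgpd/bgp_table.c, lib/nexthop.c and lib/prefix.c above (and lib/sockunion.c and lib/srcdest_table.c below) all add the same guard so that a NULL argument prints the literal string "NULL" instead of being dereferenced. A hedged usage sketch, assuming the FRR lib headers; example_show_prefix() is a hypothetical caller:

#include <stdio.h>
#include "printfrr.h"
#include "prefix.h"

static void example_show_prefix(const struct prefix *p)
{
	char buf[PREFIX_STRLEN];

	/* Before this change a NULL 'p' would be passed straight into
	 * prefix2str() and crash; with the guard the formatter copies the
	 * literal string "NULL" into buf instead. */
	snprintfrr(buf, sizeof(buf), "%pFX", p);
	printf("%s\n", buf);
}
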
diff --git a/lib/sockunion.c b/lib/sockunion.c
index 1dbf77efa4..c701da1e03 100644
--- a/lib/sockunion.c
+++ b/lib/sockunion.c
@@ -673,39 +673,44 @@ static ssize_t printfrr_psu(char *buf, size_t bsz, const char *fmt,
bool endflags = false;
ssize_t consumed = 2;
- while (!endflags) {
- switch (fmt[consumed++]) {
- case 'p':
- include_port = true;
+ if (su) {
+ while (!endflags) {
+ switch (fmt[consumed++]) {
+ case 'p':
+ include_port = true;
+ break;
+ default:
+ consumed--;
+ endflags = true;
+ break;
+ }
+ };
+
+ switch (sockunion_family(su)) {
+ case AF_UNSPEC:
+ bprintfrr(&fb, "(unspec)");
break;
- default:
- consumed--;
- endflags = true;
+ case AF_INET:
+ inet_ntop(AF_INET, &su->sin.sin_addr, buf, bsz);
+ fb.pos += strlen(fb.buf);
+ if (include_port)
+ bprintfrr(&fb, ":%d", su->sin.sin_port);
+ break;
+ case AF_INET6:
+ inet_ntop(AF_INET6, &su->sin6.sin6_addr, buf, bsz);
+ fb.pos += strlen(fb.buf);
+ if (include_port)
+ bprintfrr(&fb, ":%d", su->sin6.sin6_port);
break;
+ default:
+ bprintfrr(&fb, "(af %d)", sockunion_family(su));
}
- };
- switch (sockunion_family(su)) {
- case AF_UNSPEC:
- bprintfrr(&fb, "(unspec)");
- break;
- case AF_INET:
- inet_ntop(AF_INET, &su->sin.sin_addr, buf, bsz);
- fb.pos += strlen(fb.buf);
- if (include_port)
- bprintfrr(&fb, ":%d", su->sin.sin_port);
- break;
- case AF_INET6:
- inet_ntop(AF_INET6, &su->sin6.sin6_addr, buf, bsz);
- fb.pos += strlen(fb.buf);
- if (include_port)
- bprintfrr(&fb, ":%d", su->sin6.sin6_port);
- break;
- default:
- bprintfrr(&fb, "(af %d)", sockunion_family(su));
+ fb.pos[0] = '\0';
+ } else {
+ strlcpy(buf, "NULL", bsz);
}
- fb.pos[0] = '\0';
return consumed;
}
diff --git a/lib/srcdest_table.c b/lib/srcdest_table.c
index 8ffa0e9709..ef82b7ac01 100644
--- a/lib/srcdest_table.c
+++ b/lib/srcdest_table.c
@@ -313,8 +313,13 @@ static ssize_t printfrr_rn(char *buf, size_t bsz, const char *fmt,
const struct route_node *rn = ptr;
const struct prefix *dst_p, *src_p;
- srcdest_rnode_prefixes(rn, &dst_p, &src_p);
- srcdest2str(dst_p, (const struct prefix_ipv6 *)src_p, buf, bsz);
+ if (rn) {
+ srcdest_rnode_prefixes(rn, &dst_p, &src_p);
+ srcdest2str(dst_p, (const struct prefix_ipv6 *)src_p, buf, bsz);
+ } else {
+ strlcpy(buf, "NULL", bsz);
+ }
+
return 2;
}
diff --git a/lib/thread.c b/lib/thread.c
index 5c06c6ddb5..af01c75a44 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -45,6 +45,16 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
DECLARE_LIST(thread_list, struct thread, threaditem)
+struct cancel_req {
+ int flags;
+ struct thread *thread;
+ void *eventobj;
+ struct thread **threadref;
+};
+
+/* Flags for task cancellation */
+#define THREAD_CANCEL_FLAG_READY 0x01
+
static int thread_timer_cmp(const struct thread *a, const struct thread *b)
{
if (a->u.sands.tv_sec < b->u.sands.tv_sec)
@@ -1050,21 +1060,29 @@ struct thread *_thread_add_event(const struct xref_threadsched *xref,
* - POLLIN
* - POLLOUT
*/
-static void thread_cancel_rw(struct thread_master *master, int fd, short state)
+static void thread_cancel_rw(struct thread_master *master, int fd, short state,
+ int idx_hint)
{
bool found = false;
- /* Cancel POLLHUP too just in case some bozo set it */
- state |= POLLHUP;
-
/* find the index of corresponding pollfd */
nfds_t i;
- for (i = 0; i < master->handler.pfdcount; i++)
- if (master->handler.pfds[i].fd == fd) {
- found = true;
- break;
- }
+ /* Cancel POLLHUP too just in case some bozo set it */
+ state |= POLLHUP;
+
+ /* Some callers know the index of the pfd already */
+ if (idx_hint >= 0) {
+ i = idx_hint;
+ found = true;
+ } else {
+ /* Have to look for the fd in the pfd array */
+ for (i = 0; i < master->handler.pfdcount; i++)
+ if (master->handler.pfds[i].fd == fd) {
+ found = true;
+ break;
+ }
+ }
if (!found) {
zlog_debug(
@@ -1104,6 +1122,95 @@ static void thread_cancel_rw(struct thread_master *master, int fd, short state)
}
}
+/*
+ * Process task cancellation given a task argument: iterate through the
+ * various lists of tasks, looking for any that match the argument.
+ */
+static void cancel_arg_helper(struct thread_master *master,
+ const struct cancel_req *cr)
+{
+ struct thread *t;
+ nfds_t i;
+ int fd;
+ struct pollfd *pfd;
+
+ /* We're only processing arg-based cancellations here. */
+ if (cr->eventobj == NULL)
+ return;
+
+ /* First process the ready lists. */
+ frr_each_safe(thread_list, &master->event, t) {
+ if (t->arg != cr->eventobj)
+ continue;
+ thread_list_del(&master->event, t);
+ if (t->ref)
+ *t->ref = NULL;
+ thread_add_unuse(master, t);
+ }
+
+ frr_each_safe(thread_list, &master->ready, t) {
+ if (t->arg != cr->eventobj)
+ continue;
+ thread_list_del(&master->ready, t);
+ if (t->ref)
+ *t->ref = NULL;
+ thread_add_unuse(master, t);
+ }
+
+ /* If requested, stop here and ignore io and timers */
+ if (CHECK_FLAG(cr->flags, THREAD_CANCEL_FLAG_READY))
+ return;
+
+ /* Check the io tasks */
+ for (i = 0; i < master->handler.pfdcount;) {
+ pfd = master->handler.pfds + i;
+
+ if (pfd->events & POLLIN)
+ t = master->read[pfd->fd];
+ else
+ t = master->write[pfd->fd];
+
+ if (t && t->arg == cr->eventobj) {
+ fd = pfd->fd;
+
+ /* Found a match to cancel: clean up fd arrays */
+ thread_cancel_rw(master, pfd->fd, pfd->events, i);
+
+ /* Clean up thread arrays */
+ master->read[fd] = NULL;
+ master->write[fd] = NULL;
+
+ /* Clear caller's ref */
+ if (t->ref)
+ *t->ref = NULL;
+
+ thread_add_unuse(master, t);
+
+ /* Don't increment 'i' since the cancellation will have
+ * removed the entry from the pfd array
+ */
+ } else
+ i++;
+ }
+
+ /* Check the timer tasks */
+ t = thread_timer_list_first(&master->timer);
+ while (t) {
+ struct thread *t_next;
+
+ t_next = thread_timer_list_next(&master->timer, t);
+
+ if (t->arg == cr->eventobj) {
+ thread_timer_list_del(&master->timer, t);
+ if (t->ref)
+ *t->ref = NULL;
+ thread_add_unuse(master, t);
+ }
+
+ t = t_next;
+ }
+}
+
/**
* Process cancellation requests.
*
@@ -1122,31 +1229,12 @@ static void do_thread_cancel(struct thread_master *master)
struct listnode *ln;
for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
/*
- * If this is an event object cancellation, linear search
- * through event list deleting any events which have the
- * specified argument. We also need to check every thread
- * in the ready queue.
+ * If this is an event object cancellation, search
+ * through task lists deleting any tasks which have the
+ * specified argument - use this handy helper function.
*/
if (cr->eventobj) {
- struct thread *t;
-
- frr_each_safe(thread_list, &master->event, t) {
- if (t->arg != cr->eventobj)
- continue;
- thread_list_del(&master->event, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
-
- frr_each_safe(thread_list, &master->ready, t) {
- if (t->arg != cr->eventobj)
- continue;
- thread_list_del(&master->ready, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
+ cancel_arg_helper(master, cr);
continue;
}
@@ -1164,11 +1252,11 @@ static void do_thread_cancel(struct thread_master *master)
/* Determine the appropriate queue to cancel the thread from */
switch (thread->type) {
case THREAD_READ:
- thread_cancel_rw(master, thread->u.fd, POLLIN);
+ thread_cancel_rw(master, thread->u.fd, POLLIN, -1);
thread_array = master->read;
break;
case THREAD_WRITE:
- thread_cancel_rw(master, thread->u.fd, POLLOUT);
+ thread_cancel_rw(master, thread->u.fd, POLLOUT, -1);
thread_array = master->write;
break;
case THREAD_TIMER:
@@ -1206,6 +1294,30 @@ static void do_thread_cancel(struct thread_master *master)
pthread_cond_broadcast(&master->cancel_cond);
}
+/*
+ * Helper function used for multiple flavors of arg-based cancellation.
+ */
+static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
+{
+ struct cancel_req *cr;
+
+ assert(m->owner == pthread_self());
+
+ /* Only worth anything if caller supplies an arg. */
+ if (arg == NULL)
+ return;
+
+ cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
+
+ cr->flags = flags;
+
+ frr_with_mutex(&m->mtx) {
+ cr->eventobj = arg;
+ listnode_add(m->cancel_req, cr);
+ do_thread_cancel(m);
+ }
+}
+
/**
* Cancel any events which have the specified argument.
*
@@ -1216,15 +1328,22 @@ static void do_thread_cancel(struct thread_master *master)
*/
void thread_cancel_event(struct thread_master *master, void *arg)
{
- assert(master->owner == pthread_self());
+ cancel_event_helper(master, arg, 0);
+}
- frr_with_mutex(&master->mtx) {
- struct cancel_req *cr =
- XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
- cr->eventobj = arg;
- listnode_add(master->cancel_req, cr);
- do_thread_cancel(master);
- }
+/*
+ * Cancel ready tasks with an arg matching 'arg'
+ *
+ * MT-Unsafe
+ *
+ * @param m the thread_master to cancel from
+ * @param arg the argument passed when creating the event
+ */
+void thread_cancel_event_ready(struct thread_master *m, void *arg)
+{
+
+ /* Only cancel ready/event tasks */
+ cancel_event_helper(m, arg, THREAD_CANCEL_FLAG_READY);
}
/**
diff --git a/lib/thread.h b/lib/thread.h
index b23c6a9865..1777a6b6f7 100644
--- a/lib/thread.h
+++ b/lib/thread.h
@@ -46,8 +46,8 @@ PREDECL_HEAP(thread_timer_list)
struct fd_handler {
/* number of pfd that fit in the allocated space of pfds. This is a
- * constant
- * and is the same for both pfds and copy. */
+ * constant and is the same for both pfds and copy.
+ */
nfds_t pfdsize;
/* file descriptors to monitor for i/o */
@@ -61,12 +61,6 @@ struct fd_handler {
nfds_t copycount;
};
-struct cancel_req {
- struct thread *thread;
- void *eventobj;
- struct thread **threadref;
-};
-
struct xref_threadsched {
struct xref xref;
@@ -240,7 +234,10 @@ extern void _thread_execute(const struct xref_threadsched *xref,
extern void thread_cancel(struct thread **event);
extern void thread_cancel_async(struct thread_master *, struct thread **,
void *);
-extern void thread_cancel_event(struct thread_master *, void *);
+/* Cancel ready tasks with an arg matching 'arg' */
+extern void thread_cancel_event_ready(struct thread_master *m, void *arg);
+/* Cancel all tasks with an arg matching 'arg', including timers and io */
+extern void thread_cancel_event(struct thread_master *m, void *arg);
extern struct thread *thread_fetch(struct thread_master *, struct thread *);
extern void thread_call(struct thread *);
extern unsigned long thread_timer_remain_second(struct thread *);
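
lib/thread.c and lib/thread.h above split argument-based cancellation into two entry points: thread_cancel_event() drops every task whose arg matches (events, ready tasks, timers and I/O), while the new thread_cancel_event_ready() only touches the event/ready lists, which is what BGP_EVENT_FLUSH switches to in bgp_fsm.h. A minimal usage sketch, assuming a daemon context with a valid struct thread_master; the example_* names are hypothetical:

#include "thread.h"

static int example_handler(struct thread *t)
{
	void *obj = THREAD_ARG(t);

	(void)obj;	/* ... handle the event ... */
	return 0;
}

static void example_cancel(struct thread_master *master, void *obj)
{
	/* Schedule an event keyed on 'obj'. */
	thread_add_event(master, example_handler, obj, 0, NULL);

	/* Drop only queued events/ready tasks whose arg is 'obj';
	 * timers and read/write tasks keyed on 'obj' keep running. */
	thread_cancel_event_ready(master, obj);

	/* Drop everything keyed on 'obj': events, ready tasks, timers
	 * and I/O tasks. */
	thread_cancel_event(master, obj);
}
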
diff --git a/ospf6d/ospf6_area.c b/ospf6d/ospf6_area.c
index 778bcb9a45..898567b4f0 100644
--- a/ospf6d/ospf6_area.c
+++ b/ospf6d/ospf6_area.c
@@ -44,6 +44,7 @@
#include "ospf6_abr.h"
#include "ospf6_asbr.h"
#include "ospf6d.h"
+#include "lib/json.h"
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_PLISTNAME, "Prefix list name")
@@ -850,12 +851,13 @@ DEFUN (no_area_export_list,
DEFUN (show_ipv6_ospf6_spf_tree,
show_ipv6_ospf6_spf_tree_cmd,
- "show ipv6 ospf6 spf tree",
+ "show ipv6 ospf6 spf tree [json]",
SHOW_STR
IP6_STR
OSPF6_STR
"Shortest Path First calculation\n"
- "Show SPF tree\n")
+ "Show SPF tree\n"
+ JSON_STR)
{
struct listnode *node;
struct ospf6_area *oa;
@@ -863,20 +865,52 @@ DEFUN (show_ipv6_ospf6_spf_tree,
struct ospf6_route *route;
struct prefix prefix;
struct ospf6 *ospf6;
+ json_object *json = NULL;
+ json_object *json_area = NULL;
+ json_object *json_head = NULL;
+ bool uj = use_json(argc, argv);
ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME);
OSPF6_CMD_CHECK_RUNNING(ospf6);
+
+ if (uj)
+ json = json_object_new_object();
ospf6_linkstate_prefix(ospf6->router_id, htonl(0), &prefix);
for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, node, oa)) {
+ if (uj) {
+ json_area = json_object_new_object();
+ json_head = json_object_new_object();
+ }
route = ospf6_route_lookup(&prefix, oa->spf_table);
if (route == NULL) {
- vty_out(vty, "LS entry for root not found in area %s\n",
- oa->name);
+ if (uj) {
+ json_object_string_add(
+ json, oa->name,
+ "LS entry for not not found");
+ json_object_free(json_head);
+ json_object_free(json_area);
+ } else
+ vty_out(vty,
+ "LS entry for root not found in area %s\n",
+ oa->name);
continue;
}
root = (struct ospf6_vertex *)route->route_option;
- ospf6_spf_display_subtree(vty, "", 0, root);
+ ospf6_spf_display_subtree(vty, "", 0, root, json_head, uj);
+
+ if (uj) {
+ json_object_object_add(json_area, root->name,
+ json_head);
+ json_object_object_add(json, oa->name, json_area);
+ }
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
}
return CMD_SUCCESS;
@@ -924,7 +958,7 @@ DEFUN (show_ipv6_ospf6_area_spf_tree,
return CMD_SUCCESS;
}
root = (struct ospf6_vertex *)route->route_option;
- ospf6_spf_display_subtree(vty, "", 0, root);
+ ospf6_spf_display_subtree(vty, "", 0, root, NULL, false);
return CMD_SUCCESS;
}
@@ -985,7 +1019,7 @@ DEFUN (show_ipv6_ospf6_simulate_spf_tree_root,
return CMD_SUCCESS;
}
root = (struct ospf6_vertex *)route->route_option;
- ospf6_spf_display_subtree(vty, "", 0, root);
+ ospf6_spf_display_subtree(vty, "", 0, root, NULL, false);
ospf6_spf_table_finish(spf_table);
ospf6_route_table_delete(spf_table);
diff --git a/ospf6d/ospf6_spf.c b/ospf6d/ospf6_spf.c
index f94252991c..121e846843 100644
--- a/ospf6d/ospf6_spf.c
+++ b/ospf6d/ospf6_spf.c
@@ -649,14 +649,10 @@ static int ospf6_spf_calculation_thread(struct thread *t)
ospf6_spf_reason_string(ospf6->spf_reason, rbuf, sizeof(rbuf));
if (IS_OSPF6_DEBUG_SPF(PROCESS) || IS_OSPF6_DEBUG_SPF(TIME))
- zlog_debug("SPF runtime: %lld sec %lld usec",
- (long long)runtime.tv_sec,
- (long long)runtime.tv_usec);
-
- zlog_info(
- "SPF processing: # Areas: %d, SPF runtime: %lld sec %lld usec, Reason: %s",
- areas_processed, (long long)runtime.tv_sec,
- (long long)runtime.tv_usec, rbuf);
+ zlog_debug(
+ "SPF processing: # Areas: %d, SPF runtime: %lld sec %lld usec, Reason: %s",
+ areas_processed, (long long)runtime.tv_sec,
+ (long long)runtime.tv_usec, rbuf);
ospf6->last_spf_reason = ospf6->spf_reason;
ospf6_reset_spf_reason(ospf6);
@@ -718,9 +714,7 @@ void ospf6_spf_schedule(struct ospf6 *ospf6, unsigned int reason)
}
if (IS_OSPF6_DEBUG_SPF(PROCESS) || IS_OSPF6_DEBUG_SPF(TIME))
- zlog_debug("SPF: calculation timer delay = %ld", delay);
-
- zlog_info("SPF: Scheduled in %ld msec", delay);
+ zlog_debug("SPF: Rescheduling in %ld msec", delay);
ospf6->t_spf_calc = NULL;
thread_add_timer_msec(master, ospf6_spf_calculation_thread, ospf6,
@@ -728,16 +722,24 @@ void ospf6_spf_schedule(struct ospf6 *ospf6, unsigned int reason)
}
void ospf6_spf_display_subtree(struct vty *vty, const char *prefix, int rest,
- struct ospf6_vertex *v)
+ struct ospf6_vertex *v, json_object *json_obj,
+ bool use_json)
{
struct listnode *node, *nnode;
struct ospf6_vertex *c;
char *next_prefix;
int len;
int restnum;
+ json_object *json_childs = NULL;
+ json_object *json_child = NULL;
- /* "prefix" is the space prefix of the display line */
- vty_out(vty, "%s+-%s [%d]\n", prefix, v->name, v->cost);
+ if (use_json) {
+ json_childs = json_object_new_object();
+ json_object_int_add(json_obj, "cost", v->cost);
+ } else {
+ /* "prefix" is the space prefix of the display line */
+ vty_out(vty, "%s+-%s [%d]\n", prefix, v->name, v->cost);
+ }
len = strlen(prefix) + 4;
next_prefix = (char *)malloc(len);
@@ -749,10 +751,27 @@ void ospf6_spf_display_subtree(struct vty *vty, const char *prefix, int rest,
restnum = listcount(v->child_list);
for (ALL_LIST_ELEMENTS(v->child_list, node, nnode, c)) {
- restnum--;
- ospf6_spf_display_subtree(vty, next_prefix, restnum, c);
- }
+ if (use_json)
+ json_child = json_object_new_object();
+ else
+ restnum--;
+
+ ospf6_spf_display_subtree(vty, next_prefix, restnum, c,
+ json_child, use_json);
+ if (use_json)
+ json_object_object_add(json_childs, c->name,
+ json_child);
+ }
+ if (use_json) {
+ json_object_boolean_add(json_obj, "isLeafNode",
+ !listcount(v->child_list));
+ if (listcount(v->child_list))
+ json_object_object_add(json_obj, "children",
+ json_childs);
+ else
+ json_object_free(json_childs);
+ }
free(next_prefix);
}
diff --git a/ospf6d/ospf6_spf.h b/ospf6d/ospf6_spf.h
index 253888d8ce..36e2b27912 100644
--- a/ospf6d/ospf6_spf.h
+++ b/ospf6d/ospf6_spf.h
@@ -23,6 +23,7 @@
#include "typesafe.h"
#include "ospf6_top.h"
+#include "lib/json.h"
/* Debug option */
extern unsigned char conf_debug_ospf6_spf;
@@ -147,7 +148,8 @@ extern void ospf6_spf_calculation(uint32_t router_id,
extern void ospf6_spf_schedule(struct ospf6 *ospf, unsigned int reason);
extern void ospf6_spf_display_subtree(struct vty *vty, const char *prefix,
- int rest, struct ospf6_vertex *v);
+ int rest, struct ospf6_vertex *v,
+ json_object *json_obj, bool use_json);
extern void ospf6_spf_config_write(struct vty *vty, struct ospf6 *ospf6);
extern int config_write_ospf6_debug_spf(struct vty *vty);
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 11cfe09964..e933207c71 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -1181,7 +1181,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ms
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- mesh_group_name = yang_dnode_get_string(args->dnode, ".");
+ mesh_group_name = yang_dnode_get_string(args->dnode, "mesh-group-name");
result = ip_no_msdp_mesh_group_cmd_worker(pim, mesh_group_name,
args->errmsg,
@@ -2411,7 +2411,7 @@ int lib_interface_pim_address_family_mroute_oif_modify(
}
#ifdef PIM_ENFORCE_LOOPFREE_MFC
- if (iif->ifindex == oif->ifindex) {
+ if (oif && iif && (iif->ifindex == oif->ifindex)) {
strlcpy(args->errmsg,
"% IIF same as OIF and loopfree enforcement is enabled; rejecting",
args->errmsg_len);
@@ -2716,11 +2716,11 @@ int lib_interface_igmp_igmp_enable_modify(struct nb_cb_modify_args *args)
switch (args->event) {
case NB_EV_VALIDATE:
if_dnode = yang_dnode_get_parent(args->dnode, "interface");
- ifp_name = yang_dnode_get_string(if_dnode, ".");
mcast_if_count =
yang_get_list_elements_count(if_dnode);
/* Limiting mcast interfaces to number of VIFs */
if (mcast_if_count == MAXVIFS) {
+ ifp_name = yang_dnode_get_string(if_dnode, "name");
snprintf(args->errmsg, args->errmsg_len,
"Max multicast interfaces(%d) Reached. Could not enable IGMP on interface %s",
MAXVIFS, ifp_name);
@@ -2991,7 +2991,7 @@ int lib_interface_igmp_address_family_static_group_create(
case NB_EV_VALIDATE:
if_dnode = yang_dnode_get_parent(args->dnode, "interface");
if (!is_pim_interface(if_dnode)) {
- ifp_name = yang_dnode_get_string(if_dnode, ".");
+ ifp_name = yang_dnode_get_string(if_dnode, "name");
snprintf(args->errmsg, args->errmsg_len,
"multicast not enabled on interface %s",
ifp_name);
diff --git a/tests/isisd/test_isis_spf.refout b/tests/isisd/test_isis_spf.refout
index 024f7256e0..bdd5b2e439 100644
--- a/tests/isisd/test_isis_spf.refout
+++ b/tests/isisd/test_isis_spf.refout
@@ -3146,9 +3146,9 @@ rt3 TE-IS 50 rt5 - rt5(4)
IS-IS L1 IPv4 routing table:
- Prefix Metric Interface Nexthop Label(s)
- -----------------------------------------------------------
- 10.0.255.3/32 60 - rt5 16050/18/16030
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------
+ 10.0.255.3/32 60 - rt5 16050/18
P-space (self):
rt2
@@ -3194,9 +3194,9 @@ rt3 TE-IS 50 rt5 - rt5(4)
IS-IS L1 IPv6 routing table:
- Prefix Metric Interface Nexthop Label(s)
- -------------------------------------------------------------
- 2001:db8::3/128 60 - rt5 16051/19/16031
+ Prefix Metric Interface Nexthop Label(s)
+ -------------------------------------------------------
+ 2001:db8::3/128 60 - rt5 16051/19
test# test isis topology 2 root rt1 ti-lfa system-id rt1 pseudonode-id 1
P-space (self):
@@ -3236,7 +3236,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------------
- 10.0.255.4/32 65 - rt2 16020/18/16040
+ 10.0.255.4/32 65 - rt2 16020/18
10.0.255.5/32 75 - rt2 16020/18/16050
10.0.255.6/32 75 - rt2 16020/18/16060
@@ -3277,7 +3277,7 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------------
- 2001:db8::4/128 65 - rt2 16021/19/16041
+ 2001:db8::4/128 65 - rt2 16021/19
2001:db8::5/128 75 - rt2 16021/19/16051
2001:db8::6/128 75 - rt2 16021/19/16061
@@ -3508,7 +3508,7 @@ IS-IS L1 IPv4 routing table:
-----------------------------------------------------------
10.0.255.2/32 100 - rt3 16050/17/16020
10.0.255.4/32 90 - rt3 16050/17/16040
- 10.0.255.6/32 80 - rt3 16050/17/16060
+ 10.0.255.6/32 80 - rt3 16050/17
10.0.255.8/32 90 - rt3 16050/17/16080
test# test isis topology 4 root rt4 ti-lfa system-id rt6 ipv4-only
@@ -3553,7 +3553,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------------
- 10.0.255.6/32 100 - rt2 16050/17/16060
+ 10.0.255.6/32 100 - rt2 16050/17
10.0.255.8/32 110 - rt2 16050/17/16080
test# test isis topology 5 root rt1 ti-lfa system-id rt2 ipv4-only
@@ -3865,7 +3865,7 @@ IS-IS L1 IPv4 routing table:
10.0.255.1/32 100 - rt5 16110/17/16010
10.0.255.4/32 90 - rt5 16110/17/16040
10.0.255.7/32 80 - rt5 16110/17/16070
- 10.0.255.10/32 70 - rt5 16110/17/16100
+ 10.0.255.10/32 70 - rt5 16110/17
test# test isis topology 8 root rt2 ti-lfa system-id rt5 ipv4-only
P-space (self):
@@ -3979,9 +3979,9 @@ rt3 TE-IS 120 rt2 - rt4(4)
IS-IS L1 IPv4 routing table:
- Prefix Metric Interface Nexthop Label(s)
- -----------------------------------------------------------
- 10.0.255.3/32 130 - rt2 16040/18/16030
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------
+ 10.0.255.3/32 130 - rt2 16040/18
P-space (self):
rt2
@@ -4030,9 +4030,9 @@ rt3 TE-IS 120 rt2 - rt4(4)
IS-IS L1 IPv6 routing table:
- Prefix Metric Interface Nexthop Label(s)
- -------------------------------------------------------------
- 2001:db8::3/128 130 - rt2 16041/19/16031
+ Prefix Metric Interface Nexthop Label(s)
+ -------------------------------------------------------
+ 2001:db8::3/128 130 - rt2 16041/19
test# test isis topology 9 root rt1 ti-lfa system-id rt2
P-space (self):
@@ -4079,7 +4079,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------------
10.0.255.2/32 130 - rt3 16030/18/16020
- 10.0.255.4/32 120 - rt3 16030/18/16040
+ 10.0.255.4/32 120 - rt3 16030/18
10.0.255.5/32 130 - rt3 16030/18/16050
10.0.255.6/32 150 - rt3 16030/18/16060
10.0.255.7/32 150 - rt3 16030/18/16070
@@ -4130,7 +4130,7 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------------
2001:db8::2/128 130 - rt3 16031/19/16021
- 2001:db8::4/128 120 - rt3 16031/19/16041
+ 2001:db8::4/128 120 - rt3 16031/19
2001:db8::5/128 130 - rt3 16031/19/16051
2001:db8::6/128 150 - rt3 16031/19/16061
2001:db8::7/128 150 - rt3 16031/19/16071
@@ -4213,9 +4213,9 @@ IS-IS L1 IPv4 routing table:
10.0.255.3/32 80 - rt6 16060/16/16030
- rt7 16070/16/16030
- rt8 16080/16/16030
- 10.0.255.4/32 50 - rt6 16060/16/16040
- - rt7 16070/16/16040
- - rt8 16080/16/16040
+ 10.0.255.4/32 50 - rt6 16060/16
+ - rt7 16070/16
+ - rt8 16080/16
10.0.255.5/32 60 - rt6 16060/16/16050
- rt7 16070/16/16050
- rt8 16080/16/16050
@@ -4295,9 +4295,9 @@ IS-IS L1 IPv6 routing table:
2001:db8::3/128 80 - rt6 16061/17/16031
- rt7 16071/17/16031
- rt8 16081/17/16031
- 2001:db8::4/128 50 - rt6 16061/17/16041
- - rt7 16071/17/16041
- - rt8 16081/17/16041
+ 2001:db8::4/128 50 - rt6 16061/17
+ - rt7 16071/17
+ - rt8 16081/17
2001:db8::5/128 60 - rt6 16061/17/16051
- rt7 16071/17/16051
- rt8 16081/17/16051
@@ -4351,9 +4351,9 @@ rt3 TE-IS 50 rt5 - rt1(4)
IS-IS L1 IPv4 routing table:
- Prefix Metric Interface Nexthop Label(s)
- -----------------------------------------------------------
- 10.0.255.8/32 60 - rt5 16040/26/16080
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------
+ 10.0.255.8/32 60 - rt5 16040/26
P-space (self):
rt1
@@ -4403,9 +4403,9 @@ rt3 TE-IS 50 rt5 - rt1(4)
IS-IS L1 IPv6 routing table:
- Prefix Metric Interface Nexthop Label(s)
- -------------------------------------------------------------
- 2001:db8::8/128 60 - rt5 16041/27/16081
+ Prefix Metric Interface Nexthop Label(s)
+ -------------------------------------------------------
+ 2001:db8::8/128 60 - rt5 16041/27
test# test isis topology 10 root rt1 ti-lfa system-id rt2
P-space (self):
@@ -4460,8 +4460,8 @@ IS-IS L1 IPv4 routing table:
- rt4 16070/18/16020
10.0.255.5/32 100 - rt3 20060/18/16050
- rt4 16070/18/16050
- 10.0.255.8/32 90 - rt3 20060/18/16080
- - rt4 16070/18/16080
+ 10.0.255.8/32 90 - rt3 20060/18
+ - rt4 16070/18
P-space (self):
rt3
@@ -4515,8 +4515,8 @@ IS-IS L1 IPv6 routing table:
- rt4 16071/19/16021
2001:db8::5/128 100 - rt3 20061/19/16051
- rt4 16071/19/16051
- 2001:db8::8/128 90 - rt3 20061/19/16081
- - rt4 16071/19/16081
+ 2001:db8::8/128 90 - rt3 20061/19
+ - rt4 16071/19
test# test isis topology 10 root rt1 ti-lfa system-id rt4
P-space (self):
@@ -4563,7 +4563,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------------
10.0.255.4/32 100 - rt2 16080/20/16040
- 10.0.255.7/32 90 - rt2 16080/20/16070
+ 10.0.255.7/32 90 - rt2 16080/20
P-space (self):
rt2
@@ -4609,7 +4609,7 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------------
2001:db8::4/128 100 - rt2 16081/21/16041
- 2001:db8::7/128 90 - rt2 16081/21/16071
+ 2001:db8::7/128 90 - rt2 16081/21
test# test isis topology 11 root rt2 ti-lfa system-id rt4
P-space (self):
@@ -4747,12 +4747,12 @@ rt3 TE-IS 740 rt2 - rt5(4)
IS-IS L1 IPv4 routing table:
- Prefix Metric Interface Nexthop Label(s)
- -----------------------------------------------------------------
- 10.0.255.3/32 750 - rt2 16080/17/16/16/16030
- 10.0.255.5/32 350 - rt2 16080/17/16/16050
- 10.0.255.7/32 150 - rt2 16080/17/16070
- 10.0.255.9/32 160 - rt2 16080/17/18/16090
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------------
+ 10.0.255.3/32 750 - rt2 16080/17/16/16
+ 10.0.255.5/32 350 - rt2 16080/17/16
+ 10.0.255.7/32 150 - rt2 16080/17
+ 10.0.255.9/32 160 - rt2 16080/17/18
test# test isis topology 13 root rt1 ti-lfa system-id rt3 ipv4-only
P-space (self):
diff --git a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
index f7ed29782d..afe546d502 100644
--- a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
+++ b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
@@ -352,7 +352,7 @@ def test_converge_protocols():
actual = (
net["r%s" % i]
.cmd(
- 'vtysh -c "show ip route" | sed -e \'/^Codes: /,/^\s*$/d\' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null'
+ "vtysh -c \"show ip route\" | sed -e '/^Codes: /,/^\s*$/d' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null"
)
.rstrip()
)
@@ -383,7 +383,7 @@ def test_converge_protocols():
actual = (
net["r%s" % i]
.cmd(
- 'vtysh -c "show ipv6 route" | sed -e \'/^Codes: /,/^\s*$/d\' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null'
+ "vtysh -c \"show ipv6 route\" | sed -e '/^Codes: /,/^\s*$/d' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null"
)
.rstrip()
)
diff --git a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py
index 4c3bad1280..560d6eebec 100644
--- a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py
+++ b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py
@@ -44,7 +44,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.bfdd]
+pytestmark = [pytest.mark.bgpd, pytest.mark.bfdd]
+
class BFDTopo(Topo):
"Test topology builder"
@@ -65,6 +66,7 @@ class BFDTopo(Topo):
switch.add_link(tgen.gears["r2"])
switch.add_link(tgen.gears["r3"])
+
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(BFDTopo, mod.__name__)
diff --git a/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py b/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py
index bb930141ac..fcb5672dce 100644
--- a/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py
+++ b/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py
@@ -92,6 +92,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.bfdd, pytest.mark.isisd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py b/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py
index 9ce14dd75e..ae148f948c 100755
--- a/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py
+++ b/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py
@@ -92,6 +92,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.bfdd, pytest.mark.ospfd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py b/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py
index 76e9ef247f..4a2c8ee002 100644
--- a/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py
+++ b/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py
@@ -45,7 +45,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.bfdd, pytest.mark.isisd, pytest.mark.ospfd]
+pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.ospfd]
+
class BFDProfTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bfd-topo1/test_bfd_topo1.py b/tests/topotests/bfd-topo1/test_bfd_topo1.py
index 76078b5d7d..86bdcfed04 100644
--- a/tests/topotests/bfd-topo1/test_bfd_topo1.py
+++ b/tests/topotests/bfd-topo1/test_bfd_topo1.py
@@ -45,7 +45,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.bfdd]
+pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
+
class BFDTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bfd-topo2/test_bfd_topo2.py b/tests/topotests/bfd-topo2/test_bfd_topo2.py
index c31cc02b3a..e85b2644dd 100644
--- a/tests/topotests/bfd-topo2/test_bfd_topo2.py
+++ b/tests/topotests/bfd-topo2/test_bfd_topo2.py
@@ -46,7 +46,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.bfdd, pytest.mark.ospfd]
+pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd, pytest.mark.ospfd]
+
class BFDTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bfd-topo3/test_bfd_topo3.py b/tests/topotests/bfd-topo3/test_bfd_topo3.py
index f473b67108..6bb223e203 100644
--- a/tests/topotests/bfd-topo3/test_bfd_topo3.py
+++ b/tests/topotests/bfd-topo3/test_bfd_topo3.py
@@ -45,6 +45,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
+
class BFDTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py
index a238ff8da3..8a1ffe085d 100644
--- a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py
+++ b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py
@@ -46,7 +46,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.bfdd]
+pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
+
class BFDTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bgp-aggregator-zero/test_bgp_aggregator_zero.py b/tests/topotests/bgp-aggregator-zero/test_bgp_aggregator_zero.py
index 0db47da3f2..c4bbdce2c3 100644
--- a/tests/topotests/bgp-aggregator-zero/test_bgp_aggregator_zero.py
+++ b/tests/topotests/bgp-aggregator-zero/test_bgp_aggregator_zero.py
@@ -40,6 +40,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class BgpAggregatorAsnZero(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
index b701a0d61e..374cce21f6 100644
--- a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
+++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
@@ -91,6 +91,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_basic_functionality.json".format(CWD)
try:
diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py
index 353df0684b..dfe6a8074d 100644
--- a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py
+++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py
@@ -324,6 +324,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
write_test_footer(tc_name)
+
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
@@ -349,7 +350,7 @@ def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
addr_type,
dut,
input_dict_1,
- next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)],
+ next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)],
protocol=protocol,
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
@@ -372,7 +373,7 @@ def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
addr_type,
dut,
input_dict_1,
- next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)],
+ next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)],
protocol=protocol,
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py
index 2f73bdb1b8..2bde52af1d 100644
--- a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py
+++ b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py
@@ -325,6 +325,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
write_test_footer(tc_name)
+
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
@@ -350,7 +351,7 @@ def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
addr_type,
dut,
input_dict_1,
- next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)],
+ next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)],
protocol=protocol,
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
@@ -373,7 +374,7 @@ def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
addr_type,
dut,
input_dict_1,
- next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)],
+ next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)],
protocol=protocol,
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
diff --git a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py
index 48ad8144a2..2744920272 100644
--- a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py
+++ b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py
@@ -603,6 +603,7 @@ def ping_anycast_gw(tgen):
local_host.run(cmd_str)
remote_host.run(cmd_str)
+
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
"""
checks if mac is present and if desination matches the one provided
diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py b/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py
index 785a3acbf9..086bad6481 100755
--- a/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py
+++ b/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py
@@ -47,6 +47,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py b/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py
index 5eb1738632..df6c76539c 100755
--- a/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py
+++ b/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py
@@ -47,6 +47,8 @@ from lib.snmptest import SnmpTester
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.snmp]
+
class TemplateTopo(Topo):
"Test topology builder"
@@ -266,7 +268,7 @@ def test_pe1_converge_evpn():
break
count += 1
sleep(1)
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
assertmsg = "BGP Peer 10.4.4.4 did not connect"
assert passed, assertmsg
@@ -503,8 +505,10 @@ def test_r1_mplsvpn_VrfTable():
associated_int = r1_snmp.get(
"mplsL3VpnVrfAssociatedInterfaces.{}".format(snmp_str_to_oid("VRF-a"))
)
- assertmsg = "mplsL3VpnVrfAssociatedInterfaces incorrect should be 3 value {}".format(
- associated_int
+ assertmsg = (
+ "mplsL3VpnVrfAssociatedInterfaces incorrect should be 3 value {}".format(
+ associated_int
+ )
)
assert associated_int == "3", assertmsg
@@ -620,7 +624,7 @@ rte_table_test = {
"unknown(0)",
"ipv4(1)",
"unknown(0)",
- ],
+ ],
"mplsL3VpnVrfRteInetCidrNextHop": [
"C0 A8 64 0A",
"C0 A8 C8 0A",
@@ -649,7 +653,15 @@ rte_table_test = {
"bgp(14)",
"local(2)",
],
- "mplsL3VpnVrfRteInetCidrNextHopAS": ["65001", "65001", "0", "65001", "0", "65001", "0"],
+ "mplsL3VpnVrfRteInetCidrNextHopAS": [
+ "65001",
+ "65001",
+ "0",
+ "65001",
+ "0",
+ "65001",
+ "0",
+ ],
"mplsL3VpnVrfRteInetCidrMetric1": ["0", "0", "20", "0", "0", "0", "0"],
"mplsL3VpnVrfRteInetCidrMetric2": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"],
"mplsL3VpnVrfRteInetCidrMetric3": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"],
@@ -663,7 +675,7 @@ rte_table_test = {
"active(1)",
"active(1)",
"active(1)",
- "active(1)",
+ "active(1)",
],
}
diff --git a/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py b/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py
index 86fd4b601f..be07fab87b 100644
--- a/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py
+++ b/tests/topotests/bgp_aggregate-address_origin/test_bgp_aggregate-address_origin.py
@@ -47,6 +47,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py
index c7d9f13f3f..484f40251f 100644
--- a/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py
+++ b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py
@@ -50,6 +50,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
index 544bda145c..4d41c7a321 100644
--- a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
+++ b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
@@ -76,6 +76,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_as_allow_in.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py b/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py
index 02edb62ca0..a736463927 100644
--- a/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py
+++ b/tests/topotests/bgp_as_wide_bgp_identifier/test_bgp_as_wide_bgp_identifier.py
@@ -45,6 +45,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py
index a856c9278f..6512e4d4c6 100644
--- a/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py
+++ b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py
@@ -40,6 +40,8 @@ from lib.topolog import logger
from mininet.topo import Topo
from lib.common_config import step
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
index fe7052b80f..81bf8da31a 100644
--- a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
+++ b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
@@ -45,6 +45,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py
index f2e54b24d6..6d4a7d82e5 100644
--- a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py
+++ b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py
@@ -66,6 +66,9 @@ from lib.bgp import (
from lib.topojson import build_topo_from_json, build_config_from_json
from copy import deepcopy
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_communities.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py b/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py
index c0842148f1..3415789068 100644
--- a/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py
+++ b/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py
@@ -70,6 +70,9 @@ from lib.bgp import (
from lib.topojson import build_topo_from_json, build_config_from_json
from copy import deepcopy
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_communities_topo2.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py
index 5fc4310266..95e63c617e 100644
--- a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py
+++ b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py
@@ -59,6 +59,8 @@ from mininet.topo import Topo
from lib.common_config import step
from time import sleep
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_features/peer1/exa_readpipe.py b/tests/topotests/bgp_features/peer1/exa_readpipe.py
index dba1536388..9e689a27e3 100644
--- a/tests/topotests/bgp_features/peer1/exa_readpipe.py
+++ b/tests/topotests/bgp_features/peer1/exa_readpipe.py
@@ -8,7 +8,7 @@ if len(sys.argv) != 2:
fifo = sys.argv[1]
while True:
- pipe = open(fifo, 'r')
+ pipe = open(fifo, "r")
with pipe:
line = pipe.readline().strip()
if line != "":
diff --git a/tests/topotests/bgp_features/peer2/exa_readpipe.py b/tests/topotests/bgp_features/peer2/exa_readpipe.py
index dba1536388..9e689a27e3 100644
--- a/tests/topotests/bgp_features/peer2/exa_readpipe.py
+++ b/tests/topotests/bgp_features/peer2/exa_readpipe.py
@@ -8,7 +8,7 @@ if len(sys.argv) != 2:
fifo = sys.argv[1]
while True:
- pipe = open(fifo, 'r')
+ pipe = open(fifo, "r")
with pipe:
line = pipe.readline().strip()
if line != "":
diff --git a/tests/topotests/bgp_features/peer3/exa_readpipe.py b/tests/topotests/bgp_features/peer3/exa_readpipe.py
index dba1536388..9e689a27e3 100644
--- a/tests/topotests/bgp_features/peer3/exa_readpipe.py
+++ b/tests/topotests/bgp_features/peer3/exa_readpipe.py
@@ -8,7 +8,7 @@ if len(sys.argv) != 2:
fifo = sys.argv[1]
while True:
- pipe = open(fifo, 'r')
+ pipe = open(fifo, "r")
with pipe:
line = pipe.readline().strip()
if line != "":
diff --git a/tests/topotests/bgp_features/peer4/exa_readpipe.py b/tests/topotests/bgp_features/peer4/exa_readpipe.py
index dba1536388..9e689a27e3 100644
--- a/tests/topotests/bgp_features/peer4/exa_readpipe.py
+++ b/tests/topotests/bgp_features/peer4/exa_readpipe.py
@@ -8,7 +8,7 @@ if len(sys.argv) != 2:
fifo = sys.argv[1]
while True:
- pipe = open(fifo, 'r')
+ pipe = open(fifo, "r")
with pipe:
line = pipe.readline().strip()
if line != "":
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py
index d863f9c3ed..be12cfde37 100755
--- a/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/test_bgp_l3vpn_to_bgp_direct.py
@@ -31,6 +31,7 @@ from lib.ltemplate import *
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
+
def test_adjacencies():
CliOnFail = None
# For debugging, uncomment the next line
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py
index d2e40037a6..8bb700235c 100755
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py
@@ -31,6 +31,7 @@ from lib.ltemplate import *
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
+
def test_check_linux_vrf():
CliOnFail = None
# For debugging, uncomment the next line
diff --git a/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py
index d773e87ef6..a3ca1408e2 100755
--- a/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py
+++ b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py
@@ -142,7 +142,7 @@ def _bgp_converge_initial(router_name, peer_address, timeout=180):
"""
Waits for the BGP connection between a given router and a given peer
(specified by its IP address) to be established. If the connection is
- not established within a given timeout, then an exception is raised.
+ not established within a given timeout, then an exception is raised.
"""
tgen = get_topogen()
router = tgen.routers()[router_name]
diff --git a/tests/topotests/bgp_lu_topo1/test_bgp_lu.py b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py
index 61418d7a79..d550c38a2f 100644
--- a/tests/topotests/bgp_lu_topo1/test_bgp_lu.py
+++ b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py
@@ -45,17 +45,18 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-#Basic scenario for BGP-LU. Nodes are directly connected.
-#Node 3 is advertising many routes to 2, which advertises them
-#as BGP-LU to 1; this way we get routes with actual labels, as
-#opposed to implicit-null routes in the 2-node case.
+# Basic scenario for BGP-LU. Nodes are directly connected.
+# Node 3 is advertising many routes to 2, which advertises them
+# as BGP-LU to 1; this way we get routes with actual labels, as
+# opposed to implicit-null routes in the 2-node case.
#
# AS1 BGP-LU AS2 iBGP AS2
-#+-----+ +-----+ +-----+
-#| |.1 .2| |.2 .3| |
-#| 1 +----------------+ 2 +-----------------+ 3 |
-#| | 10.0.0.0/24 | | 10.0.1.0/24 | |
-#+-----+ +-----+ +-----+
+# +-----+ +-----+ +-----+
+# | |.1 .2| |.2 .3| |
+# | 1 +----------------+ 2 +-----------------+ 3 |
+# | | 10.0.0.0/24 | | 10.0.1.0/24 | |
+# +-----+ +-----+ +-----+
+
class TemplateTopo(Topo):
"Test topology builder"
@@ -84,7 +85,6 @@ class TemplateTopo(Topo):
switch.add_link(tgen.gears["R3"])
-
def setup_module(mod):
"Sets up the pytest environment"
# This function initiates the topology build with Topogen...
@@ -115,15 +115,19 @@ def teardown_module(mod):
# This function tears down the whole topology.
tgen.stop_topology()
+
def check_labelpool(router):
json_file = "{}/{}/labelpool.summ.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp, router, "show bgp labelpool summary json", expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bgp labelpool summary json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"{}" JSON output mismatches - Did not converge'.format(router.name)
assert result is None, assertmsg
-
+
+
def test_converge_bgplu():
"Wait for protocol convergence"
@@ -132,13 +136,14 @@ def test_converge_bgplu():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- #tgen.mininet_cli();
+ # tgen.mininet_cli();
r1 = tgen.gears["R1"]
r2 = tgen.gears["R2"]
check_labelpool(r1)
check_labelpool(r2)
+
def test_clear_bgplu():
"Wait for protocol convergence"
@@ -147,7 +152,7 @@ def test_clear_bgplu():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- #tgen.mininet_cli();
+ # tgen.mininet_cli();
r1 = tgen.gears["R1"]
r2 = tgen.gears["R2"]
@@ -164,6 +169,7 @@ def test_clear_bgplu():
check_labelpool(r1)
check_labelpool(r2)
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
diff --git a/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py b/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py
index ce69f28aba..fa04aaf366 100755
--- a/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py
+++ b/tests/topotests/bgp_rfapi_basic_sanity/test_bgp_rfapi_basic_sanity.py
@@ -31,6 +31,7 @@ from lib.ltemplate import *
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
+
def test_add_routes():
CliOnFail = None
# For debugging, uncomment the next line
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py
index 63db393178..b99f1a7418 100644
--- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py
@@ -498,7 +498,6 @@ def disable_route_map_to_prefer_global_next_hop(tgen, topo):
#
#####################################################
-
def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request):
"""
TC5_FUNC_5:
@@ -761,6 +760,29 @@ def test_dynamic_imported_routes_advertised_to_iBGP_peer_p0(request):
tc_name, result
)
+ for addr_type in ADDR_TYPES:
+
+ step(
+ "On router R1 delete static routes in vrf ISR to LOOPBACK_1"
+ )
+
+ input_routes_r1 = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_3[addr_type], NETWORK1_4[addr_type]],
+ "next_hop": (intf_r2_r1[addr_type]).split("/")[0],
+ "delete": True
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, input_routes_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
write_test_footer(tc_name)
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
index 87f391ae49..036277411f 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
@@ -1312,14 +1312,14 @@ def test_evpn_routes_from_VNFs_p1(request):
)
for addr_type in ADDR_TYPES:
input_routes = {key: topo["routers"][key] for key in ["r1"]}
- result = verify_rib(tgen, addr_type, "d2", input_routes, expected=False)
+ result = verify_rib(tgen, addr_type, "d2", input_routes, expected=True)
assert result is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
input_routes = {key: topo["routers"][key] for key in ["r2"]}
- result = verify_rib(tgen, addr_type, "d2", input_routes, expected=False)
+ result = verify_rib(tgen, addr_type, "d2", input_routes, expected=True)
assert result is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
@@ -1475,8 +1475,8 @@ def test_evpn_routes_from_VNFs_p1(request):
tgen, dut, intf_name, intf_ipv6, vrf, create=False
)
- logger.info("Wait for 60 sec.")
- sleep(60)
+ result = verify_bgp_convergence(tgen, topo, dut)
+ assert result is True, "Failed to converge on {}".format(dut)
step(
"Verify that DCG-2 receives EVPN routes corresponding to "
diff --git a/tests/topotests/example-test/test_template.py b/tests/topotests/example-test/test_template.py
index 973303b830..0265dbe796 100644
--- a/tests/topotests/example-test/test_template.py
+++ b/tests/topotests/example-test/test_template.py
@@ -44,7 +44,7 @@ from lib.topolog import logger
from mininet.topo import Topo
-#TODO: select markers based on daemons used during test
+# TODO: select markers based on daemons used during test
# pytest module level markers
"""
pytestmark = pytest.mark.bfdd # single marker
diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
index cd48716905..09ac9f2fa4 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
@@ -54,7 +54,7 @@ from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
-#TODO: select markers based on daemons used during test
+# TODO: select markers based on daemons used during test
# pytest module level markers
"""
pytestmark = pytest.mark.bfdd # single marker
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
index 0c72e30044..26336d5de1 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py
@@ -53,7 +53,7 @@ from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
-#TODO: select markers based on daemons used during test
+# TODO: select markers based on daemons used during test
# pytest module level markers
"""
pytestmark = pytest.mark.bfdd # single marker
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
index d05ad6db21..012b05d376 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
@@ -55,7 +55,7 @@ from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
-#TODO: select markers based on daemons used during test
+# TODO: select markers based on daemons used during test
# pytest module level markers
"""
pytestmark = pytest.mark.bfdd # single marker
diff --git a/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py b/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py
index ab15c3542f..dcfcd11435 100755
--- a/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py
+++ b/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py
@@ -165,6 +165,7 @@ class TemplateTopo(Topo):
f_in.close()
f_out.close()
+
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(TemplateTopo, mod.__name__)
diff --git a/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py b/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py
index 95a0d87c33..27dc1073c6 100755
--- a/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py
+++ b/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py
@@ -73,7 +73,7 @@ from functools import partial
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
@@ -87,8 +87,10 @@ from mininet.topo import Topo
# Global multi-dimensional dictionary containing all expected outputs
outputs = {}
+
class TemplateTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
@@ -96,36 +98,36 @@ class TemplateTopo(Topo):
#
# Define FRR Routers
#
- for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
tgen.add_router(router)
#
# Define connections
#
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1")
- switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1")
- switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1")
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4")
- switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5")
- switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
- switch = tgen.add_switch('s6')
- switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5")
- switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch('s7')
- switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6")
- switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch('s8')
- switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6")
- switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
def setup_module(mod):
@@ -138,16 +140,15 @@ def setup_module(mod):
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_ISIS,
- os.path.join(CWD, '{}/isisd.conf'.format(rname))
+ TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
@@ -155,6 +156,7 @@ def teardown_module(mod):
# This function tears down the whole topology.
tgen.stop_topology()
+
def router_compare_json_output(rname, command, reference):
"Compare router JSON output"
@@ -170,6 +172,7 @@ def router_compare_json_output(rname, command, reference):
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
+
#
# Step 1
#
@@ -184,13 +187,14 @@ def test_isis_adjacencies_step1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
- rname,
+ rname,
"show yang operational-data /frr-interface:lib isisd",
"step1/show_yang_interface_isis_adjacencies.ref",
)
+
def test_rib_ipv4_step1():
logger.info("Test (step 1): verify IPv4 RIB")
tgen = get_topogen()
@@ -199,11 +203,12 @@ def test_rib_ipv4_step1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step1/show_ip_route.ref"
)
+
def test_rib_ipv6_step1():
logger.info("Test (step 1): verify IPv6 RIB")
tgen = get_topogen()
@@ -212,11 +217,12 @@ def test_rib_ipv6_step1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']:
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref"
)
+
#
# Step 2
#
@@ -235,15 +241,20 @@ def test_rib_ipv4_step2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('Disabling setting the attached-bit on RT2 and RT4')
- tgen.net['rt2'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"')
- tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"')
+ logger.info("Disabling setting the attached-bit on RT2 and RT4")
+ tgen.net["rt2"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"'
+ )
+ tgen.net["rt4"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"'
+ )
- for rname in ['rt1', 'rt6']:
+ for rname in ["rt1", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step2/show_ip_route.ref"
)
+
def test_rib_ipv6_step2():
logger.info("Test (step 2): verify IPv6 RIB")
tgen = get_topogen()
@@ -252,11 +263,12 @@ def test_rib_ipv6_step2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['rt1', 'rt6']:
+ for rname in ["rt1", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step2/show_ipv6_route.ref"
)
+
#
# Step 3
#
@@ -265,7 +277,7 @@ def test_rib_ipv6_step2():
# -disable processing a LSP with attached-bit set
#
# Expected changes:
-# -RT1 and RT6 should not install a default route
+# -RT1 and RT6 should not install a default route
#
def test_rib_ipv4_step3():
logger.info("Test (step 3): verify IPv4 RIB")
@@ -275,19 +287,24 @@ def test_rib_ipv4_step3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('Enable setting the attached-bit on RT2 and RT4')
- tgen.net['rt2'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"')
- tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"')
+ logger.info("Enable setting the attached-bit on RT2 and RT4")
+ tgen.net["rt2"].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"')
+ tgen.net["rt4"].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"')
- logger.info('Disable processing received attached-bit in LSP on RT1 and RT6')
- tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit receive ignore"')
- tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit receive ignore"')
+ logger.info("Disable processing received attached-bit in LSP on RT1 and RT6")
+ tgen.net["rt1"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "attached-bit receive ignore"'
+ )
+ tgen.net["rt6"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "attached-bit receive ignore"'
+ )
- for rname in ['rt1', 'rt6']:
+ for rname in ["rt1", "rt6"]:
router_compare_json_output(
rname, "show ip route isis json", "step3/show_ip_route.ref"
)
+
def test_rib_ipv6_step3():
logger.info("Test (step 3): verify IPv6 RIB")
tgen = get_topogen()
@@ -296,11 +313,12 @@ def test_rib_ipv6_step3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['rt1', 'rt6']:
+ for rname in ["rt1", "rt6"]:
router_compare_json_output(
rname, "show ipv6 route isis json", "step3/show_ipv6_route.ref"
)
+
#
# Step 4
#
@@ -319,13 +337,21 @@ def test_rib_ipv4_step4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('restore default processing on received attached-bit in LSP on RT1 and RT6')
- tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"')
- tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"')
-
- for rname in ['rt1', 'rt6']:
+ logger.info(
+ "restore default processing on received attached-bit in LSP on RT1 and RT6"
+ )
+ tgen.net["rt1"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"'
+ )
+ tgen.net["rt6"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"'
+ )
+
+ for rname in ["rt1", "rt6"]:
router_compare_json_output(
- rname, "show ip route isis json", "step4/show_ip_route.ref")
+ rname, "show ip route isis json", "step4/show_ip_route.ref"
+ )
+
def test_rib_ipv6_step4():
logger.info("Test (step 4): verify IPv6 RIB")
@@ -335,19 +361,22 @@ def test_rib_ipv6_step4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['rt1', 'rt6']:
+ for rname in ["rt1", "rt6"]:
router_compare_json_output(
- rname, "show ipv6 route isis json", "step4/show_ipv6_route.ref")
+ rname, "show ipv6 route isis json", "step4/show_ipv6_route.ref"
+ )
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py b/tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py
index 4c692841ac..9ad41c5934 100755
--- a/tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py
+++ b/tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py
@@ -167,6 +167,7 @@ class TemplateTopo(Topo):
f_in.close()
f_out.close()
+
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(TemplateTopo, mod.__name__)
diff --git a/tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py b/tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py
index da59cfe772..6bbb570267 100755
--- a/tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py
+++ b/tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py
@@ -85,7 +85,7 @@ from functools import partial
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
@@ -101,6 +101,7 @@ pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.pathd]
class TemplateTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
@@ -108,48 +109,49 @@ class TemplateTopo(Topo):
#
# Define FRR Routers
#
- for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6', 'dst']:
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "dst"]:
tgen.add_router(router)
#
# Define connections
#
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1")
- switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1")
- switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1")
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-1")
- switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-2")
- switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-1")
- switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-1")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
- switch = tgen.add_switch('s5')
- switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-2")
- switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-2")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch('s6')
- switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5")
- switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch('s7')
- switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6")
- switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
- switch = tgen.add_switch('s8')
- switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6")
- switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5")
+ switch = tgen.add_switch("s9")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-dst")
+ switch.add_link(tgen.gears["dst"], nodeif="eth-rt6")
- switch = tgen.add_switch('s9')
- switch.add_link(tgen.gears['rt6'], nodeif="eth-dst")
- switch.add_link(tgen.gears['dst'], nodeif="eth-rt6")
def setup_module(mod):
"Sets up the pytest environment"
@@ -167,24 +169,21 @@ def setup_module(mod):
# For all registered routers, load the zebra configuration file
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_ISIS,
- os.path.join(CWD, '{}/isisd.conf'.format(rname))
+ TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_PATH,
- os.path.join(CWD, '{}/pathd.conf'.format(rname))
+ TopoRouter.RD_PATH, os.path.join(CWD, "{}/pathd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
@@ -192,6 +191,7 @@ def teardown_module(mod):
# This function tears down the whole topology.
tgen.stop_topology()
+
def setup_testcase(msg):
logger.info(msg)
tgen = get_topogen()
@@ -202,9 +202,11 @@ def setup_testcase(msg):
return tgen
+
def print_cmd_result(rname, command):
print(get_topogen().gears[rname].vtysh_cmd(command, isjson=False))
+
def compare_json_test(router, command, reference, exact):
output = router.vtysh_cmd(command, isjson=True)
result = topotest.json_cmp(output, reference)
@@ -212,9 +214,10 @@ def compare_json_test(router, command, reference, exact):
# Note: topotest.json_cmp() just checks on inclusion of keys.
# For exact matching also compare the other way around.
if not result and exact:
- return topotest.json_cmp(reference, output)
+ return topotest.json_cmp(reference, output)
else:
- return result
+ return result
+
def cmp_json_output(rname, command, reference, exact=False):
"Compare router JSON output"
@@ -222,78 +225,136 @@ def cmp_json_output(rname, command, reference, exact=False):
logger.info('Comparing router "%s" "%s" output', rname, command)
tgen = get_topogen()
- filename = '{}/{}/{}'.format(CWD, rname, reference)
+ filename = "{}/{}/{}".format(CWD, rname, reference)
expected = json.loads(open(filename).read())
# Run test function until we get an result. Wait at most 60 seconds.
- test_func = partial(compare_json_test,
- tgen.gears[rname], command, expected, exact)
+ test_func = partial(compare_json_test, tgen.gears[rname], command, expected, exact)
_, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
+
def cmp_json_output_exact(rname, command, reference):
return cmp_json_output(rname, command, reference, True)
-def add_candidate_path(rname, endpoint, pref, name, segment_list='default'):
- get_topogen().net[rname].cmd(''' \
+
+def add_candidate_path(rname, endpoint, pref, name, segment_list="default"):
+ get_topogen().net[rname].cmd(
+ """ \
vtysh -c "conf t" \
-c "segment-routing" \
-c "traffic-eng" \
- -c "policy color 1 endpoint ''' + endpoint + '''" \
- -c "candidate-path preference ''' + str(pref) + ''' name ''' + name + ''' explicit segment-list ''' + segment_list + '''"''')
+ -c "policy color 1 endpoint """
+ + endpoint
+ + """" \
+ -c "candidate-path preference """
+ + str(pref)
+ + """ name """
+ + name
+ + """ explicit segment-list """
+ + segment_list
+ + '''"'''
+ )
+
def delete_candidate_path(rname, endpoint, pref):
- get_topogen().net[rname].cmd(''' \
+ get_topogen().net[rname].cmd(
+ """ \
vtysh -c "conf t" \
-c "segment-routing" \
-c "traffic-eng" \
- -c "policy color 1 endpoint ''' + endpoint + '''" \
- -c "no candidate-path preference ''' + str(pref) + '''"''')
+ -c "policy color 1 endpoint """
+ + endpoint
+ + """" \
+ -c "no candidate-path preference """
+ + str(pref)
+ + '''"'''
+ )
+
def add_segment(rname, name, index, label):
- get_topogen().net[rname].cmd(''' \
+ get_topogen().net[rname].cmd(
+ """ \
vtysh -c "conf t" \
-c "segment-routing" \
-c "traffic-eng" \
- -c "segment-list ''' + name + '''" \
- -c "index ''' + str(index) + ''' mpls label ''' + str(label) + '''"''')
+ -c "segment-list """
+ + name
+ + """" \
+ -c "index """
+ + str(index)
+ + """ mpls label """
+ + str(label)
+ + '''"'''
+ )
+
def delete_segment(rname, name, index):
- get_topogen().net[rname].cmd(''' \
+ get_topogen().net[rname].cmd(
+ """ \
vtysh -c "conf t" \
-c "segment-routing" \
-c "traffic-eng" \
- -c "segment-list ''' + name + '''" \
- -c "no index ''' + str(index) + '''"''')
+ -c "segment-list """
+ + name
+ + """" \
+ -c "no index """
+ + str(index)
+ + '''"'''
+ )
+
def create_sr_policy(rname, endpoint, bsid):
- get_topogen().net[rname].cmd(''' \
+ get_topogen().net[rname].cmd(
+ """ \
vtysh -c "conf t" \
-c "segment-routing" \
-c "traffic-eng" \
- -c "policy color 1 endpoint ''' + endpoint + '''" \
+ -c "policy color 1 endpoint """
+ + endpoint
+ + """" \
-c "name default" \
- -c "binding-sid ''' + str(bsid) + '''"''')
+ -c "binding-sid """
+ + str(bsid)
+ + '''"'''
+ )
+
def delete_sr_policy(rname, endpoint):
- get_topogen().net[rname].cmd(''' \
+ get_topogen().net[rname].cmd(
+ """ \
vtysh -c "conf t" \
-c "segment-routing" \
-c "traffic-eng" \
- -c "no policy color 1 endpoint ''' + endpoint + '''"''')
+ -c "no policy color 1 endpoint """
+ + endpoint
+ + '''"'''
+ )
+
def create_prefix_sid(rname, prefix, sid):
- get_topogen().net[rname].cmd(''' \
+ get_topogen().net[rname].cmd(
+ """ \
vtysh -c "conf t" \
-c "router isis 1" \
- -c "segment-routing prefix ''' + prefix + " index " + str(sid) + '''"''')
+ -c "segment-routing prefix """
+ + prefix
+ + " index "
+ + str(sid)
+ + '''"'''
+ )
+
def delete_prefix_sid(rname, prefix):
- get_topogen().net[rname].cmd(''' \
+ get_topogen().net[rname].cmd(
+ ''' \
vtysh -c "conf t" \
-c "router isis 1" \
- -c "no segment-routing prefix "''' + prefix)
+ -c "no segment-routing prefix "'''
+ + prefix
+ )
+
#
# Step 1
@@ -303,37 +364,42 @@ def delete_prefix_sid(rname, prefix):
def test_srte_init_step1():
setup_testcase("Test (step 1): wait for IS-IS convergence / label distribution")
- for rname in ['rt1', 'rt6']:
- cmp_json_output(rname,
- "show mpls table json",
- "step1/show_mpls_table_without_candidate.ref")
+ for rname in ["rt1", "rt6"]:
+ cmp_json_output(
+ rname, "show mpls table json", "step1/show_mpls_table_without_candidate.ref"
+ )
+
def test_srte_add_candidate_check_mpls_table_step1():
setup_testcase("Test (step 1): check MPLS table regarding the added Candidate Path")
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- add_candidate_path(rname, endpoint, 100, 'default')
- cmp_json_output(rname,
- "show mpls table json",
- "step1/show_mpls_table_with_candidate.ref")
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ add_candidate_path(rname, endpoint, 100, "default")
+ cmp_json_output(
+ rname, "show mpls table json", "step1/show_mpls_table_with_candidate.ref"
+ )
delete_candidate_path(rname, endpoint, 100)
+
def test_srte_reinstall_sr_policy_check_mpls_table_step1():
- setup_testcase("Test (step 1): check MPLS table after the SR Policy was removed and reinstalled")
+ setup_testcase(
+ "Test (step 1): check MPLS table after the SR Policy was removed and reinstalled"
+ )
- for rname, endpoint, bsid in [('rt1', '6.6.6.6', 1111), ('rt6', '1.1.1.1', 6666)]:
- add_candidate_path(rname, endpoint, 100, 'default')
+ for rname, endpoint, bsid in [("rt1", "6.6.6.6", 1111), ("rt6", "1.1.1.1", 6666)]:
+ add_candidate_path(rname, endpoint, 100, "default")
delete_sr_policy(rname, endpoint)
- cmp_json_output(rname,
- "show mpls table json",
- "step1/show_mpls_table_without_candidate.ref")
+ cmp_json_output(
+ rname, "show mpls table json", "step1/show_mpls_table_without_candidate.ref"
+ )
create_sr_policy(rname, endpoint, bsid)
- add_candidate_path(rname, endpoint, 100, 'default')
- cmp_json_output(rname,
- "show mpls table json",
- "step1/show_mpls_table_with_candidate.ref")
+ add_candidate_path(rname, endpoint, 100, "default")
+ cmp_json_output(
+ rname, "show mpls table json", "step1/show_mpls_table_with_candidate.ref"
+ )
delete_candidate_path(rname, endpoint, 100)
+
#
# Step 2
#
@@ -342,28 +408,41 @@ def test_srte_reinstall_sr_policy_check_mpls_table_step1():
def test_srte_bare_policy_step2():
setup_testcase("Test (step 2): bare SR Policy should not be operational")
- for rname in ['rt1', 'rt6']:
- cmp_json_output_exact(rname,
- "show yang operational-data /frr-pathd:pathd pathd",
- "step2/show_operational_data.ref")
+ for rname in ["rt1", "rt6"]:
+ cmp_json_output_exact(
+ rname,
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step2/show_operational_data.ref",
+ )
+
def test_srte_add_candidate_check_operational_data_step2():
- setup_testcase("Test (step 2): add single Candidate Path, SR Policy should be operational")
+ setup_testcase(
+ "Test (step 2): add single Candidate Path, SR Policy should be operational"
+ )
+
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ add_candidate_path(rname, endpoint, 100, "default")
+ cmp_json_output(
+ rname,
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step2/show_operational_data_with_candidate.ref",
+ )
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- add_candidate_path(rname, endpoint, 100, 'default')
- cmp_json_output(rname,
- "show yang operational-data /frr-pathd:pathd pathd",
- "step2/show_operational_data_with_candidate.ref")
def test_srte_config_remove_candidate_check_operational_data_step2():
- setup_testcase("Test (step 2): remove single Candidate Path, SR Policy should not be operational anymore")
+ setup_testcase(
+ "Test (step 2): remove single Candidate Path, SR Policy should not be operational anymore"
+ )
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
delete_candidate_path(rname, endpoint, 100)
- cmp_json_output_exact(rname,
- "show yang operational-data /frr-pathd:pathd pathd",
- "step2/show_operational_data.ref")
+ cmp_json_output_exact(
+ rname,
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step2/show_operational_data.ref",
+ )
+
#
# Step 3
@@ -373,53 +452,62 @@ def test_srte_config_remove_candidate_check_operational_data_step2():
def test_srte_add_two_candidates_step3():
setup_testcase("Test (step 3): second Candidate Path has higher Priority")
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- for pref, cand_name in [('100', 'first'), ('200', 'second')]:
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ for pref, cand_name in [("100", "first"), ("200", "second")]:
add_candidate_path(rname, endpoint, pref, cand_name)
- cmp_json_output(rname,
- "show yang operational-data /frr-pathd:pathd pathd",
- "step3/show_operational_data_with_two_candidates.ref")
+ cmp_json_output(
+ rname,
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step3/show_operational_data_with_two_candidates.ref",
+ )
# cleanup
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- for pref in ['100', '200']:
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ for pref in ["100", "200"]:
delete_candidate_path(rname, endpoint, pref)
+
def test_srte_add_two_candidates_with_reverse_priority_step3():
setup_testcase("Test (step 3): second Candidate Path has lower Priority")
# Use reversed priorities here
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- for pref, cand_name in [('200', 'first'), ('100', 'second')]:
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ for pref, cand_name in [("200", "first"), ("100", "second")]:
add_candidate_path(rname, endpoint, pref, cand_name)
- cmp_json_output(rname,
- "show yang operational-data /frr-pathd:pathd pathd",
- "step3/show_operational_data_with_two_candidates.ref")
+ cmp_json_output(
+ rname,
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step3/show_operational_data_with_two_candidates.ref",
+ )
# cleanup
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- for pref in ['100', '200']:
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ for pref in ["100", "200"]:
delete_candidate_path(rname, endpoint, pref)
+
def test_srte_remove_best_candidate_step3():
setup_testcase("Test (step 3): delete the Candidate Path with higher priority")
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- for pref, cand_name in [('100', 'first'), ('200', 'second')]:
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ for pref, cand_name in [("100", "first"), ("200", "second")]:
add_candidate_path(rname, endpoint, pref, cand_name)
# Delete candidate with higher priority
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
delete_candidate_path(rname, endpoint, 200)
# Candidate with lower priority should get active now
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- cmp_json_output(rname,
- "show yang operational-data /frr-pathd:pathd pathd",
- "step3/show_operational_data_with_single_candidate.ref")
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ cmp_json_output(
+ rname,
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step3/show_operational_data_with_single_candidate.ref",
+ )
# cleanup
delete_candidate_path(rname, endpoint, 100)
+
#
# Step 4
#
@@ -428,38 +516,38 @@ def test_srte_remove_best_candidate_step3():
def test_srte_change_segment_list_check_mpls_table_step4():
setup_testcase("Test (step 4): check MPLS table for changed Segment List")
- for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]:
- add_candidate_path(rname, endpoint, 100, 'default')
- # now change the segment list name
- add_candidate_path(rname, endpoint, 100, 'default', 'test')
- cmp_json_output(rname,
- "show mpls table json",
- "step4/show_mpls_table.ref")
+ for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
+ add_candidate_path(rname, endpoint, 100, "default")
+ # now change the segment list name
+ add_candidate_path(rname, endpoint, 100, "default", "test")
+ cmp_json_output(rname, "show mpls table json", "step4/show_mpls_table.ref")
delete_candidate_path(rname, endpoint, 100)
+
def test_srte_segment_list_add_segment_check_mpls_table_step4():
- setup_testcase("Test (step 4): check MPLS table for added (then changed and finally deleted) segment")
+ setup_testcase(
+ "Test (step 4): check MPLS table for added (then changed and finally deleted) segment"
+ )
- add_candidate_path('rt1', '6.6.6.6', 100, 'default', 'test')
+ add_candidate_path("rt1", "6.6.6.6", 100, "default", "test")
# first add a new segment
- add_segment('rt1', 'test', 25, 16050)
- cmp_json_output('rt1',
- "show mpls table json",
- "step4/show_mpls_table_add_segment.ref")
+ add_segment("rt1", "test", 25, 16050)
+ cmp_json_output(
+ "rt1", "show mpls table json", "step4/show_mpls_table_add_segment.ref"
+ )
# ... then change it ...
- add_segment('rt1', 'test', 25, 16030)
- cmp_json_output('rt1',
- "show mpls table json",
- "step4/show_mpls_table_change_segment.ref")
+ add_segment("rt1", "test", 25, 16030)
+ cmp_json_output(
+ "rt1", "show mpls table json", "step4/show_mpls_table_change_segment.ref"
+ )
# ... and finally delete it
- delete_segment('rt1', 'test', 25)
- cmp_json_output('rt1',
- "show mpls table json",
- "step4/show_mpls_table.ref")
- delete_candidate_path('rt1', '6.6.6.6', 100)
+ delete_segment("rt1", "test", 25)
+ cmp_json_output("rt1", "show mpls table json", "step4/show_mpls_table.ref")
+ delete_candidate_path("rt1", "6.6.6.6", 100)
+
#
# Step 5
@@ -467,68 +555,81 @@ def test_srte_segment_list_add_segment_check_mpls_table_step4():
# Checking the nexthop using a single SR Policy and a Candidate Path with configured route-map
#
def test_srte_route_map_with_sr_policy_check_nextop_step5():
- setup_testcase("Test (step 5): recursive nexthop learned through BGP neighbour should be aligned with SR Policy from route-map")
+ setup_testcase(
+ "Test (step 5): recursive nexthop learned through BGP neighbour should be aligned with SR Policy from route-map"
+ )
# (re-)build the SR Policy two times to ensure that reinstalling still works
- for i in [1,2]:
- cmp_json_output('rt1',
- "show ip route bgp json",
- "step5/show_ip_route_bgp_inactive_srte.ref")
+ for i in [1, 2]:
+ cmp_json_output(
+ "rt1", "show ip route bgp json", "step5/show_ip_route_bgp_inactive_srte.ref"
+ )
- delete_sr_policy('rt1', '6.6.6.6')
- cmp_json_output('rt1',
- "show ip route bgp json",
- "step5/show_ip_route_bgp_inactive_srte.ref")
+ delete_sr_policy("rt1", "6.6.6.6")
+ cmp_json_output(
+ "rt1", "show ip route bgp json", "step5/show_ip_route_bgp_inactive_srte.ref"
+ )
- create_sr_policy('rt1', '6.6.6.6', 1111)
- cmp_json_output('rt1',
- "show ip route bgp json",
- "step5/show_ip_route_bgp_inactive_srte.ref")
+ create_sr_policy("rt1", "6.6.6.6", 1111)
+ cmp_json_output(
+ "rt1", "show ip route bgp json", "step5/show_ip_route_bgp_inactive_srte.ref"
+ )
+
+ add_candidate_path("rt1", "6.6.6.6", 100, "default")
+ cmp_json_output(
+ "rt1", "show ip route bgp json", "step5/show_ip_route_bgp_active_srte.ref"
+ )
- add_candidate_path('rt1', '6.6.6.6', 100, 'default')
- cmp_json_output('rt1',
- "show ip route bgp json",
- "step5/show_ip_route_bgp_active_srte.ref")
+ delete_candidate_path("rt1", "6.6.6.6", 100)
- delete_candidate_path('rt1', '6.6.6.6', 100)
def test_srte_route_map_with_sr_policy_reinstall_prefix_sid_check_nextop_step5():
- setup_testcase("Test (step 5): remove and re-install prefix SID on fist path element and check SR Policy activity")
+ setup_testcase(
+ "Test (step 5): remove and re-install prefix SID on fist path element and check SR Policy activity"
+ )
# first add a candidate path so the SR Policy is active
- add_candidate_path('rt1', '6.6.6.6', 100, 'default')
- cmp_json_output('rt1',
- "show yang operational-data /frr-pathd:pathd pathd",
- "step5/show_operational_data_active.ref")
+ add_candidate_path("rt1", "6.6.6.6", 100, "default")
+ cmp_json_output(
+ "rt1",
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step5/show_operational_data_active.ref",
+ )
# delete prefix SID from first element of the configured path and check
# if the SR Policy is inactive since the label can't be resolved anymore
- delete_prefix_sid('rt5', "5.5.5.5/32")
- cmp_json_output('rt1',
- "show yang operational-data /frr-pathd:pathd pathd",
- "step5/show_operational_data_inactive.ref")
- cmp_json_output('rt1',
- "show ip route bgp json",
- "step5/show_ip_route_bgp_inactive_srte.ref")
+ delete_prefix_sid("rt5", "5.5.5.5/32")
+ cmp_json_output(
+ "rt1",
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step5/show_operational_data_inactive.ref",
+ )
+ cmp_json_output(
+ "rt1", "show ip route bgp json", "step5/show_ip_route_bgp_inactive_srte.ref"
+ )
# re-create the prefix SID and check if the SR Policy is active
- create_prefix_sid('rt5', "5.5.5.5/32", 50)
- cmp_json_output('rt1',
- "show yang operational-data /frr-pathd:pathd pathd",
- "step5/show_operational_data_active.ref")
- cmp_json_output('rt1',
- "show ip route bgp json",
- "step5/show_ip_route_bgp_active_srte.ref")
+ create_prefix_sid("rt5", "5.5.5.5/32", 50)
+ cmp_json_output(
+ "rt1",
+ "show yang operational-data /frr-pathd:pathd pathd",
+ "step5/show_operational_data_active.ref",
+ )
+ cmp_json_output(
+ "rt1", "show ip route bgp json", "step5/show_ip_route_bgp_active_srte.ref"
+ )
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
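The hunks above, like most of this commit, only re-wrap long call sites into the style black produces: one argument per line, a trailing comma after the last argument, and the closing parenthesis dedented back to the call's indentation. A minimal sketch of the convention, using a hypothetical helper name that is not part of the test suite:

    # hypothetical stand-in for cmp_json_output-style helpers
    def compare(router, command, reference):
        print(router, command, reference)

    # before: one long line over the length limit
    # compare("rt1", "show yang operational-data /frr-pathd:pathd pathd", "step2/show_operational_data.ref")

    # after: black-style wrapping with a trailing comma
    compare(
        "rt1",
        "show yang operational-data /frr-pathd:pathd pathd",
        "step2/show_operational_data.ref",
    )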
diff --git a/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py b/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py
index 8a7d6ee882..148a89474e 100644
--- a/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py
+++ b/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py
@@ -86,6 +86,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py b/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py
index 39b3299603..00cb623999 100755
--- a/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py
+++ b/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py
@@ -179,6 +179,7 @@ class TemplateTopo(Topo):
f_in.close()
f_out.close()
+
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(TemplateTopo, mod.__name__)
diff --git a/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py b/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py
index 7bc694cbb0..24287ccd44 100644
--- a/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py
+++ b/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py
@@ -45,6 +45,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
+
class ISISTopo1(Topo):
"Simple two layer ISIS vrf topology"
diff --git a/tests/topotests/isis-topo1/test_isis_topo1.py b/tests/topotests/isis-topo1/test_isis_topo1.py
index 6963429288..6ec3bf9ea1 100644
--- a/tests/topotests/isis-topo1/test_isis_topo1.py
+++ b/tests/topotests/isis-topo1/test_isis_topo1.py
@@ -47,6 +47,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
+
class ISISTopo1(Topo):
"Simple two layer ISIS topology"
diff --git a/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py
index a74e80dd80..9aa4024598 100644
--- a/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py
+++ b/tests/topotests/ldp-oc-acl-topo1/test_ldp_oc_acl_topo1.py
@@ -80,6 +80,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py
index 97790487f2..aef22c395d 100644
--- a/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py
+++ b/tests/topotests/ldp-oc-topo1/test_ldp_oc_topo1.py
@@ -80,6 +80,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py b/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py
index 99c831c8cf..57b45e5fdf 100644
--- a/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py
+++ b/tests/topotests/ldp-sync-ospf-topo1/test_ldp_sync_ospf_topo1.py
@@ -81,6 +81,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py
index fe57f3707a..0ea7aca3eb 100644
--- a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py
+++ b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py
@@ -82,6 +82,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index 5bc9f14fea..79e4d97448 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -94,7 +94,9 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
return result
-def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True, ospf="ospf"):
+def __create_ospf_global(
+ tgen, input_dict, router, build=False, load_config=True, ospf="ospf"
+):
"""
Helper API to create ospf global configuration.
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index 294f60bf68..d07b58a774 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -1564,26 +1564,30 @@ def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None):
logger.info("[DUT: %s]: Verifying PIM interface status:", dut)
rnode = tgen.routers()[dut]
- show_ip_pim_interface_json = rnode.\
- vtysh_cmd("show ip pim interface json", isjson=True)
+ show_ip_pim_interface_json = rnode.vtysh_cmd(
+ "show ip pim interface json", isjson=True
+ )
- logger.info("show_ip_pim_interface_json: \n %s",
- show_ip_pim_interface_json)
+ logger.info("show_ip_pim_interface_json: \n %s", show_ip_pim_interface_json)
if interface_ip:
if interface in show_ip_pim_interface_json:
pim_intf_json = show_ip_pim_interface_json[interface]
if pim_intf_json["address"] != interface_ip:
- errormsg = ("[DUT %s]: PIM interface "
- "ip is not correct "
- "[FAILED]!! Expected : %s, Found : %s"
- %(dut, pim_intf_json["address"],interface_ip))
+ errormsg = (
+ "[DUT %s]: PIM interface "
+ "ip is not correct "
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (dut, pim_intf_json["address"], interface_ip)
+ )
return errormsg
else:
- logger.info("[DUT %s]: PIM interface "
- "ip is correct "
- "[Passed]!! Expected : %s, Found : %s"
- %(dut, pim_intf_json["address"],interface_ip))
+ logger.info(
+ "[DUT %s]: PIM interface "
+ "ip is correct "
+ "[Passed]!! Expected : %s, Found : %s"
+ % (dut, pim_intf_json["address"], interface_ip)
+ )
return True
else:
for destLink, data in topo["routers"][dut]["links"].items():
@@ -1595,24 +1599,36 @@ def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None):
pim_intf_ip = data["ipv4"].split("/")[0]
if pim_interface in show_ip_pim_interface_json:
- pim_intf_json = show_ip_pim_interface_json\
- [pim_interface]
+ pim_intf_json = show_ip_pim_interface_json[pim_interface]
# Verifying PIM interface
- if pim_intf_json["address"] != pim_intf_ip and \
- pim_intf_json["state"] != "up":
- errormsg = ("[DUT %s]: PIM interface: %s "
- "PIM interface ip: %s, status check "
- "[FAILED]!! Expected : %s, Found : %s"
- %(dut, pim_interface, pim_intf_ip,
- pim_interface, pim_intf_json["state"]))
+ if (
+ pim_intf_json["address"] != pim_intf_ip
+ and pim_intf_json["state"] != "up"
+ ):
+ errormsg = (
+ "[DUT %s]: PIM interface: %s "
+ "PIM interface ip: %s, status check "
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (
+ dut,
+ pim_interface,
+ pim_intf_ip,
+ pim_interface,
+ pim_intf_json["state"],
+ )
+ )
return errormsg
- logger.info("[DUT %s]: PIM interface: %s, "
- "interface ip: %s, status: %s"
- " [PASSED]!!",
- dut, pim_interface, pim_intf_ip,
- pim_intf_json["state"])
+ logger.info(
+ "[DUT %s]: PIM interface: %s, "
+ "interface ip: %s, status: %s"
+ " [PASSED]!!",
+ dut,
+ pim_interface,
+ pim_intf_ip,
+ pim_intf_json["state"],
+ )
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@@ -3420,30 +3436,36 @@ def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip):
if router != dut:
continue
- logger.info("[DUT: %s]: Verifying PIM interface status:",
- dut)
+ logger.info("[DUT: %s]: Verifying PIM interface status:", dut)
rnode = tgen.routers()[dut]
- show_ip_igmp_interface_json = \
- run_frr_cmd(rnode, "show ip igmp interface json", isjson=True)
+ show_ip_igmp_interface_json = run_frr_cmd(
+ rnode, "show ip igmp interface json", isjson=True
+ )
- if igmp_iface in show_ip_igmp_interface_json:
+ if igmp_iface in show_ip_igmp_interface_json:
igmp_intf_json = show_ip_igmp_interface_json[igmp_iface]
# Verifying igmp interface
- if igmp_intf_json["address"] != interface_ip:
- errormsg = ("[DUT %s]: igmp interface ip is not correct "
- "[FAILED]!! Expected : %s, Found : %s"
- %(dut, igmp_intf_json["address"], interface_ip))
+ if igmp_intf_json["address"] != interface_ip:
+ errormsg = (
+ "[DUT %s]: igmp interface ip is not correct "
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (dut, igmp_intf_json["address"], interface_ip)
+ )
return errormsg
- logger.info("[DUT %s]: igmp interface: %s, "
- "interface ip: %s"
- " [PASSED]!!",
- dut, igmp_iface, interface_ip)
+ logger.info(
+ "[DUT %s]: igmp interface: %s, " "interface ip: %s" " [PASSED]!!",
+ dut,
+ igmp_iface,
+ interface_ip,
+ )
else:
- errormsg = ("[DUT %s]: igmp interface: %s "
- "igmp interface ip: %s, is not present "
- %(dut, igmp_iface, interface_ip))
+ errormsg = (
+ "[DUT %s]: igmp interface: %s "
+ "igmp interface ip: %s, is not present "
+ % (dut, igmp_iface, interface_ip)
+ )
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py
index 5112500e0b..e6b140a0e2 100644
--- a/tests/topotests/lib/snmptest.py
+++ b/tests/topotests/lib/snmptest.py
@@ -86,12 +86,11 @@ class SnmpTester(object):
def _get_snmp_oid(snmp_output):
tokens = snmp_output.strip().split()
-# if len(tokens) > 5:
-# return None
-
+ # if len(tokens) > 5:
+ # return None
# third token is the value of the object
- return tokens[0].split('.',1)[1]
+ return tokens[0].split(".", 1)[1]
def _parse_multiline(self, snmp_output):
results = snmp_output.strip().split("\r\n")
@@ -142,7 +141,11 @@ class SnmpTester(object):
print("FAIL: missing oid key {}".format(oid))
return False
if results_dict[oid] != values[index]:
- print("FAIL{} {} |{}| == |{}|".format(oid, index, results_dict[oid], values[index]))
+ print(
+ "FAIL{} {} |{}| == |{}|".format(
+ oid, index, results_dict[oid], values[index]
+ )
+ )
return False
index += 1
return True
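The _get_snmp_oid cleanup keeps the same parsing logic: take the first whitespace-separated token of a line of snmp command output and return everything after the first dot. A minimal sketch under the assumption that the output looks like a typical snmpwalk line (the sample line is illustrative, not taken from the test suite):

    # assumed snmpwalk-style output line; only the first token is parsed
    snmp_output = "SNMPv2-MIB::sysName.0 = STRING: rt1"
    tokens = snmp_output.strip().split()
    oid_suffix = tokens[0].split(".", 1)[1]  # text after the first '.'
    assert oid_suffix == "0"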
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index f958cc11d3..553f2bc6cf 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -1006,7 +1006,7 @@ def diagnose_env_linux():
if not os.path.isdir("/tmp"):
logger.warning("could not find /tmp for logs")
else:
- os.system("mkdir /tmp/topotests")
+ os.system("mkdir -p /tmp/topotests")
# Log diagnostics to file so it can be examined later.
fhandler = logging.FileHandler(filename="/tmp/topotests/diagnostics.txt")
fhandler.setLevel(logging.DEBUG)
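Besides the formatting changes, this hunk switches mkdir to mkdir -p, so creating the log directory succeeds silently when /tmp/topotests is already present from a previous run. A minimal sketch of an equivalent pure-Python form, shown only for comparison and not what the commit uses:

    import os

    # idempotent: creates the directory if missing, does nothing if it exists
    os.makedirs("/tmp/topotests", exist_ok=True)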
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 1e6ef1b2b3..60f6c9f943 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1424,7 +1424,7 @@ class Router(Node):
zebra_option = self.daemons_options["zebra"]
self.cmd(
"ASAN_OPTIONS=log_path=zebra.asan {0} {1} --log file:zebra.log --log-level debug -s 90000000 -d > zebra.out 2> zebra.err".format(
- zebra_path, zebra_option, self.logdir, self.name
+ zebra_path, zebra_option
)
)
logger.debug("{}: {} zebra started".format(self, self.routertype))
@@ -1439,7 +1439,7 @@ class Router(Node):
staticd_option = self.daemons_options["staticd"]
self.cmd(
"ASAN_OPTIONS=log_path=staticd.asan {0} {1} --log file:staticd.log --log-level debug -d > staticd.out 2> staticd.err".format(
- staticd_path, staticd_option, self.logdir, self.name
+ staticd_path, staticd_option
)
)
logger.debug("{}: {} staticd started".format(self, self.routertype))
@@ -1831,8 +1831,8 @@ class LinuxRouter(Router):
class FreeBSDRouter(Router):
"A FreeBSD Router Node with IPv4/IPv6 forwarding enabled."
- def __init__(eslf, name, **params):
- Router.__init__(Self, name, **params)
+ def __init__(self, name, **params):
+ Router.__init__(self, name, **params)
class LegacySwitch(OVSSwitch):
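Two functional fixes ride along with the reformatting in topotest.py: the zebra and staticd start commands drop self.logdir and self.name from .format() because the format strings only reference {0} and {1}, and the FreeBSDRouter constructor's eslf/Self typos become self, without which instantiating the class would raise a NameError. A minimal sketch of why dropping the surplus format arguments is behavior-preserving (the values are illustrative):

    # str.format() ignores positional arguments the format string never
    # references, so removing the unused extras does not change the output
    cmd = "{0} {1} -d".format("/usr/lib/frr/zebra", "--log-level debug", "/tmp/log", "r1")
    assert cmd == "/usr/lib/frr/zebra --log-level debug -d"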
diff --git a/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py b/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py
index ef01880381..96c4ea3646 100644
--- a/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py
+++ b/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py
@@ -642,10 +642,13 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
next_hop_rp = topo["routers"]["f1"]["links"]["i1"]["ipv4"].split("/")[0]
next_hop_lhr = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0]
+ next_hop_fhr = topo["routers"]["i1"]["links"]["f1"]["ipv4"].split("/")[0]
+ CRP = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["candidate_rp"]
input_dict = {
"i1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": next_hop_rp}]},
"l1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": next_hop_lhr}]},
+ "f1": {"static_routes": [{"network": CRP, "next_hop": next_hop_fhr, "delete": True}]},
}
result = create_static_routes(tgen, input_dict)
@@ -654,7 +657,6 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
# Use scapy to send pre-defined packet from senser to receiver
group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"]
- CRP = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["candidate_rp"]
step("waiting for BSR to timeout before configuring blackhole route")
clear_bsrp_data(tgen, topo)
@@ -706,7 +708,7 @@ def test_BSR_CRP_with_blackhole_address_p1(request):
"f1": {
"static_routes": [
{"network": [BSR1_ADDR, CRP], "next_hop": "blackhole", "delete": True},
- {"network": BSR1_ADDR, "next_hop": NEXT_HOP1}
+ {"network": BSR1_ADDR, "next_hop": NEXT_HOP1},
]
}
}
@@ -1118,7 +1120,10 @@ def test_static_rp_override_p1(request):
"l1": {
"pim": {
"rp": [
- {"rp_addr": "33.33.33.33", "group_addr_range": ["225.1.1.1/32"],}
+ {
+ "rp_addr": "33.33.33.33",
+ "group_addr_range": ["225.1.1.1/32"],
+ }
]
}
}
@@ -1294,7 +1299,8 @@ def test_bsmp_stress_add_del_restart_p2(request):
assert (
rp_add1 == rp2[group]
), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
- tc_name, rp_add1,
+ tc_name,
+ rp_add1,
)
# Verify if that rp is installed
@@ -1632,7 +1638,14 @@ def test_iif_join_state_p0(request):
# Add back route for RP to make it reachable
step("Add back route for RP to make it reachable")
input_dict = {
- "l1": {"static_routes": [{"network": rp_ip, "next_hop": next_hop_lhr,}]}
+ "l1": {
+ "static_routes": [
+ {
+ "network": rp_ip,
+ "next_hop": next_hop_lhr,
+ }
+ ]
+ }
}
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
diff --git a/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py
index 459afb5a02..8bd91401c3 100644
--- a/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py
+++ b/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py
@@ -695,7 +695,8 @@ def test_RP_priority_p0(request):
assert (
rp_add1 == rp2[group]
), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
- tc_name, rp_add1,
+ tc_name,
+ rp_add1,
)
# Verify if that rp is installed
diff --git a/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py
index ac675c5c2f..e55e30270d 100755
--- a/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py
+++ b/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py
@@ -102,7 +102,7 @@ from lib.pim import (
clear_ip_mroute,
clear_ip_pim_interface_traffic,
verify_igmp_config,
- clear_ip_mroute_verify
+ clear_ip_mroute_verify,
)
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
diff --git a/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py
index a9d914da57..7e409c2a05 100755
--- a/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py
+++ b/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py
@@ -877,7 +877,7 @@ def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
data["src_address"],
_IGMP_JOIN_RANGE,
data["iif"],
- data["oil"]
+ data["oil"],
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
@@ -1122,8 +1122,9 @@ def test_verify_mroute_after_shut_noshut_of_upstream_interface_p1(request):
done_flag = False
for retry in range(1, 11):
- result = verify_upstream_iif(tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2,
- expected=False)
+ result = verify_upstream_iif(
+ tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2, expected=False
+ )
if result is not True:
done_flag = True
else:
@@ -1515,7 +1516,7 @@ def test_verify_mroute_when_FRR_is_FHR_and_LHR_p0(request):
_IGMP_JOIN_RANGE,
data["iif"],
data["oil"],
- expected=False
+ expected=False,
)
if result is not True:
done_flag = True
@@ -1928,9 +1929,10 @@ def test_verify_oil_iif_for_mroute_after_shut_noshut_source_interface_p1(request
"f1-i8-eth2",
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n mroutes are"
- " still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n mroutes are" " still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behavior: {}".format(result))
diff --git a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py
index fdceb77fd1..ad9a2fefde 100755
--- a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py
+++ b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py
@@ -597,9 +597,10 @@ def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
input_traffic = {"l1": {"traffic_sent": [intf_l1_i1]}}
result = verify_multicast_traffic(tgen, input_traffic, expected=False)
- assert result is not True, (
- "Testcase {} : Failed \n "
- " Traffic is not stopped yet \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " " Traffic is not stopped yet \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -612,9 +613,10 @@ def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
result = verify_igmp_groups(
tgen, dut, intf_l1_i1, IGMP_JOIN_RANGE_1, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "IGMP groups are not deleted \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "IGMP groups are not deleted \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -655,9 +657,10 @@ def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -722,9 +725,10 @@ def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
result = verify_multicast_traffic(tgen, input_traffic, expected=False)
- assert result is not True, (
- "Testcase {} : Failed \n "
- " Traffic is not stopped yet \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " " Traffic is not stopped yet \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -737,9 +741,10 @@ def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
result = verify_igmp_groups(
tgen, dut, intf_f1_i8, IGMP_JOIN_RANGE_1, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "IGMP groups are not deleted \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "IGMP groups are not deleted \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -775,9 +780,10 @@ def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -959,9 +965,10 @@ def test_verify_oil_when_join_prune_sent_scenario_2_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -1022,9 +1029,10 @@ def test_verify_oil_when_join_prune_sent_scenario_2_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -1190,9 +1198,10 @@ def test_shut_noshut_source_interface_when_upstream_cleared_from_LHR_p1(request)
result = verify_ip_mroutes(
tgen, "f1", source_i2, IGMP_JOIN_RANGE_1, intf_f1_i2, intf_f1_r2, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n mroutes are"
- " still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n mroutes are" " still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behavior: {}".format(result))
@@ -1630,7 +1639,14 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
input_dict_2 = {
"l1": {
"igmp": {
- "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {
+ "version": "2",
+ "delete": True,
+ }
+ }
+ }
}
}
}
@@ -1642,9 +1658,10 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
dut = "l1"
interface = topo["routers"]["l1"]["links"]["i1"]["interface"]
result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False)
- assert result is not True, (
- "Testcase {} : Failed \n Groups are not"
- " present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Groups are not" " present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -1712,7 +1729,14 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
input_dict_2 = {
"l1": {
"igmp": {
- "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {
+ "version": "2",
+ "delete": True,
+ }
+ }
+ }
}
}
}
@@ -1725,9 +1749,10 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
dut = "l1"
interface = topo["routers"]["l1"]["links"]["i1"]["interface"]
result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False)
- assert result is not True, (
- "Testcase {} : Failed \n Groups are not"
- " present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Groups are not" " present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -1811,7 +1836,14 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
input_dict_2 = {
"l1": {
"igmp": {
- "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {
+ "version": "2",
+ "delete": True,
+ }
+ }
+ }
}
}
}
@@ -1831,9 +1863,10 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
result = verify_ip_mroutes(
tgen, dut, source, IGMP_JOIN_RANGE_1, iif, oil, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n routes are still"
- " present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n routes are still" " present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -1995,7 +2028,14 @@ def test_verify_remove_add_igmp_commands_when_pim_configured_p0(request):
input_dict_2 = {
"l1": {
"igmp": {
- "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}}
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {
+ "version": "2",
+ "delete": True,
+ }
+ }
+ }
}
}
}
@@ -2009,9 +2049,10 @@ def test_verify_remove_add_igmp_commands_when_pim_configured_p0(request):
)
result = verify_igmp_config(tgen, input_dict_1, expected=False)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "IGMP interface is not removed \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "IGMP interface is not removed \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -2914,9 +2955,10 @@ def test_mroute_after_removing_RP_sending_IGMP_prune_p2(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroute still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroute still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -3259,9 +3301,10 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroute still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroute still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -3287,9 +3330,10 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):
IGMP_JOIN_RANGE_1,
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "upstream still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "upstream still present \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -3311,9 +3355,10 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):
result = verify_pim_rp_info(
tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "RP iif is not updated \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "RP iif is not updated \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -3459,9 +3504,10 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):
result = verify_pim_rp_info(
tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "RP iif is not updated \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "RP iif is not updated \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -3606,9 +3652,10 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):
result = verify_pim_rp_info(
tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "RP iif is not updated \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "RP iif is not updated \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -3873,7 +3920,12 @@ def test_verify_multicast_traffic_when_LHR_connected_to_RP_p1(request):
"l1": {
"igmp": {
"interfaces": {
- "l1-i1-eth1": {"igmp": {"version": "2", "delete": True,}}
+ "l1-i1-eth1": {
+ "igmp": {
+ "version": "2",
+ "delete": True,
+ }
+ }
}
}
}
@@ -4107,9 +4159,10 @@ def test_verify_multicast_traffic_when_LHR_connected_to_RP_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n"
- "mroutes are cleared \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" "mroutes are cleared \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -4181,9 +4234,10 @@ def test_verify_multicast_traffic_when_LHR_connected_to_RP_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n"
- " mroutes are cleared \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" " mroutes are cleared \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -4248,9 +4302,10 @@ def test_verify_multicast_traffic_when_LHR_connected_to_RP_p1(request):
result = verify_ip_mroutes(
tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed \n"
- " mroutes are cleared \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" " mroutes are cleared \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -4459,9 +4514,10 @@ def test_verify_multicast_traffic_when_FHR_connected_to_RP_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n"
- " mroutes are cleared \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" " mroutes are cleared \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -4524,9 +4580,10 @@ def test_verify_multicast_traffic_when_FHR_connected_to_RP_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n"
- " mroutes are cleared \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" " mroutes are cleared \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
@@ -4595,9 +4652,10 @@ def test_verify_multicast_traffic_when_FHR_connected_to_RP_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n"
- " mroutes are cleared \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n" " mroutes are cleared \n Error: {}".format(
+ tc_name, result
)
logger.info("Expected Behaviour: {}".format(result))
diff --git a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py
index e8579e2a1e..bb2971842b 100755
--- a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py
+++ b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py
@@ -1009,7 +1009,11 @@ def test_PIM_hello_tx_rx_p1(request):
intf_c1_l1 = topo["routers"]["c1"]["links"]["l1"]["interface"]
step("verify before stats on C1")
- state_dict = {"c1": {intf_c1_l1: ["helloTx", "helloRx"],}}
+ state_dict = {
+ "c1": {
+ intf_c1_l1: ["helloTx", "helloRx"],
+ }
+ }
c1_state_before = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
@@ -1040,7 +1044,11 @@ def test_PIM_hello_tx_rx_p1(request):
), "Testcase{} : Failed Error: {}" "stats incremented".format(tc_name, result)
step("verify before stats on l1")
- l1_state_dict = {"l1": {intf_l1_c1: ["helloTx", "helloRx"],}}
+ l1_state_dict = {
+ "l1": {
+ intf_l1_c1: ["helloTx", "helloRx"],
+ }
+ }
l1_state_before = verify_pim_interface_traffic(tgen, l1_state_dict)
assert isinstance(
@@ -1077,7 +1085,11 @@ def test_PIM_hello_tx_rx_p1(request):
l1_state_after = {}
step("verify before stats on C1")
- state_dict = {"c1": {intf_c1_l1: ["helloTx", "helloRx"],}}
+ state_dict = {
+ "c1": {
+ intf_c1_l1: ["helloTx", "helloRx"],
+ }
+ }
c1_state_before = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
diff --git a/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py
index 8dfdd50527..f01f57d1eb 100755
--- a/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py
+++ b/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py
@@ -281,6 +281,7 @@ def teardown_module():
#
#####################################################
+
def config_to_send_igmp_join_and_traffic(tgen, tc_name):
"""
API to do pre-configuration to send IGMP join and multicast
@@ -961,7 +962,12 @@ def test_add_RP_after_join_received_p1(request):
input_dict = {
"r1": {
"pim": {
- "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
}
}
@@ -1147,32 +1153,32 @@ def test_reachable_static_RP_after_join_p0(request):
def test_send_join_on_higher_preffered_rp_p1(request):
"""
- TC_11_P1 : Verify PIM join send towards the higher preferred RP
- TC_12_P1 : Verify PIM prune send towards the lower preferred RP
- TC_13_P1 : Verify RPF interface is updated in mroute (kernel) when higher
- preferred overlapping RP configured
- TC_14_P1 : Verify IIF and OIL in "show ip pim state" updated properly when
- higher preferred overlapping RP configured
- TC_15_P1 : Verify upstream interfaces(IIF) and join state are updated when
- higher preferred overlapping RP is configured
- TC_16_P1 : Verify join is send to lower preferred RP, when higher
- preferred RP gets deleted
- TC_17_P1 : Verify prune is send to higher preferred RP when higher
- preferred RP gets deleted
- TC_18_P1 : Verify RPF interface updated in mroute when higher preferred RP
- gets deleted
- TC_19_P1 : Verify IIF and OIL in "show ip pim state" updated when higher
- preferred overlapping RP is deleted
- TC_20_P1 : Verfiy PIM upstream IIF updated when higher preferred
- overlapping RP deleted
-
- Topology used:
- _______r2
- |
- iperf |
- r0-----r1
- |
- |_______r4
+ TC_11_P1 : Verify PIM join is sent towards the higher preferred RP
+ TC_12_P1 : Verify PIM prune is sent towards the lower preferred RP
+ TC_13_P1 : Verify RPF interface is updated in mroute (kernel) when higher
+ preferred overlapping RP configured
+ TC_14_P1 : Verify IIF and OIL in "show ip pim state" updated properly when
+ higher preferred overlapping RP configured
+ TC_15_P1 : Verify upstream interfaces(IIF) and join state are updated when
+ higher preferred overlapping RP is configured
+ TC_16_P1 : Verify join is sent to lower preferred RP, when higher
+ preferred RP gets deleted
+ TC_17_P1 : Verify prune is sent to higher preferred RP when higher
+ preferred RP gets deleted
+ TC_18_P1 : Verify RPF interface updated in mroute when higher preferred RP
+ gets deleted
+ TC_19_P1 : Verify IIF and OIL in "show ip pim state" updated when higher
+ preferred overlapping RP is deleted
+ TC_20_P1 : Verify PIM upstream IIF updated when higher preferred
+ overlapping RP deleted
+
+ Topology used:
+ _______r2
+ |
+ iperf |
+ r0-----r1
+ |
+ |_______r4
"""
tgen = get_topogen()
@@ -1241,7 +1247,12 @@ def test_send_join_on_higher_preffered_rp_p1(request):
input_dict = {
"r4": {
"pim": {
- "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": ["225.1.1.1/32"],}]
+ "rp": [
+ {
+ "rp_addr": "1.0.4.17",
+ "group_addr_range": ["225.1.1.1/32"],
+ }
+ ]
}
}
}
@@ -1483,22 +1494,42 @@ def test_RP_configured_as_LHR_1_p1(request):
input_dict = {
"r1": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r2": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r3": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r4": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
}
@@ -1677,22 +1708,42 @@ def test_RP_configured_as_LHR_2_p1(request):
input_dict = {
"r1": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r2": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r3": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r4": {
"pim": {
- "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.1.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
}
@@ -1863,22 +1914,42 @@ def test_RP_configured_as_FHR_1_p1(request):
input_dict = {
"r1": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r2": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r3": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r4": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
}
@@ -2050,22 +2121,42 @@ def test_RP_configured_as_FHR_2_p2(request):
input_dict = {
"r1": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r2": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r3": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
"r4": {
"pim": {
- "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.3.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
},
}
@@ -2733,7 +2824,12 @@ def test_multiple_groups_same_RP_address_p2(request):
input_dict = {
"r1": {
"pim": {
- "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_ALL,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_ALL,
+ }
+ ]
}
}
}
@@ -2893,12 +2989,22 @@ def test_multiple_groups_different_RP_address_p2(request):
input_dict = {
"r2": {
"pim": {
- "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_LIST_1,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_LIST_1,
+ }
+ ]
}
},
"r4": {
"pim": {
- "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": GROUP_RANGE_LIST_2,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.4.17",
+ "group_addr_range": GROUP_RANGE_LIST_2,
+ }
+ ]
}
},
}
@@ -3148,12 +3254,22 @@ def test_multiple_groups_different_RP_address_p2(request):
input_dict = {
"r2": {
"pim": {
- "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_LIST_1,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.2.17",
+ "group_addr_range": GROUP_RANGE_LIST_1,
+ }
+ ]
}
},
"r4": {
"pim": {
- "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": GROUP_RANGE_LIST_2,}]
+ "rp": [
+ {
+ "rp_addr": "1.0.4.17",
+ "group_addr_range": GROUP_RANGE_LIST_2,
+ }
+ ]
}
},
}
diff --git a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py
index 5c901c067a..8a704790d4 100644
--- a/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py
+++ b/tests/topotests/ospf-sr-topo1/test_ospf_sr_topo1.py
@@ -50,6 +50,7 @@ import pytest
pytestmark = [pytest.mark.ospfd]
+
class OspfSrTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py b/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py
index eb3ad5d995..489690471c 100644
--- a/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py
+++ b/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py
@@ -166,6 +166,7 @@ def test_ospf_initial_convergence_step1():
"step1/show_ip_route_initial.ref",
)
+
def test_ospf_link_protection_step2():
logger.info("Test (step 2): check OSPF link protection")
tgen = get_topogen()
@@ -175,9 +176,7 @@ def test_ospf_link_protection_step2():
pytest.skip(tgen.errors)
# enable TI-LFA link protection on all interfaces
- tgen.net["rt1"].cmd(
- 'vtysh -c "conf t" -c "router ospf" -c "fast-reroute ti-lfa"'
- )
+ tgen.net["rt1"].cmd('vtysh -c "conf t" -c "router ospf" -c "fast-reroute ti-lfa"')
router_compare_json_output(
"rt1",
@@ -197,6 +196,7 @@ def test_ospf_link_protection_step2():
"step2/show_ip_route_initial.ref",
)
+
def test_ospf_node_protection_step3():
logger.info("Test (step 3): check OSPF node protection")
tgen = get_topogen()
@@ -228,6 +228,7 @@ def test_ospf_node_protection_step3():
"step3/show_ip_route_initial.ref",
)
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
@@ -237,6 +238,7 @@ def test_memory_leak():
tgen.report_memory_leaks()
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py
index 92dac0f39c..e2cb7bff03 100644
--- a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py
+++ b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py
@@ -47,6 +47,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
+
class OSPFTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ospf-topo1/test_ospf_topo1.py b/tests/topotests/ospf-topo1/test_ospf_topo1.py
index 7197c05812..5bb6c2c818 100644
--- a/tests/topotests/ospf-topo1/test_ospf_topo1.py
+++ b/tests/topotests/ospf-topo1/test_ospf_topo1.py
@@ -47,6 +47,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
+
class OSPFTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py
index c3efb6ff22..6ae886b76e 100644
--- a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py
+++ b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py
@@ -384,15 +384,14 @@ def test_linux_ipv6_kernel_routingTable():
% (i, diff)
)
else:
- logger.error(
- "r{} failed - no nhid ref file: {}".format(i, refTableFile)
- )
+ logger.error("r{} failed - no nhid ref file: {}".format(i, refTableFile))
assert False, (
"Linux Kernel IPv6 Routing Table verification failed for router r%s\n"
% (i)
)
+
def test_shutdown_check_stderr():
tgen = get_topogen()
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
index ca7cb736f9..0dedc8f0df 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
@@ -49,15 +49,14 @@ from lib.common_config import (
shutdown_bringup_interface,
topo_daemons,
verify_rib,
- stop_router, start_router,
+ stop_router,
+ start_router,
create_static_routes,
start_router_daemons,
- kill_router_daemons
+ kill_router_daemons,
)
-from lib.ospf import (
- verify_ospf_neighbor, verify_ospf_rib,
- create_router_ospf)
+from lib.ospf import verify_ospf_neighbor, verify_ospf_rib, create_router_ospf
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
@@ -69,8 +68,13 @@ pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
topo = None
NETWORK = {
- "ipv4": ["11.0.20.1/32", "11.0.20.2/32", "11.0.20.3/32", "11.0.20.4/32",
- "11.0.20.5/32"]
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ]
}
"""
Topology:
@@ -102,6 +106,7 @@ try:
except IOError:
assert False, "Could not read file {}".format(jsonFile)
+
class CreateTopo(Topo):
"""
Test topology builder.
@@ -190,78 +195,70 @@ def test_ospf_chaos_tc31_p1(request):
step(
"Create static routes(10.0.20.1/32) in R1 and redistribute "
- "to OSPF using route map.")
+ "to OSPF using route map."
+ )
# Create Static routes
input_dict = {
"r0": {
"static_routes": [
{
- "network": NETWORK['ipv4'][0],
+ "network": NETWORK["ipv4"][0],
"no_of_ip": 5,
- "next_hop": 'Null0',
+ "next_hop": "Null0",
}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- ospf_red_r0 = {
- "r0": {
- "ospf": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
- }
+ ospf_red_r0 = {"r0": {"ospf": {"redistribute": [{"redist_type": "static"}]}}}
result = create_router_ospf(tgen, topo, ospf_red_r0)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify OSPF neighbors after base config is done.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step("Verify that route is advertised to R1.")
- dut = 'r1'
- protocol = 'ospf'
- nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0]
+ dut = "r1"
+ protocol = "ospf"
+ nh = topo["routers"]["r0"]["links"]["r1"]["ipv4"].split("/")[0]
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(
- tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Kill OSPFd daemon on R0.")
kill_router_daemons(tgen, "r0", ["ospfd"])
step("Verify OSPF neighbors are down after killing ospfd in R0")
- dut = 'r0'
+ dut = "r0"
# Api call verify whether OSPF is converged
- ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut,
- expected=False)
- assert ospf_covergence is not True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, expected=False)
+ assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step("Verify that route advertised to R1 are deleted from RIB and FIB.")
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- expected=False)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
step("Bring up OSPFd daemon on R0.")
start_router_daemons(tgen, "r0", ["ospfd"])
@@ -269,33 +266,32 @@ def test_ospf_chaos_tc31_p1(request):
step("Verify OSPF neighbors are up after bringing back ospfd in R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step(
"All the neighbours are up and routes are installed before the"
- " restart. Verify OSPF route table and ip route table.")
- dut = 'r1'
- protocol = 'ospf'
+ " restart. Verify OSPF route table and ip route table."
+ )
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Kill OSPFd daemon on R1.")
kill_router_daemons(tgen, "r1", ["ospfd"])
step("Verify OSPF neighbors are down after killing ospfd in R1")
- dut = 'r1'
+ dut = "r1"
# Api call verify whether OSPF is converged
- ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut,
- expected=False)
- assert ospf_covergence is not True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, expected=False)
+ assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step("Bring up OSPFd daemon on R1.")
start_router_daemons(tgen, "r1", ["ospfd"])
@@ -303,23 +299,22 @@ def test_ospf_chaos_tc31_p1(request):
step("Verify OSPF neighbors are up after bringing back ospfd in R1")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step(
"All the neighbours are up and routes are installed before the"
- " restart. Verify OSPF route table and ip route table.")
+ " restart. Verify OSPF route table and ip route table."
+ )
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
@@ -335,104 +330,91 @@ def test_ospf_chaos_tc32_p1(request):
step(
"Create static routes(10.0.20.1/32) in R1 and redistribute "
- "to OSPF using route map.")
+ "to OSPF using route map."
+ )
# Create Static routes
input_dict = {
"r0": {
"static_routes": [
{
- "network": NETWORK['ipv4'][0],
+ "network": NETWORK["ipv4"][0],
"no_of_ip": 5,
- "next_hop": 'Null0',
+ "next_hop": "Null0",
}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- ospf_red_r0 = {
- "r0": {
- "ospf": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
- }
+ ospf_red_r0 = {"r0": {"ospf": {"redistribute": [{"redist_type": "static"}]}}}
result = create_router_ospf(tgen, topo, ospf_red_r0)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify OSPF neighbors after base config is done.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step("Verify that route is advertised to R1.")
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
- nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0]
+ nh = topo["routers"]["r0"]["links"]["r1"]["ipv4"].split("/")[0]
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Restart frr on R0")
- stop_router(tgen, 'r0')
- start_router(tgen, 'r0')
+ stop_router(tgen, "r0")
+ start_router(tgen, "r0")
step("Verify OSPF neighbors are up after restarting R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step(
"All the neighbours are up and routes are installed before the"
- " restart. Verify OSPF route table and ip route table.")
- dut = 'r1'
- protocol = 'ospf'
+ " restart. Verify OSPF route table and ip route table."
+ )
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Restart frr on R1")
- stop_router(tgen, 'r1')
- start_router(tgen, 'r1')
+ stop_router(tgen, "r1")
+ start_router(tgen, "r1")
step("Verify OSPF neighbors are up after restarting R1")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step(
"All the neighbours are up and routes are installed before the"
- " restart. Verify OSPF route table and ip route table.")
- dut = 'r1'
- protocol = 'ospf'
+ " restart. Verify OSPF route table and ip route table."
+ )
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
@@ -453,70 +435,62 @@ def test_ospf_chaos_tc34_p1(request):
step(
"Create static routes(10.0.20.1/32) in R1 and redistribute "
- "to OSPF using route map.")
+ "to OSPF using route map."
+ )
# Create Static routes
input_dict = {
"r0": {
"static_routes": [
{
- "network": NETWORK['ipv4'][0],
+ "network": NETWORK["ipv4"][0],
"no_of_ip": 5,
- "next_hop": 'Null0',
+ "next_hop": "Null0",
}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- ospf_red_r0 = {
- "r0": {
- "ospf": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
- }
+ ospf_red_r0 = {"r0": {"ospf": {"redistribute": [{"redist_type": "static"}]}}}
result = create_router_ospf(tgen, topo, ospf_red_r0)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify OSPF neighbors after base config is done.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step("Verify that route is advertised to R1.")
- dut = 'r1'
- protocol = 'ospf'
- nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0]
+ dut = "r1"
+ protocol = "ospf"
+ nh = topo["routers"]["r0"]["links"]["r1"]["ipv4"].split("/")[0]
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Kill staticd daemon on R0.")
kill_router_daemons(tgen, "r0", ["staticd"])
step("Verify that route advertised to R1 are deleted from RIB and FIB.")
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- expected=False)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
step("Bring up staticd daemon on R0.")
start_router_daemons(tgen, "r0", ["staticd"])
@@ -524,22 +498,21 @@ def test_ospf_chaos_tc34_p1(request):
step("Verify OSPF neighbors are up after bringing back ospfd in R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step(
"All the neighbours are up and routes are installed before the"
- " restart. Verify OSPF route table and ip route table.")
- dut = 'r1'
- protocol = 'ospf'
+ " restart. Verify OSPF route table and ip route table."
+ )
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Kill staticd daemon on R1.")
kill_router_daemons(tgen, "r1", ["staticd"])
@@ -550,23 +523,22 @@ def test_ospf_chaos_tc34_p1(request):
step("Verify OSPF neighbors are up after bringing back ospfd in R1")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step(
"All the neighbours are up and routes are installed before the"
- " restart. Verify OSPF route table and ip route table.")
+ " restart. Verify OSPF route table and ip route table."
+ )
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- next_hop=nh)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
index c90275ecb0..be18ba5a78 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
@@ -146,7 +146,6 @@ def setup_module(mod):
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
-
logger.info("Running setup_module() done")
@@ -397,10 +396,7 @@ def test_ospf_p2mp_tc1_p0(request):
"links": {
"r3": {
"interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
- "ospf": {
- "area": "0.0.0.0",
- "networkType":"POINTOMULTIPOINT"
- },
+ "ospf": {"area": "0.0.0.0", "networkType": "POINTOMULTIPOINT"},
}
}
}
diff --git a/tests/topotests/pbr-topo1/test_pbr_topo1.py b/tests/topotests/pbr-topo1/test_pbr_topo1.py
index 4b6de51c86..1a024063b8 100644
--- a/tests/topotests/pbr-topo1/test_pbr_topo1.py
+++ b/tests/topotests/pbr-topo1/test_pbr_topo1.py
@@ -82,6 +82,7 @@ class NetworkTopo(Topo):
##
#####################################################
+
def setup_module(module):
"Setup topology"
tgen = Topogen(NetworkTopo, module.__name__)
diff --git a/tests/topotests/pim-basic/test_pim.py b/tests/topotests/pim-basic/test_pim.py
index 918d3847ce..4debbeb851 100644
--- a/tests/topotests/pim-basic/test_pim.py
+++ b/tests/topotests/pim-basic/test_pim.py
@@ -45,6 +45,7 @@ from mininet.topo import Topo
pytestmark = [pytest.mark.pimd]
+
class PIMTopo(Topo):
def build(self, *_args, **_opts):
"Build function"
diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini
index 0c45a09445..562e754f21 100644
--- a/tests/topotests/pytest.ini
+++ b/tests/topotests/pytest.ini
@@ -24,6 +24,7 @@ markers =
sharpd: Tests that run against SHARPD
staticd: Tests that run against STATICD
vrrpd: Tests that run against VRRPD
+ snmp: Tests that run against snmp changes
[topogen]
# Default configuration values
diff --git a/tests/topotests/simple-snmp-test/test_simple_snmp.py b/tests/topotests/simple-snmp-test/test_simple_snmp.py
index 88ff01bf0a..5647e2b663 100755
--- a/tests/topotests/simple-snmp-test/test_simple_snmp.py
+++ b/tests/topotests/simple-snmp-test/test_simple_snmp.py
@@ -46,6 +46,8 @@ from lib.snmptest import SnmpTester
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.snmp]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py
index 712a40c738..a4cc8e8e7a 100644
--- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py
+++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py
@@ -35,6 +35,7 @@ import time
import os
import pytest
import platform
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
@@ -44,6 +45,7 @@ sys.path.append(os.path.join(CWD, "../lib/"))
from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
+
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
start_topology,
@@ -121,9 +123,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)
- if version_cmp(platform.release(), '4.19') < 0:
- error_msg = ('These tests will not run. (have kernel "{}", '
- 'requires kernel >= 4.19)'.format(platform.release()))
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
pytest.skip(error_msg)
# Checking BGP convergence
diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py
index c009929a48..6649915dec 100644
--- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py
+++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py
@@ -163,9 +163,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)
- if version_cmp(platform.release(), '4.19') < 0:
- error_msg = ('These tests will not run. (have kernel "{}", '
- 'requires kernel >= 4.19)'.format(platform.release()))
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
pytest.skip(error_msg)
# Checking BGP convergence
@@ -854,12 +856,12 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc6_ebgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
-
write_test_footer(tc_name)
@@ -1129,9 +1131,10 @@ def test_static_route_8nh_diff_AD_ebgp_ecmp_p1_tc8_ebgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
write_test_footer(tc_name)
@@ -1339,7 +1342,15 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc10_ebgp(request):
" value and all the nexthop populated in RIB and FIB again"
)
for addr_type in ADDR_TYPES:
- input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type],}]}}
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": PREFIX1[addr_type],
+ }
+ ]
+ }
+ }
nh = NEXT_HOP_IP["nh1"][addr_type]
result = verify_rib(
tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True
@@ -1468,9 +1479,10 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc10_ebgp(request):
protocol=protocol,
fib=True,
)
- assert result is True, (
- "Testcase {} : Failed \nError: Route "
- " is missing in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \nError: Route " " is missing in RIB".format(
+ tc_name
)
write_test_footer(tc_name)
diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py
index 3d41d89443..9b9749340e 100644
--- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py
+++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py
@@ -88,6 +88,7 @@ NEXT_HOP_IP = {}
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
class CreateTopo(Topo):
"""
Test CreateTopo - topology 1.
@@ -126,9 +127,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)
- if version_cmp(platform.release(), '4.19') < 0:
- error_msg = ('These tests will not run. (have kernel "{}", '
- 'requires kernel >= 4.19)'.format(platform.release()))
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
pytest.skip(error_msg)
# Checking BGP convergence
diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py
index ca67ff6645..8c2fdfca13 100644
--- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py
+++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py
@@ -125,9 +125,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)
- if version_cmp(platform.release(), '4.19') < 0:
- error_msg = ('These tests will not run. (have kernel "{}", '
- 'requires kernel >= 4.19)'.format(platform.release()))
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
pytest.skip(error_msg)
# Checking BGP convergence
diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py
index 2dc0a60d51..644ddc02d4 100644
--- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py
+++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py
@@ -165,9 +165,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)
- if version_cmp(platform.release(), '4.19') < 0:
- error_msg = ('These tests will not run. (have kernel "{}", '
- 'requires kernel >= 4.19)'.format(platform.release()))
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
pytest.skip(error_msg)
# Checking BGP convergence
@@ -884,9 +886,10 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc6_ibgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
step("BGP neighbor remove and add")
@@ -907,9 +910,10 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc6_ibgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
dut = "r3"
@@ -917,9 +921,10 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc6_ibgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
step("Remove the redistribute static knob")
@@ -1274,9 +1279,10 @@ def test_static_route_8nh_diff_AD_ibgp_ecmp_p1_tc7_ibgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
step("BGP neighbor remove and add")
@@ -1297,9 +1303,10 @@ def test_static_route_8nh_diff_AD_ibgp_ecmp_p1_tc7_ibgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
dut = "r3"
@@ -1307,9 +1314,10 @@ def test_static_route_8nh_diff_AD_ibgp_ecmp_p1_tc7_ibgp(request):
for addr_type in ADDR_TYPES:
input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}}
result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol)
- assert result is True, (
- "Testcase {} : Failed \n"
- "Error: Routes are still present in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n" "Error: Routes are still present in RIB".format(
+ tc_name
)
step("Remove the redistribute static knob")
@@ -1557,7 +1565,15 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc10_ibgp(request):
" value and all the nexthop populated in RIB and FIB again"
)
for addr_type in ADDR_TYPES:
- input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type],}]}}
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": PREFIX1[addr_type],
+ }
+ ]
+ }
+ }
nh = NEXT_HOP_IP["nh1"][addr_type]
result = verify_rib(
tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True
@@ -1686,9 +1702,10 @@ def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc10_ibgp(request):
protocol=protocol,
fib=True,
)
- assert result is True, (
- "Testcase {} : Failed \nError: Route "
- " is missing in RIB".format(tc_name)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \nError: Route " " is missing in RIB".format(
+ tc_name
)
step("Remove the redistribute static knob")
diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py
index 3eb431d64f..8f9d88a442 100644
--- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py
+++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py
@@ -149,9 +149,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)
- if version_cmp(platform.release(), '4.19') < 0:
- error_msg = ('These tests will not run. (have kernel "{}", '
- 'requires kernel >= 4.19)'.format(platform.release()))
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
pytest.skip(error_msg)
# Checking BGP convergence
diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py
index 01fdff69e6..e9960c7907 100644
--- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py
+++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py
@@ -69,6 +69,7 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
from lib.topotest import version_cmp
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/static_routes_topo4_ibgp.json".format(CWD)
try:
@@ -85,6 +86,7 @@ NEXT_HOP_IP = {}
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
class CreateTopo(Topo):
"""
Test CreateTopo - topology 1.
@@ -123,9 +125,11 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, topo)
- if version_cmp(platform.release(), '4.19') < 0:
- error_msg = ('These tests will not run. (have kernel "{}", '
- 'requires kernel >= 4.19)'.format(platform.release()))
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
pytest.skip(error_msg)
# Checking BGP convergence
diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py
index daf8f7be20..9fcf7b6820 100644
--- a/tests/topotests/zebra_rib/test_zebra_rib.py
+++ b/tests/topotests/zebra_rib/test_zebra_rib.py
@@ -76,9 +76,11 @@ def setup_module(mod):
router_list = tgen.routers()
for rname, router in router_list.items():
router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
router.load_config(
- TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
# Initialize all routers.
tgen.start_router()
@@ -159,6 +161,7 @@ def test_zebra_kernel_override():
_, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
assert result is None, '"r1" JSON output mismatches'
+
def test_route_map_usage():
"Test that FRR only reruns over routes associated with the routemap"
logger.info("Test that FRR runs on selected re's on route-map changes")
@@ -174,7 +177,9 @@ def test_route_map_usage():
r1.vtysh_cmd("conf\nroute-map static permit 10\nset src 192.168.215.1")
r1.vtysh_cmd("conf\naccess-list 5 seq 5 permit 10.0.0.44/32")
r1.vtysh_cmd("conf\naccess-list 10 seq 5 permit 10.0.1.0/24")
- r1.vtysh_cmd("conf\nroute-map sharp permit 10\nmatch ip address 10\nset src 192.168.214.1")
+ r1.vtysh_cmd(
+ "conf\nroute-map sharp permit 10\nmatch ip address 10\nset src 192.168.214.1"
+ )
r1.vtysh_cmd("conf\nroute-map sharp permit 20\nset src 192.168.213.1")
r1.vtysh_cmd("conf\nip protocol static route-map static")
r1.vtysh_cmd("conf\nip protocol sharp route-map sharp")
@@ -186,47 +191,57 @@ def test_route_map_usage():
static_rmapfile = "%s/r1/static_rmap.ref" % (thisDir)
expected = open(static_rmapfile).read().rstrip()
- expected = ('\n'.join(expected.splitlines()) + '\n').rstrip()
+ expected = ("\n".join(expected.splitlines()) + "\n").rstrip()
actual = r1.vtysh_cmd("show route-map static")
- actual = ('\n'.join(actual.splitlines()) + '\n').rstrip()
- logger.info("Does the show route-map static command run the correct number of times")
+ actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
+ logger.info(
+ "Does the show route-map static command run the correct number of times"
+ )
- diff = topotest.get_textdiff(actual, expected,
- title1 = "Actual Route-map output",
- title2 = "Expected Route-map output")
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
+ title1="Actual Route-map output",
+ title2="Expected Route-map output",
+ )
if diff:
logger.info("Actual:")
logger.info(actual)
logger.info("Expected:")
logger.info(expected)
srun = r1.vtysh_cmd("show run")
- srun = ('\n'.join(srun.splitlines()) + '\n').rstrip()
+ srun = ("\n".join(srun.splitlines()) + "\n").rstrip()
logger.info("Show run")
logger.info(srun)
assert 0, "r1 static route processing:\n"
sharp_rmapfile = "%s/r1/sharp_rmap.ref" % (thisDir)
expected = open(sharp_rmapfile).read().rstrip()
- expected = ('\n'.join(expected.splitlines()) + '\n').rstrip()
+ expected = ("\n".join(expected.splitlines()) + "\n").rstrip()
actual = r1.vtysh_cmd("show route-map sharp")
- actual = ('\n'.join(actual.splitlines()) + '\n').rstrip()
+ actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
logger.info("Does the show route-map sharp command run the correct number of times")
- diff = topotest.get_textdiff(actual, expected,
- title1 = "Actual Route-map output",
- title2 = "Expected Route-map output")
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
+ title1="Actual Route-map output",
+ title2="Expected Route-map output",
+ )
if diff:
logger.info("Actual:")
logger.info(actual)
logger.info("Expected:")
logger.info(expected)
srun = r1.vtysh_cmd("show run")
- srun = ('\n'.join(srun.splitlines()) + '\n').rstrip()
+ srun = ("\n".join(srun.splitlines()) + "\n").rstrip()
logger.info("Show run:")
logger.info(srun)
assert 0, "r1 sharp route-map processing:\n"
- logger.info("Add a extension to the static route-map to see the static route go away")
+ logger.info(
+ "Add a extension to the static route-map to see the static route go away"
+ )
r1.vtysh_cmd("conf\nroute-map sharp deny 5\nmatch ip address 5")
sleep(2)
# we are only checking the kernel here as that this will give us the implied
@@ -236,9 +251,9 @@ def test_route_map_usage():
logger.info("Test that the routes installed are correct")
sharp_ipfile = "%s/r1/iproute.ref" % (thisDir)
expected = open(sharp_ipfile).read().rstrip()
- expected = ('\n'.join(expected.splitlines()) + '\n').rstrip()
+ expected = ("\n".join(expected.splitlines()) + "\n").rstrip()
actual = r1.run("ip route show")
- actual = ('\n'.join(actual.splitlines()) + '\n').rstrip()
+ actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
actual = re.sub(r" nhid [0-9][0-9]", "", actual)
actual = re.sub(r" proto sharp", " proto XXXX", actual)
actual = re.sub(r" proto static", " proto XXXX", actual)
@@ -250,9 +265,9 @@ def test_route_map_usage():
actual = re.sub(r" proto XXXX ", " proto XXXX ", actual)
actual = re.sub(r" metric", " metric", actual)
actual = re.sub(r" link ", " link ", actual)
- diff = topotest.get_textdiff(actual, expected,
- title1 = "Actual ip route show",
- title2 = "Expected ip route show")
+ diff = topotest.get_textdiff(
+ actual, expected, title1="Actual ip route show", title2="Expected ip route show"
+ )
if diff:
logger.info("Actual:")
@@ -260,11 +275,12 @@ def test_route_map_usage():
logger.info("Expected:")
logger.info(expected)
srun = r1.vtysh_cmd("show run")
- srun = ('\n'.join(srun.splitlines()) + '\n').rstrip()
+ srun = ("\n".join(srun.splitlines()) + "\n").rstrip()
logger.info("Show run:")
logger.info(srun)
assert 0, "r1 ip route show is not correct:"
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index b41364c04d..6e809a0713 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -2851,6 +2851,24 @@ DEFUN (vtysh_show_error_code,
}
/* Northbound. */
+DEFUN (show_config_running,
+ show_config_running_cmd,
+ "show configuration running\
+ [<json|xml> [translate WORD]]\
+ [with-defaults]" DAEMONS_LIST,
+ SHOW_STR
+ "Configuration information\n"
+ "Running configuration\n"
+ "Change output format to JSON\n"
+ "Change output format to XML\n"
+ "Translate output\n"
+ "YANG module translator\n"
+ "Show default values\n"
+ DAEMONS_STR)
+{
+ return show_one_daemon(vty, argv, argc - 1, argv[argc - 1]->text);
+}
+
DEFUN (show_yang_operational_data,
show_yang_operational_data_cmd,
"show yang operational-data XPATH\
@@ -4564,6 +4582,7 @@ void vtysh_init_vty(void)
install_element(CONFIG_NODE, &vtysh_debug_memstats_cmd);
/* northbound */
+ install_element(ENABLE_NODE, &show_config_running_cmd);
install_element(ENABLE_NODE, &show_yang_operational_data_cmd);
install_element(ENABLE_NODE, &debug_nb_cmd);
install_element(CONFIG_NODE, &debug_nb_cmd);
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 46a751ce69..93961686fd 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -1767,6 +1767,33 @@ ssize_t netlink_route_multipath_msg_encode(int cmd,
nl_attr_nest_end(&req->n, nest);
}
+ /*
+ * Always install blackhole routes without using nexthops, because of
+ * the following kernel problems:
+ * 1. Kernel nexthops don't support unreachable/prohibit route types.
+ * 2. Blackhole kernel nexthops are deleted when loopback is down.
+ */
+ nexthop = dplane_ctx_get_ng(ctx)->nexthop;
+ if (nexthop) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ nexthop = nexthop->resolved;
+
+ if (nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {
+ switch (nexthop->bh_type) {
+ case BLACKHOLE_ADMINPROHIB:
+ req->r.rtm_type = RTN_PROHIBIT;
+ break;
+ case BLACKHOLE_REJECT:
+ req->r.rtm_type = RTN_UNREACHABLE;
+ break;
+ default:
+ req->r.rtm_type = RTN_BLACKHOLE;
+ break;
+ }
+ return NLMSG_ALIGN(req->n.nlmsg_len);
+ }
+ }
+
if ((!fpm && kernel_nexthops_supported()
&& (!proto_nexthops_only()
|| is_proto_nhg(dplane_ctx_get_nhe_id(ctx), 0)))
@@ -1820,27 +1847,6 @@ ssize_t netlink_route_multipath_msg_encode(int cmd,
if (nexthop_num == 1) {
nexthop_num = 0;
for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx), nexthop)) {
- /*
- * So we want to cover 2 types of blackhole
- * routes here:
- * 1) A normal blackhole route( ala from a static
- * install.
- * 2) A recursively resolved blackhole route
- */
- if (nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {
- switch (nexthop->bh_type) {
- case BLACKHOLE_ADMINPROHIB:
- req->r.rtm_type = RTN_PROHIBIT;
- break;
- case BLACKHOLE_REJECT:
- req->r.rtm_type = RTN_UNREACHABLE;
- break;
- default:
- req->r.rtm_type = RTN_BLACKHOLE;
- break;
- }
- return NLMSG_ALIGN(req->n.nlmsg_len);
- }
if (CHECK_FLAG(nexthop->flags,
NEXTHOP_FLAG_RECURSIVE)) {
@@ -3695,14 +3701,6 @@ static ssize_t netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx,
/* local neigh */
if (update_flags & DPLANE_NEIGH_SET_STATIC)
ext_flags |= NTF_E_MH_PEER_SYNC;
-
- /* the ndm_state set for local entries can be REACHABLE or
- * STALE. if the dataplane has already establish reachability
- * (in the meantime) FRR must not over-write it with STALE.
- * this accidental race/over-write is avoided by using the
- * WEAK_OVERRIDE_STATE
- */
- ext_flags |= NTF_E_WEAK_OVERRIDE_STATE;
}
if (IS_ZEBRA_DEBUG_KERNEL) {
char buf[INET6_ADDRSTRLEN];