-rw-r--r--  bgpd/bgp_advertise.c | 5
-rw-r--r--  bgpd/bgp_aspath.c | 125
-rw-r--r--  bgpd/bgp_damp.c | 3
-rw-r--r--  bgpd/bgp_evpn_mh.c | 8
-rw-r--r--  bgpd/bgp_evpn_vty.c | 4
-rw-r--r--  bgpd/bgp_flowspec_util.c | 13
-rw-r--r--  bgpd/bgp_route.c | 8
-rw-r--r--  bgpd/rfapi/rfapi.c | 2
-rw-r--r--  bgpd/rfapi/rfapi_import.c | 6
-rw-r--r--  doc/developer/topotests.rst | 14
-rw-r--r--  doc/user/basic.rst | 4
-rw-r--r--  isisd/isis_circuit.c | 10
-rw-r--r--  isisd/isisd.c | 2
-rw-r--r--  lib/link_state.c | 4
-rw-r--r--  lib/northbound.c | 4
-rw-r--r--  lib/vrf.c | 31
-rw-r--r--  ospf6d/ospf6_lsa.c | 1
-rw-r--r--  ospfd/ospf_routemap.c | 21
-rwxr-xr-x  tests/topotests/conftest.py | 60
-rw-r--r--  tests/topotests/lib/bgp.py | 54
-rw-r--r--  tests/topotests/lib/common_config.py | 20
-rw-r--r--  tests/topotests/lib/ospf.py | 983
-rw-r--r--  tests/topotests/lib/pim.py | 57
-rw-r--r--  tests/topotests/lib/topogen.py | 2
-rw-r--r--  tests/topotests/lib/topotest.py | 9
-rw-r--r--  tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json | 173
-rw-r--r--  tests/topotests/ospfv3_basic_functionality/ospfv3_single_area.json | 190
-rw-r--r--  tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py | 374
-rw-r--r--  tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py | 417
-rw-r--r--  zebra/zserv.c | 31
30 files changed, 2405 insertions(+), 230 deletions(-)
diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c
index 1ebe4e5b53..9da97d110f 100644
--- a/bgpd/bgp_advertise.c
+++ b/bgpd/bgp_advertise.c
@@ -205,6 +205,7 @@ void bgp_adj_in_remove(struct bgp_dest *dest, struct bgp_adj_in *bai)
{
bgp_attr_unintern(&bai->attr);
BGP_ADJ_IN_DEL(dest, bai);
+ bgp_dest_unlock_node(dest);
peer_unlock(bai->peer); /* adj_in peer reference */
XFREE(MTYPE_BGP_ADJ_IN, bai);
}
@@ -223,10 +224,8 @@ bool bgp_adj_in_unset(struct bgp_dest *dest, struct peer *peer,
while (adj) {
adj_next = adj->next;
- if (adj->peer == peer && adj->addpath_rx_id == addpath_id) {
+ if (adj->peer == peer && adj->addpath_rx_id == addpath_id)
bgp_adj_in_remove(dest, adj);
- bgp_dest_unlock_node(dest);
- }
adj = adj_next;
}
diff --git a/bgpd/bgp_aspath.c b/bgpd/bgp_aspath.c
index 5cf3c60fa2..25109e030b 100644
--- a/bgpd/bgp_aspath.c
+++ b/bgpd/bgp_aspath.c
@@ -910,77 +910,70 @@ size_t aspath_put(struct stream *s, struct aspath *as, int use32bit)
if (!seg || seg->length == 0)
return 0;
- if (seg) {
- /*
- * Hey, what do we do when we have > STREAM_WRITABLE(s) here?
- * At the moment, we would write out a partial aspath, and our
- * peer
- * will complain and drop the session :-/
- *
- * The general assumption here is that many things tested will
- * never happen. And, in real live, up to now, they have not.
- */
- while (seg && (ASSEGMENT_LEN(seg, use32bit)
- <= STREAM_WRITEABLE(s))) {
- struct assegment *next = seg->next;
- int written = 0;
- int asns_packed = 0;
- size_t lenp;
-
- /* Overlength segments have to be split up */
- while ((seg->length - written) > AS_SEGMENT_MAX) {
- assegment_header_put(s, seg->type,
- AS_SEGMENT_MAX);
- assegment_data_put(s, (seg->as + written), AS_SEGMENT_MAX,
- use32bit);
- written += AS_SEGMENT_MAX;
- bytes += ASSEGMENT_SIZE(AS_SEGMENT_MAX,
- use32bit);
- }
-
- /* write the final segment, probably is also the first
- */
- lenp = assegment_header_put(s, seg->type,
- seg->length - written);
+ /*
+ * Hey, what do we do when we have > STREAM_WRITABLE(s) here?
+ * At the moment, we would write out a partial aspath, and our
+ * peer
+ * will complain and drop the session :-/
+ *
+ * The general assumption here is that many things tested will
+ * never happen. And, in real live, up to now, they have not.
+ */
+ while (seg && (ASSEGMENT_LEN(seg, use32bit) <= STREAM_WRITEABLE(s))) {
+ struct assegment *next = seg->next;
+ int written = 0;
+ int asns_packed = 0;
+ size_t lenp;
+
+ /* Overlength segments have to be split up */
+ while ((seg->length - written) > AS_SEGMENT_MAX) {
+ assegment_header_put(s, seg->type, AS_SEGMENT_MAX);
assegment_data_put(s, (seg->as + written),
- seg->length - written, use32bit);
+ AS_SEGMENT_MAX, use32bit);
+ written += AS_SEGMENT_MAX;
+ bytes += ASSEGMENT_SIZE(AS_SEGMENT_MAX, use32bit);
+ }
- /* Sequence-type segments can be 'packed' together
- * Case of a segment which was overlength and split up
- * will be missed here, but that doesn't matter.
+ /* write the final segment, probably is also the first
+ */
+ lenp = assegment_header_put(s, seg->type,
+ seg->length - written);
+ assegment_data_put(s, (seg->as + written),
+ seg->length - written, use32bit);
+
+ /* Sequence-type segments can be 'packed' together
+ * Case of a segment which was overlength and split up
+ * will be missed here, but that doesn't matter.
+ */
+ while (next && ASSEGMENTS_PACKABLE(seg, next)) {
+ /* NB: We should never normally get here given
+ * we
+ * normalise aspath data when parse them.
+ * However, better
+ * safe than sorry. We potentially could call
+ * assegment_normalise here instead, but it's
+ * cheaper and
+ * easier to do it on the fly here rather than
+ * go through
+ * the segment list twice every time we write
+ * out
+ * aspath's.
*/
- while (next && ASSEGMENTS_PACKABLE(seg, next)) {
- /* NB: We should never normally get here given
- * we
- * normalise aspath data when parse them.
- * However, better
- * safe than sorry. We potentially could call
- * assegment_normalise here instead, but it's
- * cheaper and
- * easier to do it on the fly here rather than
- * go through
- * the segment list twice every time we write
- * out
- * aspath's.
- */
-
- /* Next segment's data can fit in this one */
- assegment_data_put(s, next->as, next->length,
- use32bit);
-
- /* update the length of the segment header */
- stream_putc_at(s, lenp,
- seg->length - written
- + next->length);
- asns_packed += next->length;
-
- next = next->next;
- }
- bytes += ASSEGMENT_SIZE(
- seg->length - written + asns_packed, use32bit);
- seg = next;
+ /* Next segment's data can fit in this one */
+ assegment_data_put(s, next->as, next->length, use32bit);
+
+ /* update the length of the segment header */
+ stream_putc_at(s, lenp,
+ seg->length - written + next->length);
+ asns_packed += next->length;
+
+ next = next->next;
}
+
+ bytes += ASSEGMENT_SIZE(seg->length - written + asns_packed,
+ use32bit);
+ seg = next;
}
return bytes;
}
diff --git a/bgpd/bgp_damp.c b/bgpd/bgp_damp.c
index 07c70d5aae..2a372c0ba4 100644
--- a/bgpd/bgp_damp.c
+++ b/bgpd/bgp_damp.c
@@ -245,7 +245,6 @@ static int bgp_reuse_timer(struct thread *t)
* list head entry. */
assert(bdc->reuse_offset < bdc->reuse_list_size);
plist = bdc->reuse_list[bdc->reuse_offset];
- node = SLIST_FIRST(&plist);
SLIST_INIT(&bdc->reuse_list[bdc->reuse_offset]);
/* 2. set offset = modulo reuse-list-size ( offset + 1 ), thereby
@@ -788,7 +787,7 @@ const char *bgp_damp_reuse_time_vty(struct vty *vty, struct bgp_path_info *path,
/* If dampening is not enabled or there is no dampening information,
return immediately. */
- if (!bdc || !bdi)
+ if (!bdi)
return NULL;
/* Calculate new penalty. */
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index 59bced6f93..b191840f63 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -2070,9 +2070,8 @@ int bgp_evpn_local_es_del(struct bgp *bgp, esi_t *esi)
/* Lookup ESI hash - should exist. */
es = bgp_evpn_es_find(esi);
if (!es) {
- flog_warn(EC_BGP_EVPN_ESI,
- "%u: ES %s missing at local ES DEL",
- bgp->vrf_id, es->esi_str);
+ flog_warn(EC_BGP_EVPN_ESI, "%u: ES missing at local ES DEL",
+ bgp->vrf_id);
return -1;
}
@@ -3317,9 +3316,6 @@ bgp_evpn_es_evi_local_info_clear(struct bgp_evpn_es_evi *es_evi)
{
struct bgpevpn *vpn = es_evi->vpn;
- if (!CHECK_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL))
- return es_evi;
-
UNSET_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL);
list_delete_node(vpn->local_es_evi_list, &es_evi->l2vni_listnode);
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 192ead6fd4..2a7c2ec853 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -768,13 +768,13 @@ static void bgp_evpn_show_routes_mac_ip_es(struct vty *vty, esi_t *esi,
static void bgp_evpn_show_routes_mac_ip_evi_es(struct vty *vty, esi_t *esi,
json_object *json, int detail)
{
- return bgp_evpn_show_routes_mac_ip_es(vty, esi, json, detail, false);
+ bgp_evpn_show_routes_mac_ip_es(vty, esi, json, detail, false);
}
static void bgp_evpn_show_routes_mac_ip_global_es(struct vty *vty, esi_t *esi,
json_object *json, int detail)
{
- return bgp_evpn_show_routes_mac_ip_es(vty, esi, json, detail, true);
+ bgp_evpn_show_routes_mac_ip_es(vty, esi, json, detail, true);
}
static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type,
diff --git a/bgpd/bgp_flowspec_util.c b/bgpd/bgp_flowspec_util.c
index b244c87258..23baa0184e 100644
--- a/bgpd/bgp_flowspec_util.c
+++ b/bgpd/bgp_flowspec_util.c
@@ -641,13 +641,12 @@ int bgp_flowspec_match_rules_fill(uint8_t *nlri_content, int len,
__func__, type);
}
}
- if (bpem->match_packet_length_num || bpem->match_fragment_num ||
- bpem->match_tcpflags_num || bpem->match_dscp_num ||
- bpem->match_packet_length_num || bpem->match_icmp_code_num ||
- bpem->match_icmp_type_num || bpem->match_port_num ||
- bpem->match_src_port_num || bpem->match_dst_port_num ||
- bpem->match_protocol_num || bpem->match_bitmask ||
- bpem->match_flowlabel_num)
+ if (bpem->match_packet_length_num || bpem->match_fragment_num
+ || bpem->match_tcpflags_num || bpem->match_dscp_num
+ || bpem->match_icmp_code_num || bpem->match_icmp_type_num
+ || bpem->match_port_num || bpem->match_src_port_num
+ || bpem->match_dst_port_num || bpem->match_protocol_num
+ || bpem->match_bitmask || bpem->match_flowlabel_num)
bpem->type = BGP_PBR_IPSET;
else if ((bpem->match_bitmask_iprule & PREFIX_SRC_PRESENT) ||
(bpem->match_bitmask_iprule & PREFIX_DST_PRESENT))
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 8e399b9b14..959a87d583 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -5048,10 +5048,8 @@ static void bgp_clear_route_table(struct peer *peer, afi_t afi, safi_t safi,
while (ain) {
ain_next = ain->next;
- if (ain->peer == peer) {
+ if (ain->peer == peer)
bgp_adj_in_remove(dest, ain);
- bgp_dest_unlock_node(dest);
- }
ain = ain_next;
}
@@ -5157,10 +5155,8 @@ void bgp_clear_adj_in(struct peer *peer, afi_t afi, safi_t safi)
while (ain) {
ain_next = ain->next;
- if (ain->peer == peer) {
+ if (ain->peer == peer)
bgp_adj_in_remove(dest, ain);
- bgp_dest_unlock_node(dest);
- }
ain = ain_next;
}
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index 8c455c6ea5..f89ef7b0d2 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -2179,7 +2179,7 @@ int rfapi_close(void *handle)
vnc_zlog_debug_verbose("%s administrative close rfd=%p",
__func__, rfd);
- if (h && h->rfp_methods.close_cb) {
+ if (h->rfp_methods.close_cb) {
vnc_zlog_debug_verbose(
"%s calling close callback rfd=%p", __func__,
rfd);
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index b2732a40b4..51e051d688 100644
--- a/bgpd/rfapi/rfapi_import.c
+++ b/bgpd/rfapi/rfapi_import.c
@@ -2592,10 +2592,8 @@ static void rfapiCopyUnEncap2VPN(struct bgp_path_info *encap_bpi,
* instrumentation to debug segfault of 091127
*/
vnc_zlog_debug_verbose("%s: vpn_bpi=%p", __func__, vpn_bpi);
- if (vpn_bpi) {
- vnc_zlog_debug_verbose("%s: vpn_bpi->extra=%p",
- __func__, vpn_bpi->extra);
- }
+ vnc_zlog_debug_verbose("%s: vpn_bpi->extra=%p", __func__,
+ vpn_bpi->extra);
vpn_bpi->extra->vnc.import.un_family = AF_INET;
vpn_bpi->extra->vnc.import.un.addr4 =
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 8885dcfce3..ba03aa9045 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -312,6 +312,20 @@ Here's an example of launching ``zebra`` and ``bgpd`` inside ``gdb`` on router
--gdb-breakpoints=nb_config_diff \
all-protocol-startup
+Detecting Memleaks with Valgrind
+""""""""""""""""""""""""""""""""
+
+Topotest can automatically launch all daemons with ``valgrind`` to check for
+memleaks. This is enabled by specifying 1 or 2 CLI arguments.
+``--valgrind-memleaks`` will enable general memleak detection, and
+``--valgrind-extra`` enables extra functionality including generating a
+suppression file. The suppression file ``tools/valgrind.supp`` is used when
+memleak detection is enabled.
+
+.. code:: shell
+
+ pytest --valgrind-memleaks all-protocol-startup
+
.. _topotests_docker:
Running Tests with Docker
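
Usage sketch only: the two new options documented above are ordinary pytest flags and, assuming nothing prevents it, can be combined in a single run (the test name is the one from the existing example):

    pytest --valgrind-memleaks --valgrind-extra all-protocol-startup
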
diff --git a/doc/user/basic.rst b/doc/user/basic.rst
index 2def835f0b..0db2361296 100644
--- a/doc/user/basic.rst
+++ b/doc/user/basic.rst
@@ -60,6 +60,10 @@ Basic Config Commands
Set hostname of the router.
+.. clicmd:: domainname DOMAINNAME
+
+ Set domainname of the router.
+
.. clicmd:: password PASSWORD
Set password for vty interface. The ``no`` form of the command deletes the
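
A minimal vtysh sketch of the new command documented above, assuming it is entered at the same configuration level as ``hostname`` (the domain value is hypothetical):

    router# configure terminal
    router(config)# domainname example.net
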
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 4fa28a4ad9..896bbc2cb0 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -78,12 +78,14 @@ DEFINE_HOOK(isis_circuit_del_hook, (struct isis_circuit *circuit), (circuit));
static void isis_circuit_enable(struct isis_circuit *circuit)
{
- struct isis_area *area;
+ struct isis_area *area = circuit->area;
struct interface *ifp = circuit->interface;
- area = isis_area_lookup(circuit->tag, ifp->vrf_id);
- if (area)
- isis_area_add_circuit(area, circuit);
+ if (!area) {
+ area = isis_area_lookup(circuit->tag, ifp->vrf_id);
+ if (area)
+ isis_area_add_circuit(area, circuit);
+ }
if (if_is_operative(ifp))
isis_csm_state_change(IF_UP_FROM_Z, circuit, ifp);
diff --git a/isisd/isisd.c b/isisd/isisd.c
index 77b18f9cf7..05d8741991 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -402,7 +402,7 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name)
continue;
circuit = ifp->info;
- if (circuit)
+ if (circuit && strmatch(circuit->tag, area->area_tag))
isis_area_add_circuit(area, circuit);
}
}
diff --git a/lib/link_state.c b/lib/link_state.c
index afeb89c592..e8a6b89f89 100644
--- a/lib/link_state.c
+++ b/lib/link_state.c
@@ -79,7 +79,6 @@ void ls_node_del(struct ls_node *node)
return;
XFREE(MTYPE_LS_DB, node);
- node = NULL;
}
int ls_node_same(struct ls_node *n1, struct ls_node *n2)
@@ -168,7 +167,6 @@ void ls_attributes_del(struct ls_attributes *attr)
ls_attributes_srlg_del(attr);
XFREE(MTYPE_LS_DB, attr);
- attr = NULL;
}
int ls_attributes_same(struct ls_attributes *l1, struct ls_attributes *l2)
@@ -221,7 +219,6 @@ void ls_prefix_del(struct ls_prefix *pref)
return;
XFREE(MTYPE_LS_DB, pref);
- pref = NULL;
}
int ls_prefix_same(struct ls_prefix *p1, struct ls_prefix *p2)
@@ -839,7 +836,6 @@ void ls_ted_del(struct ls_ted *ted)
subnets_fini(&ted->subnets);
XFREE(MTYPE_LS_DB, ted);
- ted = NULL;
}
void ls_ted_del_all(struct ls_ted *ted)
diff --git a/lib/northbound.c b/lib/northbound.c
index 47af770189..6edd5184ef 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -696,14 +696,14 @@ int nb_candidate_edit(struct nb_config *candidate,
NULL, LYD_NEW_PATH_UPDATE,
&dep_dnode);
/* Create default nodes */
- if (!err)
+ if (!err && dep_dnode)
err = lyd_new_implicit_tree(
dep_dnode,
LYD_IMPLICIT_NO_STATE, NULL);
if (err) {
flog_warn(
EC_LIB_LIBYANG,
- "%s: lyd_new_path(%s) failed: %d",
+ "%s: dependency: lyd_new_path(%s) failed: %d",
__func__, dep_xpath, err);
return NB_ERR;
}
diff --git a/lib/vrf.c b/lib/vrf.c
index de29f45f8f..a04f2ddeb7 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -582,29 +582,38 @@ void vrf_init(int (*create)(struct vrf *), int (*enable)(struct vrf *),
cmd_variable_handler_register(vrf_var_handlers);
}
+static void vrf_terminate_single(struct vrf *vrf)
+{
+ /* Clear configured flag and invoke delete. */
+ UNSET_FLAG(vrf->status, VRF_CONFIGURED);
+ vrf_delete(vrf);
+}
+
/* Terminate VRF module. */
void vrf_terminate(void)
{
- struct vrf *vrf;
+ struct vrf *vrf, *tmp;
if (debug_vrf)
zlog_debug("%s: Shutting down vrf subsystem", __func__);
- while (!RB_EMPTY(vrf_id_head, &vrfs_by_id)) {
- vrf = RB_ROOT(vrf_id_head, &vrfs_by_id);
+ RB_FOREACH_SAFE (vrf, vrf_id_head, &vrfs_by_id, tmp) {
+ if (vrf->vrf_id == VRF_DEFAULT)
+ continue;
- /* Clear configured flag and invoke delete. */
- UNSET_FLAG(vrf->status, VRF_CONFIGURED);
- vrf_delete(vrf);
+ vrf_terminate_single(vrf);
}
- while (!RB_EMPTY(vrf_name_head, &vrfs_by_name)) {
- vrf = RB_ROOT(vrf_name_head, &vrfs_by_name);
+ RB_FOREACH_SAFE (vrf, vrf_name_head, &vrfs_by_name, tmp) {
+ if (vrf->vrf_id == VRF_DEFAULT)
+ continue;
- /* Clear configured flag and invoke delete. */
- UNSET_FLAG(vrf->status, VRF_CONFIGURED);
- vrf_delete(vrf);
+ vrf_terminate_single(vrf);
}
+
+ /* Finally terminate default VRF */
+ vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ vrf_terminate_single(vrf);
}
int vrf_socket(int domain, int type, int protocol, vrf_id_t vrf_id,
diff --git a/ospf6d/ospf6_lsa.c b/ospf6d/ospf6_lsa.c
index a8f523295b..d627194252 100644
--- a/ospf6d/ospf6_lsa.c
+++ b/ospf6d/ospf6_lsa.c
@@ -654,6 +654,7 @@ void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
ospf6_lsa_age_current(lsa));
json_object_string_add(json_obj, "type",
ospf6_lstype_name(lsa->header->type));
+ json_object_string_add(json_obj, "linkStateId", id);
json_object_string_add(json_obj, "advertisingRouter",
adv_router);
json_object_int_add(json_obj, "lsSequenceNumber",
diff --git a/ospfd/ospf_routemap.c b/ospfd/ospf_routemap.c
index d3b114840e..2525c1cf3a 100644
--- a/ospfd/ospf_routemap.c
+++ b/ospfd/ospf_routemap.c
@@ -71,19 +71,14 @@ static void ospf_route_map_update(const char *name)
/* Keep old route-map. */
struct route_map *old = ROUTEMAP(red);
- if (!old) {
- /* Route-map creation */
- /* Update route-map. */
- ROUTEMAP(red) =
- route_map_lookup_by_name(
- ROUTEMAP_NAME(red));
-
- route_map_counter_increment(
- ROUTEMAP(red));
- } else {
- /* Route-map deletion */
- ROUTEMAP(red) = NULL;
- }
+ ROUTEMAP(red) =
+ route_map_lookup_by_name(
+ ROUTEMAP_NAME(red));
+
+ if (!old)
+ route_map_counter_increment(
+ ROUTEMAP(red));
+
/* No update for this distribute type.
*/
if (old == NULL
diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py
index de5c584e91..e57db7471c 100755
--- a/tests/topotests/conftest.py
+++ b/tests/topotests/conftest.py
@@ -2,8 +2,10 @@
Topotest conftest.py file.
"""
+import glob
import os
import pdb
+import re
import pytest
from lib.topogen import get_topogen, diagnose_env
@@ -11,6 +13,12 @@ from lib.topotest import json_cmp_result
from lib.topotest import g_extra_config as topotest_extra_config
from lib.topolog import logger
+try:
+ from _pytest._code.code import ExceptionInfo
+ leak_check_ok = True
+except ImportError:
+ leak_check_ok = False
+
def pytest_addoption(parser):
"""
@@ -67,6 +75,18 @@ def pytest_addoption(parser):
)
parser.addoption(
+ "--valgrind-extra",
+ action="store_true",
+ help="Generate suppression file, and enable more precise (slower) valgrind checks",
+ )
+
+ parser.addoption(
+ "--valgrind-memleaks",
+ action="store_true",
+ help="Run all daemons under valgrind for memleak detection",
+ )
+
+ parser.addoption(
"--vtysh",
metavar="ROUTER[,ROUTER...]",
help="Comma-separated list of routers to spawn vtysh on, or 'all'",
@@ -79,6 +99,37 @@ def pytest_addoption(parser):
)
+def check_for_memleaks():
+ if not topotest_extra_config["valgrind_memleaks"]:
+ return
+
+ leaks = []
+ tgen = get_topogen()
+ latest = []
+ existing = []
+ if tgen is not None:
+ logdir = "/tmp/topotests/{}".format(tgen.modname)
+ if hasattr(tgen, "valgrind_existing_files"):
+ existing = tgen.valgrind_existing_files
+ latest = glob.glob(os.path.join(logdir, "*.valgrind.*"))
+
+ for vfile in latest:
+ if vfile in existing:
+ continue
+ with open(vfile) as vf:
+ vfcontent = vf.read()
+ match = re.search(r"ERROR SUMMARY: (\d+) errors", vfcontent)
+ if match and match.group(1) != "0":
+ emsg = '{} in {}'.format(match.group(1), vfile)
+ leaks.append(emsg)
+
+ if leaks:
+ if leak_check_ok:
+ pytest.fail("Memleaks found:\n\t" + "\n\t".join(leaks))
+ else:
+ logger.error("Memleaks found:\n\t" + "\n\t".join(leaks))
+
+
def pytest_runtest_call():
"""
This function must be run after setup_module(), it does standarized post
@@ -139,6 +190,9 @@ def pytest_configure(config):
shell_on_error = config.getoption("--shell-on-error")
topotest_extra_config["shell_on_error"] = shell_on_error
+ topotest_extra_config["valgrind_extra"] = config.getoption("--valgrind-extra")
+ topotest_extra_config["valgrind_memleaks"] = config.getoption("--valgrind-memleaks")
+
vtysh = config.getoption("--vtysh")
topotest_extra_config["vtysh"] = vtysh.split(",") if vtysh else []
@@ -159,6 +213,12 @@ def pytest_runtest_makereport(item, call):
else:
pause = False
+ if call.excinfo is None and call.when == "call":
+ try:
+ check_for_memleaks()
+ except:
+ call.excinfo = ExceptionInfo()
+
if call.excinfo is None:
error = False
else:
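
check_for_memleaks() above scans any new *.valgrind.* files under /tmp/topotests/<module> and fails the test when valgrind reports a non-zero error count. A minimal, self-contained sketch of that matching step against a typical valgrind summary line (the pid prefix and counts are illustrative):

    import re

    # Typical final line of a valgrind log; "==1234==" and the counts are
    # illustrative values, not taken from a real run.
    line = "==1234== ERROR SUMMARY: 3 errors from 2 contexts (suppressed: 0 from 0)"

    match = re.search(r"ERROR SUMMARY: (\d+) errors", line)
    if match and match.group(1) != "0":
        print("errors reported:", match.group(1))  # -> 3
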
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 50cb586acd..db7b3586f1 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -990,7 +990,7 @@ def modify_bgp_config_when_bgpd_down(tgen, topo, input_dict):
# Verification APIs
#############################################
@retry(attempts=4, wait=2, return_is_str=True)
-def verify_router_id(tgen, topo, input_dict):
+def verify_router_id(tgen, topo, input_dict, expected=True):
"""
Running command "show ip bgp json" for DUT and reading router-id
from input_dict and verifying with command output.
@@ -1006,6 +1006,8 @@ def verify_router_id(tgen, topo, input_dict):
* `topo`: input json file data
* `input_dict`: input dictionary, have details of Device Under Test, for
which user wants to test the data
+ * `expected` : expected results from API, by-default True
+
Usage
-----
# Verify if router-id for r1 is 12.12.12.12
@@ -1060,7 +1062,7 @@ def verify_router_id(tgen, topo, input_dict):
@retry(attempts=50, wait=3, return_is_str=True)
-def verify_bgp_convergence(tgen, topo, dut=None):
+def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
"""
API will verify if BGP is converged with in the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
@@ -1070,6 +1072,8 @@ def verify_bgp_convergence(tgen, topo, dut=None):
* `tgen`: topogen object
* `topo`: input json file data
* `dut`: device under test
+ * `expected` : expected results from API, by-default True
+
Usage
-----
# To veriry is BGP is converged for all the routers used in
@@ -1264,7 +1268,7 @@ def verify_bgp_convergence(tgen, topo, dut=None):
@retry(attempts=4, wait=4, return_is_str=True)
def verify_bgp_community(
- tgen, addr_type, router, network, input_dict=None, vrf=None, bestpath=False
+ tgen, addr_type, router, network, input_dict=None, vrf=None, bestpath=False, expected=True
):
"""
API to veiryf BGP large community is attached in route for any given
@@ -1280,6 +1284,7 @@ def verify_bgp_community(
values needs to be verified
* `vrf`: VRF name
* `bestpath`: To check best path cli
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1423,7 +1428,7 @@ def modify_as_number(tgen, topo, input_dict):
@retry(attempts=4, wait=2, return_is_str=True)
-def verify_as_numbers(tgen, topo, input_dict):
+def verify_as_numbers(tgen, topo, input_dict, expected=True):
"""
This API is to verify AS numbers for given DUT by running
"show ip bgp neighbor json" command. Local AS and Remote AS
@@ -1435,6 +1440,7 @@ def verify_as_numbers(tgen, topo, input_dict):
* `topo`: input json file data
* `addr_type` : ip type, ipv4/ipv6
* `input_dict`: defines - for which router, AS numbers needs to be verified
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1522,7 +1528,7 @@ def verify_as_numbers(tgen, topo, input_dict):
@retry(attempts=50, wait=3, return_is_str=True)
-def verify_bgp_convergence_from_running_config(tgen, dut=None):
+def verify_bgp_convergence_from_running_config(tgen, dut=None, expected=True):
"""
API to verify BGP convergence b/w loopback and physical interface.
This API would be used when routers have BGP neighborship is loopback
@@ -1532,6 +1538,7 @@ def verify_bgp_convergence_from_running_config(tgen, dut=None):
----------
* `tgen`: topogen object
* `dut`: device under test
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2086,6 +2093,7 @@ def verify_bgp_attributes(
input_dict=None,
seq_id=None,
nexthop=None,
+ expected=True
):
"""
API will verify BGP attributes set by Route-map for given prefix and
@@ -2101,6 +2109,7 @@ def verify_bgp_attributes(
* `rmap_name`: route map name for which set criteria needs to be verified
* `input_dict`: defines for which router, AS numbers needs
* `seq_id`: sequence number of rmap, default is None
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2216,7 +2225,7 @@ def verify_bgp_attributes(
@retry(attempts=4, wait=2, return_is_str=True)
def verify_best_path_as_per_bgp_attribute(
- tgen, addr_type, router, input_dict, attribute
+ tgen, addr_type, router, input_dict, attribute, expected=True
):
"""
API is to verify best path according to BGP attributes for given routes.
@@ -2231,6 +2240,8 @@ def verify_best_path_as_per_bgp_attribute(
* `attribute` : calculate best path using this attribute
* `input_dict`: defines different routes to calculate for which route
best path is selected
+ * `expected` : expected results from API, by-default True
+
Usage
-----
# To verify best path for routes 200.50.2.0/32 and 200.60.2.0/32 from
@@ -2420,7 +2431,7 @@ def verify_best_path_as_per_bgp_attribute(
@retry(attempts=5, wait=2, return_is_str=True)
def verify_best_path_as_per_admin_distance(
- tgen, addr_type, router, input_dict, attribute
+ tgen, addr_type, router, input_dict, attribute, expected=True
):
"""
API is to verify best path according to admin distance for given
@@ -2435,6 +2446,8 @@ def verify_best_path_as_per_admin_distance(
* `attribute` : calculate best path using admin distance
* `input_dict`: defines different routes with different admin distance
to calculate for which route best path is selected
+ * `expected` : expected results from API, by-default True
+
Usage
-----
# To verify best path for route 200.50.2.0/32 from router r2 to
@@ -2532,7 +2545,7 @@ def verify_best_path_as_per_admin_distance(
@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2)
def verify_bgp_rib(
- tgen, addr_type, dut, input_dict, next_hop=None, aspath=None, multi_nh=None
+ tgen, addr_type, dut, input_dict, next_hop=None, aspath=None, multi_nh=None, expected=True
):
"""
This API is to verify whether bgp rib has any
@@ -2547,6 +2560,7 @@ def verify_bgp_rib(
* `next_hop`[optional]: next_hop which needs to be verified,
default = static
* 'aspath'[optional]: aspath which needs to be verified
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2833,7 +2847,7 @@ def verify_bgp_rib(
@retry(attempts=5, wait=2, return_is_str=True)
-def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer):
+def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify verify_graceful_restart configuration of DUT and
cross verify the same from the peer bgp routerrouter.
@@ -2847,6 +2861,7 @@ def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer):
which user wants to test the data
* `dut`: input dut router name
* `peer`: input peer router name
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -3082,7 +3097,7 @@ def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer):
@retry(attempts=5, wait=2, return_is_str=True)
-def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer):
+def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify r_bit in the BGP gr capability advertised
by the neighbor router
@@ -3096,6 +3111,8 @@ def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer):
which user wants to test the data
* `dut`: input dut router name
* `peer`: peer name
+ * `expected` : expected results from API, by-default True
+
Usage
-----
input_dict = {
@@ -3200,7 +3217,7 @@ def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer):
@retry(attempts=5, wait=2, return_is_str=True)
-def verify_eor(tgen, topo, addr_type, input_dict, dut, peer):
+def verify_eor(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify EOR
@@ -3363,7 +3380,7 @@ def verify_eor(tgen, topo, addr_type, input_dict, dut, peer):
@retry(attempts=4, wait=2, return_is_str=True)
-def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer):
+def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify f_bit in the BGP gr capability advertised
by the neighbor router
@@ -3377,6 +3394,7 @@ def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer):
which user wants to test the data
* `dut`: input dut router name
* `peer`: peer name
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -3516,6 +3534,8 @@ def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer)
for which user wants to test the data
* `dut`: input dut router name
* `peer`: peer name
+ * `expected` : expected results from API, by-default True
+
Usage
-----
# Configure graceful-restart
@@ -3629,7 +3649,7 @@ def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer)
@retry(attempts=4, wait=2, return_is_str=True)
-def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut):
+def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut, expected=True):
"""
This API is to verify gr_address_family in the BGP gr capability advertised
by the neighbor router
@@ -3641,6 +3661,7 @@ def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut):
* `addr_type` : ip type ipv4/ipv6
* `addr_type` : ip type IPV4 Unicast/IPV6 Unicast
* `dut`: input dut router name
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -3730,6 +3751,7 @@ def verify_attributes_for_evpn_routes(
ipLen=None,
rd_peer=None,
rt_peer=None,
+ expected=True
):
"""
API to verify rd and rt value using "sh bgp l2vpn evpn 10.1.1.1"
@@ -3747,6 +3769,8 @@ def verify_attributes_for_evpn_routes(
* `ipLen` : IP prefix length
* `rd_peer` : Peer name from which RD will be auto-generated
* `rt_peer` : Peer name from which RT will be auto-generated
+ * `expected` : expected results from API, by-default True
+
Usage
-----
input_dict_1 = {
@@ -4117,7 +4141,7 @@ def verify_attributes_for_evpn_routes(
@retry(attempts=5, wait=2, return_is_str=True)
def verify_evpn_routes(
- tgen, topo, dut, input_dict, routeType=5, EthTag=0, next_hop=None
+ tgen, topo, dut, input_dict, routeType=5, EthTag=0, next_hop=None, expected=True
):
"""
API to verify evpn routes using "sh bgp l2vpn evpn"
@@ -4132,6 +4156,8 @@ def verify_evpn_routes(
* `route_type` : Route type 5 is supported as of now
* `EthTag` : Ethernet tag, by-default is 0
* `next_hop` : Prefered nexthop for the evpn routes
+ * `expected` : expected results from API, by-default True
+
Usage
-----
input_dict_1 = {
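
The `expected` keyword threaded through the verification APIs above defaults to True. A hedged sketch of how a testcase might use it, assuming (this hunk does not show it) that the @retry decorator consumes the keyword so negative checks do not retry until exhaustion; the router name and messages are illustrative:

    from lib.bgp import verify_bgp_convergence

    # Inside a topotest testcase where tgen and topo are already available.
    # Positive check: convergence is expected.
    result = verify_bgp_convergence(tgen, topo, dut="r1")
    assert result is True, "BGP convergence failed: {}".format(result)

    # Negative check: convergence is not expected, so pass expected=False
    # (assumption: the retry wrapper uses this to return early on mismatch).
    result = verify_bgp_convergence(tgen, topo, dut="r1", expected=False)
    assert result is not True, "BGP unexpectedly converged"
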
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index ee7cd6a7af..3f78f020bc 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -123,6 +123,17 @@ DEBUG_LOGS = {
"debug ospf te",
"debug ospf zebra",
],
+ "ospf6": [
+ "debug ospf6 event",
+ "debug ospf6 ism",
+ "debug ospf6 lsa",
+ "debug ospf6 nsm",
+ "debug ospf6 nssa",
+ "debug ospf6 packet all",
+ "debug ospf6 sr",
+ "debug ospf6 te",
+ "debug ospf6 zebra",
+ ],
}
if config.has_option("topogen", "verbosity"):
@@ -422,7 +433,10 @@ def check_router_status(tgen):
daemons.append("zebra")
if "pimd" in result:
daemons.append("pimd")
-
+ if "ospfd" in result:
+ daemons.append("ospfd")
+ if "ospf6d" in result:
+ daemons.append("ospf6d")
rnode.startDaemons(daemons)
except Exception as e:
@@ -890,6 +904,10 @@ def topo_daemons(tgen, topo):
for val in topo["routers"][rtr]["links"].values():
if "pim" in val and "pimd" not in daemon_list:
daemon_list.append("pimd")
+ if "ospf" in val and "ospfd" not in daemon_list:
+ daemon_list.append("ospfd")
+ if "ospf6" in val and "ospf6d" not in daemon_list:
+ daemon_list.append("ospf6d")
break
return daemon_list
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index 7ad64de4a1..3f39b93d8c 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -18,13 +18,16 @@
# OF THIS SOFTWARE.
#
-from copy import deepcopy
import traceback
+import ipaddr
+import ipaddress
+import sys
+
+from copy import deepcopy
from time import sleep
from lib.topolog import logger
-import ipaddr
from lib.topotest import frr_unicode
-
+from ipaddress import IPv6Address
# Import common_config to use commomnly used APIs
from lib.common_config import (
create_common_configuration,
@@ -86,10 +89,21 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
logger.debug("Router %s: 'ospf' not present in input_dict", router)
continue
- result = __create_ospf_global(tgen, input_dict, router, build, load_config)
+ result = __create_ospf_global(
+ tgen, input_dict, router, build, load_config)
if result is True:
ospf_data = input_dict[router]["ospf"]
+ for router in input_dict.keys():
+ if "ospf6" not in input_dict[router]:
+ logger.debug("Router %s: 'ospf6' not present in input_dict", router)
+ continue
+
+ result = __create_ospf_global(
+ tgen, input_dict, router, build, load_config, ospf='ospf6')
+ if result is True:
+ ospf_data = input_dict[router]["ospf6"]
+
logger.debug("Exiting lib API: create_router_ospf()")
return result
@@ -158,6 +172,7 @@ def __create_ospf_global(
config_data.append(cmd)
+
# router id
router_id = ospf_data.setdefault("router_id", None)
del_router_id = ospf_data.setdefault("del_router_id", False)
@@ -166,6 +181,33 @@ def __create_ospf_global(
if router_id:
config_data.append("{} router-id {}".format(ospf, router_id))
+ # log-adjacency-changes
+ log_adj_changes = ospf_data.setdefault("log_adj_changes", None)
+ del_log_adj_changes = ospf_data.setdefault("del_log_adj_changes", False)
+ if del_log_adj_changes:
+ config_data.append("no log-adjacency-changes detail")
+ if log_adj_changes:
+ config_data.append("log-adjacency-changes {}".format(
+ log_adj_changes))
+
+ # aggregation timer
+ aggr_timer = ospf_data.setdefault("aggr_timer", None)
+ del_aggr_timer = ospf_data.setdefault("del_aggr_timer", False)
+ if del_aggr_timer:
+ config_data.append("no aggregation timer")
+ if aggr_timer:
+ config_data.append("aggregation timer {}".format(
+ aggr_timer))
+
+ # maximum path information
+ ecmp_data = ospf_data.setdefault("maximum-paths", {})
+ if ecmp_data:
+ cmd = "maximum-paths {}".format(ecmp_data)
+ del_action = ospf_data.setdefault("del_max_path", False)
+ if del_action:
+ cmd = "no maximum-paths"
+ config_data.append(cmd)
+
# redistribute command
redistribute_data = ospf_data.setdefault("redistribute", {})
if redistribute_data:
@@ -203,6 +245,34 @@ def __create_ospf_global(
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ #def route information
+ def_rte_data = ospf_data.setdefault("default-information", {})
+ if def_rte_data:
+ if "originate" not in def_rte_data:
+ logger.debug("Router %s: 'originate key' not present in "
+ "input_dict", router)
+ else:
+ cmd = "default-information originate"
+
+ if "always" in def_rte_data:
+ cmd = cmd + " always"
+
+ if "metric" in def_rte_data:
+ cmd = cmd + " metric {}".format(def_rte_data["metric"])
+
+ if "metric-type" in def_rte_data:
+ cmd = cmd + " metric-type {}".format(def_rte_data[
+ "metric-type"])
+
+ if "route-map" in def_rte_data:
+ cmd = cmd + " route-map {}".format(def_rte_data[
+ "route-map"])
+
+ del_action = def_rte_data.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
# area interface information for ospf6d only
if ospf == "ospf6":
area_iface = ospf_data.setdefault("neighbors", {})
@@ -217,6 +287,21 @@ def __create_ospf_global(
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ try:
+ if "area" in input_dict[router]['links'][neighbor][
+ 'ospf6']:
+ iface = input_dict[router]["links"][neighbor]["interface"]
+ cmd = "interface {} area {}".format(
+ iface, input_dict[router]['links'][neighbor][
+ 'ospf6']['area'])
+ if input_dict[router]['links'][neighbor].setdefault(
+ "delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ except KeyError:
+ pass
+
+
# summary information
summary_data = ospf_data.setdefault("summary-address", {})
if summary_data:
@@ -427,11 +512,11 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=
result = create_common_configuration(
tgen, router, config_data, "interface_config", build=build
)
- logger.debug("Exiting lib API: create_igmp_config()")
+ logger.debug("Exiting lib API: config_ospf_interface()")
return result
-def clear_ospf(tgen, router):
+def clear_ospf(tgen, router, ospf=None):
"""
This API is to clear ospf neighborship by running
clear ip ospf interface * command,
@@ -451,11 +536,16 @@ def clear_ospf(tgen, router):
return False
rnode = tgen.routers()[router]
-
# Clearing OSPF
- logger.info("Clearing ospf process for router %s..", router)
+ if ospf:
+ version = "ipv6"
+ else:
+ version = "ip"
- run_frr_cmd(rnode, "clear ip ospf interface ")
+ cmd = "clear {} ospf interface".format(version)
+ logger.info(
+ "Clearing ospf process on router %s.. using command '%s'", router, cmd)
+ run_frr_cmd(rnode, cmd)
logger.debug("Exiting lib API: clear_ospf()")
@@ -490,7 +580,7 @@ def redistribute_ospf(tgen, topo, dut, route_type, **kwargs):
# Verification procs
################################
@retry(attempts=40, wait=2, return_is_str=True)
-def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
+def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expected=True):
"""
This API is to verify ospf neighborship by running
show ip ospf neighbour command,
@@ -502,6 +592,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
* `dut`: device under test
* `input_dict` : Input dict data, required when configuring from testcase
* `lan` : verify neighbors in lan topology
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -683,70 +774,194 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
################################
# Verification procs
################################
-@retry(attempts=40, wait=2, return_is_str=True)
-def verify_ospf6_neighbor(tgen, topo):
+@retry(attempts=10, wait=2, return_is_str=True)
+def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
"""
This API is to verify ospf neighborship by running
- show ip ospf neighbour command,
+ show ipv6 ospf neighbour command,
Parameters
----------
* `tgen` : Topogen object
* `topo` : json file data
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `lan` : verify neighbors in lan topology
Usage
-----
- Check FULL neighbors.
- verify_ospf_neighbor(tgen, topo)
+ 1. To check FULL neighbors.
+ verify_ospf_neighbor(tgen, topo, dut=dut)
- result = verify_ospf_neighbor(tgen, topo)
+ 2. To check neighbors with their roles.
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "neighbors": {
+ "r1": {
+ "state": "Full",
+ "role": "DR"
+ },
+ "r2": {
+ "state": "Full",
+ "role": "DROther"
+ },
+ "r3": {
+ "state": "Full",
+ "role": "DROther"
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf6_neighbor(tgen, topo, dut, input_dict, lan=True)
Returns
-------
True or False (Error Message)
"""
-
- logger.debug("Entering lib API: verify_ospf6_neighbor()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
- for router, rnode in tgen.routers().items():
- if "ospf6" not in topo["routers"][router]:
- continue
- logger.info("Verifying OSPF6 neighborship on router %s:", router)
- show_ospf_json = run_frr_cmd(
- rnode, "show ipv6 ospf6 neighbor json", isjson=True
- )
+ if input_dict:
+ for router, rnode in tgen.routers().items():
+ if 'ospf6' not in topo['routers'][router]:
+ continue
- if not show_ospf_json:
- return "OSPF6 is not running"
-
- ospf_nbr_list = topo["routers"][router]["ospf6"]["neighbors"]
- no_of_peer = 0
- for ospf_nbr in ospf_nbr_list:
- ospf_nbr_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
- for neighbor in show_ospf_json["neighbors"]:
- if neighbor["neighborId"] == ospf_nbr_rid:
- nh_state = neighbor["state"]
- break
- else:
- return "[DUT: {}] OSPF6 peer {} missing".format(router, ospf_nbr_rid)
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF neighborship on router %s:", router)
+ show_ospf_json = run_frr_cmd(rnode,
+ "show ipv6 ospf neighbor json", isjson=True)
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF6 is not running"
+ return errormsg
+
+ ospf_data_list = input_dict[router]["ospf6"]
+ ospf_nbr_list = ospf_data_list['neighbors']
+
+ for ospf_nbr, nbr_data in ospf_nbr_list.items():
+ data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id']
+ if ospf_nbr in data_ip:
+ nbr_details = nbr_data[ospf_nbr]
+ elif lan:
+ for switch in topo['switches']:
+ if 'ospf6' in topo['switches'][switch]['links'][router]:
+ neighbor_ip = data_ip
+ else:
+ continue
+ else:
+ neighbor_ip = data_ip[router]['ipv6'].split("/")[0]
- if nh_state == "Full":
- no_of_peer += 1
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ nbr_rid = data_rid
+ get_index_val = dict((d['neighborId'], dict( \
+ d, index=index)) for (index, d) in enumerate( \
+ show_ospf_json['neighbors']))
+ try:
+ nh_state = get_index_val.get(neighbor_ip)['state']
+ intf_state = get_index_val.get(neighbor_ip)['ifState']
+ except TypeError:
+ errormsg = "[DUT: {}] OSPF peer {} missing,from "\
+ "{} ".format(router,
+ nbr_rid, ospf_nbr)
+ return errormsg
- if no_of_peer == len(ospf_nbr_list):
- logger.info("[DUT: {}] OSPF6 is Converged".format(router))
- result = True
- else:
- return "[DUT: {}] OSPF6 is not Converged".format(router)
+ nbr_state = nbr_data.setdefault("state",None)
+ nbr_role = nbr_data.setdefault("role",None)
- logger.debug("Exiting API: verify_ospf6_neighbor()")
+ if nbr_state:
+ if nbr_state == nh_state:
+ logger.info("[DUT: {}] OSPF6 Nbr is {}:{} State {}".format
+ (router, ospf_nbr, nbr_rid, nh_state))
+ result = True
+ else:
+ errormsg = ("[DUT: {}] OSPF6 is not Converged, neighbor"
+ " state is {} , Expected state is {}".format(router,
+ nh_state, nbr_state))
+ return errormsg
+ if nbr_role:
+ if nbr_role == intf_state:
+ logger.info("[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format(
+ router, ospf_nbr, nbr_rid, nbr_role))
+ else:
+ errormsg = ("[DUT: {}] OSPF6 is not Converged with rid"
+ "{}, role is {}, Expected role is {}".format(router,
+ nbr_rid, intf_state, nbr_role))
+ return errormsg
+ continue
+ else:
+
+ for router, rnode in tgen.routers().items():
+ if 'ospf6' not in topo['routers'][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF6 neighborship on router %s:", router)
+ show_ospf_json = run_frr_cmd(rnode,
+ "show ipv6 ospf neighbor json", isjson=True)
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF6 is not running"
+ return errormsg
+
+ ospf_data_list = topo["routers"][router]["ospf6"]
+ ospf_neighbors = ospf_data_list['neighbors']
+ total_peer = 0
+ total_peer = len(ospf_neighbors.keys())
+ no_of_ospf_nbr = 0
+ ospf_nbr_list = ospf_data_list['neighbors']
+ no_of_peer = 0
+ for ospf_nbr, nbr_data in ospf_nbr_list.items():
+ data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id']
+ if ospf_nbr in data_ip:
+ nbr_details = nbr_data[ospf_nbr]
+ elif lan:
+ for switch in topo['switches']:
+ if 'ospf6' in topo['switches'][switch]['links'][router]:
+ neighbor_ip = data_ip
+ else:
+ continue
+ else:
+ neighbor_ip = data_ip
+
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ nbr_rid = data_rid
+ get_index_val = dict((d['neighborId'], dict( \
+ d, index=index)) for (index, d) in enumerate( \
+ show_ospf_json['neighbors']))
+ try:
+ nh_state = get_index_val.get(neighbor_ip)['state']
+ intf_state = get_index_val.get(neighbor_ip)['ifState']
+ except TypeError:
+ errormsg = "[DUT: {}] OSPF peer {} missing,from "\
+ "{} ".format(router,
+ nbr_rid, ospf_nbr)
+ return errormsg
+
+ if nh_state == 'Full':
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("[DUT: {}] OSPF6 is Converged".format(router))
+ result = True
+ else:
+ errormsg = ("[DUT: {}] OSPF6 is not Converged".format(router))
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
@retry(attempts=21, wait=2, return_is_str=True)
def verify_ospf_rib(
- tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None
+ tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None, expected=True
):
"""
This API is to verify ospf routes by running
@@ -761,6 +976,7 @@ def verify_ospf_rib(
* `tag` : tag to be verified
* `metric` : metric to be verified
* `fib` : True if the route is installed in FIB.
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1021,7 +1237,7 @@ def verify_ospf_rib(
@retry(attempts=10, wait=2, return_is_str=True)
-def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None):
+def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None, expected=True):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
@@ -1033,6 +1249,7 @@ def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None):
* `dut`: device under test
* `lan`: if set to true this interface belongs to LAN.
* `input_dict` : Input dict data, required when configuring from testcase
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1110,7 +1327,7 @@ def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None):
@retry(attempts=11, wait=2, return_is_str=True)
-def verify_ospf_database(tgen, topo, dut, input_dict):
+def verify_ospf_database(tgen, topo, dut, input_dict, expected=True):
"""
This API is to verify ospf lsa's by running
show ip ospf database command.
@@ -1121,6 +1338,7 @@ def verify_ospf_database(tgen, topo, dut, input_dict):
* `dut`: device under test
* `input_dict` : Input dict data, required when configuring from testcase
* `topo` : next to be verified
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1273,7 +1491,7 @@ def verify_ospf_database(tgen, topo, dut, input_dict):
@retry(attempts=10, wait=2, return_is_str=True)
-def verify_ospf_summary(tgen, topo, dut, input_dict):
+def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
@@ -1284,6 +1502,7 @@ def verify_ospf_summary(tgen, topo, dut, input_dict):
* `topo` : topology descriptions
* `dut`: device under test
* `input_dict` : Input dict data, required when configuring from testcase
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1349,3 +1568,667 @@ def verify_ospf_summary(tgen, topo, dut, input_dict):
logger.debug("Exiting API: verify_ospf_summary()")
return result
+
+
+
+@retry(attempts=10, wait=3, return_is_str=True)
+def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
+ tag=None, metric=None, fib=None):
+ """
+ This API is to verify ospf routes by running
+ show ip ospf route command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `next_hop` : next to be verified
+ * `tag` : tag to be verified
+ * `metric` : metric to be verified
+ * `fib` : True if the route is installed in FIB.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": ip_net,
+ "no_of_ip": 1,
+ "routeType": "N"
+ }
+ ]
+ }
+ }
+
+ result = verify_ospf6_rib(tgen, dut, input_dict,next_hop=nh)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ router_list = tgen.routers()
+ additional_nexthops_in_required_nhs = []
+ found_hops = []
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.iteritems():
+ if router != dut:
+ continue
+
+ logger.info("Checking router %s RIB:", router)
+
+ # Verifying RIB routes
+ command = "show ipv6 ospf route"
+
+ found_routes = []
+ missing_routes = []
+
+ if "static_routes" in input_dict[routerInput] or \
+ "prefix" in input_dict[routerInput]:
+ if "prefix" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["prefix"]
+ else:
+ static_routes = input_dict[routerInput]["static_routes"]
+
+
+ for static_route in static_routes:
+ cmd = "{}".format(command)
+
+ cmd = "{} json".format(cmd)
+
+ ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Fix for PR 2644182
+ try:
+ ospf_rib_json = ospf_rib_json['routes']
+ except KeyError:
+ pass
+
+ # Verifying output dictionary ospf_rib_json is not empty
+ if bool(ospf_rib_json) is False:
+ errormsg = "[DUT: {}] No routes found in OSPF6 route " \
+ "table".format(router)
+ return errormsg
+
+ network = static_route["network"]
+ no_of_ip = static_route.setdefault("no_of_ip", 1)
+ _tag = static_route.setdefault("tag", None)
+ _rtype = static_route.setdefault("routeType", None)
+
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+ st_found = False
+ nh_found = False
+ for st_rt in ip_list:
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
+
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != 'ipv6':
+ continue
+
+ if st_rt in ospf_rib_json:
+
+ st_found = True
+ found_routes.append(st_rt)
+
+ if fib and next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ for mnh in range(0, len(ospf_rib_json[st_rt])):
+ if 'fib' in ospf_rib_json[st_rt][
+ mnh]["nextHops"][0]:
+ found_hops.append([rib_r[
+ "ip"] for rib_r in ospf_rib_json[
+ st_rt][mnh]["nextHops"]])
+
+ if found_hops[0]:
+ missing_list_of_nexthops = \
+ set(found_hops[0]).difference(next_hop)
+ additional_nexthops_in_required_nhs = \
+ set(next_hop).difference(found_hops[0])
+
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Nexthop "
+ "%s is not active for route %s in "
+ "RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt, dut)
+ errormsg = (
+ "Nexthop {} is not active"
+ " for route {} in RIB of router"
+ " {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt, dut))
+ return errormsg
+ else:
+ nh_found = True
+
+ elif next_hop and fib is None:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+ found_hops = [rib_r['nextHop'] for rib_r in
+ ospf_rib_json[st_rt][
+ "nextHops"]]
+
+ if found_hops:
+ missing_list_of_nexthops = \
+ set(found_hops).difference(next_hop)
+ additional_nexthops_in_required_nhs = \
+ set(next_hop).difference(found_hops)
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Missing nexthop %s for route"\
+ " %s in RIB of router %s\n", \
+ additional_nexthops_in_required_nhs, \
+ st_rt, dut)
+ errormsg=("Nexthop {} is Missing for "\
+ "route {} in RIB of router {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt, dut))
+ return errormsg
+ else:
+ nh_found = True
+ if _rtype:
+ if "destinationType" not in ospf_rib_json[
+ st_rt]:
+ errormsg = ("[DUT: {}]: destinationType missing"
+ "for route {} in OSPF RIB \n".\
+ format(dut, st_rt))
+ return errormsg
+ elif _rtype != ospf_rib_json[st_rt][
+ "destinationType"]:
+ errormsg = ("[DUT: {}]: destinationType mismatch"
+ "for route {} in OSPF RIB \n".\
+ format(dut, st_rt))
+ return errormsg
+ else:
+ logger.info("DUT: {}]: Found destinationType {}"
+ "for route {}".\
+ format(dut, _rtype, st_rt))
+ if tag:
+ if "tag" not in ospf_rib_json[
+ st_rt]:
+ errormsg = ("[DUT: {}]: tag is not"
+ " present for"
+ " route {} in RIB \n".\
+ format(dut, st_rt
+ ))
+ return errormsg
+
+ if _tag != ospf_rib_json[
+ st_rt]["tag"]:
+ errormsg = ("[DUT: {}]: tag value {}"
+ " is not matched for"
+ " route {} in RIB \n".\
+ format(dut, _tag, st_rt,
+ ))
+ return errormsg
+
+ if metric is not None:
+ if "type2cost" not in ospf_rib_json[
+ st_rt]:
+ errormsg = ("[DUT: {}]: metric is"
+ " not present for"
+ " route {} in RIB \n".\
+ format(dut, st_rt))
+ return errormsg
+
+ if metric != ospf_rib_json[
+ st_rt]["type2cost"]:
+ errormsg = ("[DUT: {}]: metric value "
+ "{} is not matched for "
+ "route {} in RIB \n".\
+ format(dut, metric, st_rt,
+ ))
+ return errormsg
+
+ else:
+ missing_routes.append(st_rt)
+
+ if nh_found:
+ logger.info("[DUT: {}]: Found next_hop {} for all OSPF"
+ " routes in RIB".format(router, next_hop))
+
+ if len(missing_routes) > 0:
+ errormsg = ("[DUT: {}]: Missing route in RIB, "
+ "routes: {}".\
+ format(dut, missing_routes))
+ return errormsg
+
+ if found_routes:
+ logger.info("[DUT: %s]: Verified routes in RIB, found"
+ " routes are: %s\n", dut, found_routes)
+ result = True
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(attempts=3, wait=2, return_is_str=True)
+def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
+ """
+ This API is to verify ospf routes by running
+ show ip ospf interface command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : topology descriptions
+ * `dut`: device under test
+ * `lan`: if set to true this interface belongs to LAN.
+ * `input_dict` : Input dict data, required when configuring from testcase
+
+ Usage
+ -----
+ input_dict= {
+ 'r0': {
+ 'links':{
+ 's1': {
+ 'ospf6':{
+ 'priority':98,
+ 'timerDeadSecs': 4,
+ 'area': '0.0.0.3',
+ 'mcastMemberOspfDesignatedRouters': True,
+ 'mcastMemberOspfAllRouters': True,
+ 'ospfEnabled': True,
+
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+
+ for router, rnode in tgen.routers().iteritems():
+ if 'ospf6' not in topo['routers'][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF interface on router %s:", router)
+ show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf interface json",
+ isjson=True)
+
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF6 is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ ospf_intf_data = input_dict[router]["links"]
+ for ospf_intf, intf_data in ospf_intf_data.items():
+ intf = topo['routers'][router]['links'][ospf_intf]['interface']
+ if intf in show_ospf_json:
+ for intf_attribute in intf_data['ospf6']:
+ if intf_data['ospf6'][intf_attribute] is not list:
+ if intf_data['ospf6'][intf_attribute] == show_ospf_json[
+ intf][intf_attribute]:
+ logger.info("[DUT: %s] OSPF6 interface %s: %s is %s",
+ router, intf, intf_attribute, intf_data['ospf6'][
+ intf_attribute])
+ elif intf_data['ospf6'][intf_attribute] is list:
+ for addr_list in len(show_ospf_json[intf][intf_attribute]):
+ if show_ospf_json[intf][intf_attribute][addr_list][
+ 'address'].split('/')[0] == intf_data['ospf6'][
+ 'internetAddress'][0]['address']:
+ break
+ else:
+ errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \
+ Expected is {}".format(router, intf, intf_attribute,
+ intf_data['ospf6'][intf_attribute], intf_data['ospf6'][
+ intf_attribute])
+ return errormsg
+ else:
+ errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \
+ Expected is {}".format(router, intf, intf_attribute,
+ intf_data['ospf6'][intf_attribute], intf_data['ospf6'][
+ intf_attribute])
+ return errormsg
+ result = True
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(attempts=11, wait=2, return_is_str=True)
+def verify_ospf6_database(tgen, topo, dut, input_dict):
+ """
+ This API is to verify ospf lsa's by running
+ show ip ospf database command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `topo` : next to be verified
+
+ Usage
+ -----
+ input_dict = {
+ "areas": {
+ "0.0.0.0": {
+ "routerLinkStates": {
+ "100.1.1.0-100.1.1.0": {
+ "LSID": "100.1.1.0",
+ "Advertised router": "100.1.1.0",
+ "LSA Age": 130,
+ "Sequence Number": "80000006",
+ "Checksum": "a703",
+ "Router links": 3
+ }
+ },
+ "networkLinkStates": {
+ "10.0.0.2-100.1.1.1": {
+ "LSID": "10.0.0.2",
+ "Advertised router": "100.1.1.1",
+ "LSA Age": 137,
+ "Sequence Number": "80000001",
+ "Checksum": "9583"
+ }
+ },
+ },
+ }
+ }
+ result = verify_ospf_database(tgen, topo, dut, input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ result = False
+ router = dut
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if 'ospf' not in topo['routers'][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
+ dut)
+ return errormsg
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("Verifying OSPF interface on router %s:", dut)
+ show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json",
+ isjson=True)
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+ # for intra- and inter-area LSAs
+ ospf_db_data = input_dict.setdefault("areas", None)
+ ospf_external_lsa = input_dict.setdefault(
+ 'asExternalLinkStates', None)
+
+ if ospf_db_data:
+ for ospf_area, area_lsa in ospf_db_data.items():
+ if ospf_area in show_ospf_json['areas']:
+ if 'routerLinkStates' in area_lsa:
+ for lsa in area_lsa['routerLinkStates']:
+ for rtrlsa in show_ospf_json['areas'][ospf_area][
+ 'routerLinkStates']:
+ if lsa['lsaId'] == rtrlsa['lsaId'] and \
+ lsa['advertisedRouter'] == rtrlsa[
+ 'advertisedRouter']:
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Router "
+ "LSA %s", router, ospf_area, lsa)
+ break
+ else:
+ errormsg = \
+ "[DUT: {}] OSPF LSDB area {}: expected" \
+ " Router LSA is {}".format(router, ospf_area, lsa)
+ return errormsg
+
+ if 'networkLinkStates' in area_lsa:
+ for lsa in area_lsa['networkLinkStates']:
+ for netlsa in show_ospf_json['areas'][ospf_area][
+ 'networkLinkStates']:
+ if lsa in show_ospf_json['areas'][ospf_area][
+ 'networkLinkStates']:
+ if lsa['lsaId'] == netlsa['lsaId'] and \
+ lsa['advertisedRouter'] == netlsa[
+ 'advertisedRouter']:
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Network "
+ "LSA %s", router, ospf_area, lsa)
+ break
+ else:
+ errormsg = \
+ "[DUT: {}] OSPF LSDB area {}: expected" \
+ " Network LSA is {}".format(router, ospf_area, lsa)
+ return errormsg
+
+ if 'summaryLinkStates' in area_lsa:
+ for lsa in area_lsa['summaryLinkStates']:
+ for t3lsa in show_ospf_json['areas'][ospf_area][
+ 'summaryLinkStates']:
+ if lsa['lsaId'] == t3lsa['lsaId'] and \
+ lsa['advertisedRouter'] == t3lsa[
+ 'advertisedRouter']:
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Summary "
+ "LSA %s", router, ospf_area, lsa)
+ break
+ else:
+ errormsg = \
+ "[DUT: {}] OSPF LSDB area {}: expected" \
+ " Summary LSA is {}".format(router, ospf_area, lsa)
+ return errormsg
+
+ if 'nssaExternalLinkStates' in area_lsa:
+ for lsa in area_lsa['nssaExternalLinkStates']:
+ for t7lsa in show_ospf_json['areas'][ospf_area][
+ 'nssaExternalLinkStates']:
+ if lsa['lsaId'] == t7lsa['lsaId'] and \
+ lsa['advertisedRouter'] == t7lsa[
+ 'advertisedRouter']:
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Type7 "
+ "LSA %s", router, ospf_area, lsa)
+ break
+ else:
+ errormsg = \
+ "[DUT: {}] OSPF LSDB area {}: expected" \
+ " Type7 LSA is {}".format(router, ospf_area, lsa)
+ return errormsg
+
+ if 'asbrSummaryLinkStates' in area_lsa:
+ for lsa in area_lsa['asbrSummaryLinkStates']:
+ for t4lsa in show_ospf_json['areas'][ospf_area][
+ 'asbrSummaryLinkStates']:
+ if lsa['lsaId'] == t4lsa['lsaId'] and \
+ lsa['advertisedRouter'] == t4lsa[
+ 'advertisedRouter']:
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:ASBR Summary "
+ "LSA %s", router, ospf_area, lsa)
+ result = True
+ else:
+ errormsg = \
+ "[DUT: {}] OSPF LSDB area {}: expected" \
+ " ASBR Summary LSA is {}".format(
+ router, ospf_area, lsa)
+ return errormsg
+
+ if 'linkLocalOpaqueLsa' in area_lsa:
+ for lsa in area_lsa['linkLocalOpaqueLsa']:
+ try:
+ for lnklsa in show_ospf_json['areas'][ospf_area][
+ 'linkLocalOpaqueLsa']:
+ if lsa['lsaId'] in lnklsa['lsaId'] and \
+ 'linkLocalOpaqueLsa' in show_ospf_json[
+ 'areas'][ospf_area]:
+ logger.info(
+ "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
+ " %s", ospf_area, lsa)
+ result = True
+ else:
+ errormsg = ("[DUT: FRR] OSPF LSDB area: {} "
+ "expected Opaque-LSA is {}, Found is {}".format(
+ ospf_area, lsa, show_ospf_json))
+ return errormsg
+ except KeyError:
+ errormsg = ("[DUT: FRR] linkLocalOpaqueLsa Not "
+ "present")
+ return errormsg
+
+ if ospf_external_lsa:
+ for lsa in ospf_external_lsa:
+ try:
+ for t5lsa in show_ospf_json['asExternalLinkStates']:
+ if lsa['lsaId'] == t5lsa['lsaId'] and \
+ lsa['advertisedRouter'] == t5lsa[
+ 'advertisedRouter']:
+ result = True
+ break
+ except KeyError:
+ result = False
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB:External LSA %s",
+ router, lsa)
+ result = True
+ else:
+ errormsg = \
+ "[DUT: {}] OSPF LSDB : expected" \
+ " External LSA is {}".format(router, lsa)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
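+
+# Matching sketch (an assumption read off the comparisons above, not an
+# authoritative input format): each LSA entry in input_dict is matched on
+# its 'lsaId' and 'advertisedRouter' keys, e.g.
+#
+# input_dict = {
+#     "areas": {"0.0.0.0": {"routerLinkStates": [
+#         {"lsaId": "100.1.1.0", "advertisedRouter": "100.1.1.0"}
+#     ]}}
+# }
+# result = verify_ospf6_database(tgen, topo, dut="r1", input_dict=input_dict)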
+
+
+
+def config_ospf6_interface(tgen, topo, input_dict=None, build=False,
+ load_config=True):
+ """
+ API to configure ospf6 on the router's interfaces.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+ * `load_config` : Loading the config to router this is set as True.
+
+ Usage
+ -----
+ r1_ospf6_cost = {
+ "r1": {
+ "links": {
+ "r2": {
+ "ospf6": {
+ "cost": 10
+ }
+ }
+ }
+ }
+ }
+ result = config_ospf6_interface(tgen, topo, r1_ospf6_cost)
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ input_dict = deepcopy(input_dict)
+ for router in input_dict.keys():
+ config_data = []
+ for lnk in input_dict[router]['links'].keys():
+ if "ospf6" not in input_dict[router]['links'][lnk]:
+ logger.debug("Router %s: ospf6 configs is not present in"
+ "input_dict, passed input_dict", router,
+ input_dict)
+ continue
+ ospf_data = input_dict[router]['links'][lnk]['ospf6']
+ data_ospf_area = ospf_data.setdefault("area", None)
+ data_ospf_auth = ospf_data.setdefault("authentication", None)
+ data_ospf_dr_priority = ospf_data.setdefault("priority", None)
+ data_ospf_cost = ospf_data.setdefault("cost", None)
+ data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None)
+
+ try:
+ intf = topo['routers'][router]['links'][lnk]['interface']
+ except KeyError:
+ intf = topo['switches'][router]['links'][lnk]['interface']
+
+ # interface
+ cmd = "interface {}".format(intf)
+
+ config_data.append(cmd)
+ # interface area config
+ if data_ospf_area:
+ cmd = "ipv6 ospf area {}".format(data_ospf_area)
+ config_data.append(cmd)
+
+ # interface ospf dr priority
+ if data_ospf_dr_priority:
+ cmd = "ipv6 ospf priority {}".format(
+ ospf_data["priority"])
+ if 'del_action' in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # interface ospf cost
+ if data_ospf_cost:
+ cmd = "ipv6 ospf cost {}".format(
+ ospf_data["cost"])
+ if 'del_action' in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # interface ospf mtu
+ if data_ospf_mtu:
+ cmd = "ipv6 ospf mtu-ignore"
+ if 'del_action' in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if build:
+ return config_data
+ else:
+ result = create_common_configuration(tgen, router, config_data,
+ "interface_config",
+ build=build)
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
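+
+# Removal sketch (assumption: as in the branches above, 'del_action' only
+# prefixes the generated interface command with "no"):
+#
+# r0_ospf6_cost = {
+#     "r0": {"links": {"r1": {"ospf6": {"cost": 10, "del_action": True}}}}
+# }
+# result = config_ospf6_interface(tgen, topo, r0_ospf6_cost)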
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index 61a5705a5d..ce90717fa4 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -496,7 +496,7 @@ def configure_pim_force_expire(tgen, topo, input_dict, build=False):
# Verification APIs
#############################################
@retry(attempts=6, wait=2, return_is_str=True)
-def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None):
+def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected=True):
"""
Verify all PIM neighbors are up and running, config is verified
using "show ip pim neighbor" cli
@@ -508,6 +508,7 @@ def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None):
* `dut` : dut info
* `iface` : link for which PIM nbr need to check
* `nbr_ip` : neighbor ip of interface
+ * `expected` : expected results from API, by-default True
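+ (assumption: negative testcases can pass expected=False so the
+ retry wrapper does not spend the full retry budget on a check
+ that is meant to fail)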
Usage
-----
@@ -619,7 +620,7 @@ def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None):
@retry(attempts=21, wait=2, return_is_str=True)
-def verify_igmp_groups(tgen, dut, interface, group_addresses):
+def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):
"""
Verify IGMP groups are received from an intended interface
by running "show ip igmp groups" command
@@ -630,6 +631,7 @@ def verify_igmp_groups(tgen, dut, interface, group_addresses):
* `dut`: device under test
* `interface`: interface, from which IGMP groups would be received
* `group_addresses`: IGMP group address
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -693,7 +695,7 @@ def verify_igmp_groups(tgen, dut, interface, group_addresses):
@retry(attempts=31, wait=2, return_is_str=True)
def verify_upstream_iif(
- tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1
+ tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1, expected=True
):
"""
Verify upstream inbound interface is updated correctly
@@ -708,6 +710,7 @@ def verify_upstream_iif(
* `group_addresses`: IGMP group address
* `joinState`: upstream join state
* `refCount`: refCount value
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -845,7 +848,7 @@ def verify_upstream_iif(
@retry(attempts=6, wait=2, return_is_str=True)
-def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses):
+def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses, expected=True):
"""
Verify join state is updated correctly and join timer is
running with the help of "show ip pim upstream" cli
@@ -857,6 +860,7 @@ def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses):
* `iif`: inbound interface
* `src_address`: source address
* `group_addresses`: IGMP group address
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -964,7 +968,7 @@ def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses):
@retry(attempts=41, wait=2, return_is_dict=True)
def verify_ip_mroutes(
- tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0
+ tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0, expected=True
):
"""
Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes
@@ -980,7 +984,7 @@ def verify_ip_mroutes(
* `oil`: Outgoing interface
* `return_uptime`: If True, return uptime dict, default is False
* `mwait`: Wait time, default is 0
-
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1161,7 +1165,7 @@ def verify_ip_mroutes(
@retry(attempts=31, wait=2, return_is_str=True)
def verify_pim_rp_info(
- tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None
+ tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None, expected=True
):
"""
Verify pim rp info by running "show ip pim rp-info" cli
@@ -1176,6 +1180,7 @@ def verify_pim_rp_info(
* `rp`: RP address
* `source`: Source of RP
* `iamrp`: User defined RP
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1317,7 +1322,7 @@ def verify_pim_rp_info(
@retry(attempts=31, wait=2, return_is_str=True)
def verify_pim_state(
- tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None
+ tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None, expected=True
):
"""
Verify pim state by running "show ip pim state" cli
@@ -1331,6 +1336,7 @@ def verify_pim_state(
* `group_addresses`: IGMP group address
* `src_address`: source address, default = None
* installed_fl` : Installed flag
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1485,7 +1491,7 @@ def verify_pim_interface_traffic(tgen, input_dict):
@retry(attempts=21, wait=2, return_is_str=True)
-def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None):
+def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None, expected=True):
"""
Verify all PIM interface are up and running, config is verified
using "show ip pim interface" cli
@@ -1497,6 +1503,7 @@ def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None):
* `dut` : device under test
* `interface` : interface name
* `interface_ip` : interface ip address
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -1791,7 +1798,7 @@ def clear_ip_igmp_interfaces(tgen, dut):
@retry(attempts=10, wait=2, return_is_str=True)
-def clear_ip_mroute_verify(tgen, dut):
+def clear_ip_mroute_verify(tgen, dut, expected=True):
"""
Clear ip mroute by running "clear ip mroute" cli and verify
mroutes are up again after mroute clear
@@ -1800,6 +1807,8 @@ def clear_ip_mroute_verify(tgen, dut):
----------
* `tgen`: topogen object
* `dut`: Device Under Test
+ * `expected` : expected results from API, by-default True
+
Usage
-----
@@ -2165,7 +2174,7 @@ def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None):
@retry(attempts=6, wait=2, return_is_str=True)
-def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None):
+def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True):
"""
Verify pim rp info by running "show ip pim rp-info" cli
@@ -2177,6 +2186,7 @@ def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None):
* `grp_addr`: IGMP group address
* 'rp_source': source from which rp installed
* 'rpadd': rp address
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2267,7 +2277,7 @@ def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None):
@retry(attempts=31, wait=2, return_is_str=True)
-def verify_pim_bsr(tgen, topo, dut, bsr_ip):
+def verify_pim_bsr(tgen, topo, dut, bsr_ip, expected=True):
"""
Verify all PIM interface are up and running, config is verified
using "show ip pim interface" cli
@@ -2278,6 +2288,7 @@ def verify_pim_bsr(tgen, topo, dut, bsr_ip):
* `topo` : json file data
* `dut` : device under test
* 'bsr' : bsr ip to be verified
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2322,7 +2333,7 @@ def verify_pim_bsr(tgen, topo, dut, bsr_ip):
@retry(attempts=31, wait=2, return_is_str=True)
-def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None):
+def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None, expected=True):
"""
Verify IP PIM upstream rpf, config is verified
using "show ip pim neighbor" cli
@@ -2336,6 +2347,7 @@ def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=N
* `group_addresses` : list of group address for which upstream info
needs to be checked
* `rp` : RP address
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2519,7 +2531,7 @@ def enable_disable_pim_bsm(tgen, router, intf, enable=True):
@retry(attempts=31, wait=2, return_is_str=True)
-def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None):
+def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None, expected=True):
"""
Verify ip pim join by running "show ip pim join" cli
@@ -2531,6 +2543,7 @@ def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=
* `interface`: interface name, from which PIM join would come
* `group_addresses`: IGMP group address
* `src_address`: Source address
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2609,7 +2622,7 @@ def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=
@retry(attempts=31, wait=2, return_is_dict=True)
-def verify_igmp_config(tgen, input_dict, stats_return=False):
+def verify_igmp_config(tgen, input_dict, stats_return=False, expected=True):
"""
Verify igmp interface details, verifying following configs:
timerQueryInterval
@@ -2623,6 +2636,7 @@ def verify_igmp_config(tgen, input_dict, stats_return=False):
* `input_dict` : Input dict data, required to verify
timer
* `stats_return`: If user wants API to return statistics
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -2898,7 +2912,7 @@ def verify_igmp_config(tgen, input_dict, stats_return=False):
@retry(attempts=31, wait=2, return_is_str=True)
-def verify_pim_config(tgen, input_dict):
+def verify_pim_config(tgen, input_dict, expected=True):
"""
Verify pim interface details, verifying following configs:
drPriority
@@ -2912,6 +2926,7 @@ def verify_pim_config(tgen, input_dict):
* `tgen`: topogen object
* `input_dict` : Input dict data, required to verify
timer
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -3023,7 +3038,7 @@ def verify_pim_config(tgen, input_dict):
@retry(attempts=21, wait=2, return_is_dict=True)
-def verify_multicast_traffic(tgen, input_dict, return_traffic=False):
+def verify_multicast_traffic(tgen, input_dict, return_traffic=False, expected=True):
"""
Verify multicast traffic by running
"show multicast traffic count json" cli
@@ -3034,6 +3049,8 @@ def verify_multicast_traffic(tgen, input_dict, return_traffic=False):
* `input_dict(dict)`: defines DUT, what and for which interfaces
traffic needs to be verified
* `return_traffic`: returns traffic stats
+ * `expected` : expected results from API, by-default True
+
Usage
-----
input_dict = {
@@ -3264,7 +3281,7 @@ def get_refCount_for_mroute(tgen, dut, iif, src_address, group_addresses):
@retry(attempts=21, wait=2, return_is_str=True)
-def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag):
+def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag, expected=True):
"""
Verify flag state for mroutes and make sure (*, G)/(S, G) are having
coorect flags by running "show ip mroute" cli
@@ -3276,6 +3293,7 @@ def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag):
* `src_address`: source address
* `group_addresses`: IGMP group address
* `flag`: flag state, needs to be verified
+ * `expected` : expected results from API, by-default True
Usage
-----
@@ -3358,7 +3376,7 @@ def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag):
@retry(attempts=21, wait=2, return_is_str=True)
-def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip):
+def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip, expected=True):
"""
Verify all IGMP interface are up and running, config is verified
using "show ip igmp interface" cli
@@ -3370,6 +3388,7 @@ def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip):
* `dut` : device under test
* `igmp_iface` : interface name
* `interface_ip` : interface ip address
+ * `expected` : expected results from API, by-default True
Usage
-----
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 4b0f07eb1e..ade5933504 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -657,6 +657,8 @@ class TopoRouter(TopoGear):
# Try to find relevant old logfiles in /tmp and delete them
map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
+ # Remove old valgrind files
+ map(os.remove, glob.glob("{}/{}.valgrind.*".format(self.logdir, self.name)))
# Remove old core files
map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 2a5bd17361..d1f60bfe0d 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1454,6 +1454,8 @@ class Router(Node):
gdb_breakpoints = g_extra_config["gdb_breakpoints"]
gdb_daemons = g_extra_config["gdb_daemons"]
gdb_routers = g_extra_config["gdb_routers"]
+ valgrind_extra = g_extra_config["valgrind_extra"]
+ valgrind_memleaks = g_extra_config["valgrind_memleaks"]
bundle_data = ""
@@ -1503,7 +1505,14 @@ class Router(Node):
) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
else:
binary = os.path.join(self.daemondir, daemon)
+
cmdenv = "ASAN_OPTIONS=log_path={0}.asan".format(daemon)
+ if valgrind_memleaks:
+ this_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
+ supp_file = os.path.abspath(os.path.join(this_dir, "../../../tools/valgrind.supp"))
+ cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(daemon, self.logdir, self.name, supp_file)
+ if valgrind_extra:
+ cmdenv += "--gen-suppressions=all --expensive-definedness-checks=yes"
cmdopt = "{} --log file:{}.log --log-level debug".format(
daemon_opts, daemon
)
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json
new file mode 100644
index 0000000000..3669b3a554
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json
@@ -0,0 +1,173 @@
+{
+ "feature": [
+ "bgp"
+ ],
+ "address_types": [
+ "ipv6"
+ ],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_single_area.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_single_area.json
new file mode 100644
index 0000000000..d93eb1f217
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_single_area.json
@@ -0,0 +1,190 @@
+{
+ "address_types": [
+ "ipv6"
+ ],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR1",
+ "ospf6": {
+ "area": "0.0.0.0"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "1.0.4.17",
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
new file mode 100644
index 0000000000..4aa71bfb16
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+import json
+from copy import deepcopy
+from ipaddress import IPv4Address
+from lib.topotest import frr_unicode
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+import ipaddress
+from lib.bgp import verify_bgp_convergence, create_router_bgp
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ step,
+ create_route_maps,
+ shutdown_bringup_interface,
+ create_interfaces_cfg,
+ topo_daemons,
+ get_frr_ipv6_linklocal,
+)
+
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+from lib.ospf import (
+ verify_ospf6_neighbor,
+ config_ospf_interface,
+ clear_ospf,
+ verify_ospf6_rib,
+ create_router_ospf,
+ verify_ospf6_interface,
+ verify_ospf6_database,
+ config_ospf6_interface,
+)
+
+from ipaddress import IPv6Address
+
+# Global variables
+topo = None
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/ospfv3_rte_calc.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+NETWORK = {
+ "ipv6": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["2::1/128", "2::2/128", "2::3/128", "2::4/128", "2::5/128"],
+}
+TOPOLOGY = """
+ Please view in a fixed-width font such as Courier.
+ +---+ A1 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A2
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A3 +---+
+"""
+
+TESTCASES = """
+1. OSPF Cost - verifying ospf interface cost functionality
+"""
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ ospf_convergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_convergence is True, "setup_module: Failed \n Error: {}".format(
+ ospf_convergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a particular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a particular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which global ipv6 address needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when global ipv6 address not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
+
+
+def red_static(dut, config=True):
+ """Local def for Redstribute static routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {
+ "redistribute": [{"redist_type": "static", "del_action": True}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+ """Local def for Redstribute connected routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {
+ "redistribute": [{"redist_type": "connected", "del_action": True}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+def test_ospfv3_cost_tc52_p0(request):
+ """OSPF Cost - verifying ospf interface cost functionality"""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config.")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Configure ospf cost as 20 on interface between R0 and R1. "
+ "Configure ospf cost as 30 between interface between R0 and R2."
+ )
+
+ r0_ospf_cost = {
+ "r0": {"links": {"r1": {"ospf6": {"cost": 20}}, "r2": {"ospf6": {"cost": 30}}}}
+ }
+ result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that cost is updated in the ospf interface between"
+ " r0 and r1 as 30 and r0 and r2 as 20"
+ )
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=r0_ospf_cost)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Swap the costs between interfaces on r0, between r0 and r1 to 30"
+ ", r0 and r2 to 20"
+ )
+
+ r0_ospf_cost = {
+ "r0": {"links": {"r1": {"ospf6": {"cost": 30}}, "r2": {"ospf6": {"cost": 20}}}}
+ }
+ result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that cost is updated in the ospf interface between r0 "
+ "and r1 as 30 and r0 and r2 as 20."
+ )
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=r0_ospf_cost)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(" Un configure cost from the interface r0 - r1.")
+
+ r0_ospf_cost = {
+ "r0": {"links": {"r1": {"ospf6": {"cost": 30, "del_action": True}}}}
+ }
+ result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {"links": {"r1": {"ospf6": {"cost": 10}}, "r2": {"ospf6": {"cost": 20}}}}
+ }
+ step(
+ "Verify that cost is updated in the ospf interface between r0"
+ " and r1 as 10 and r0 and r2 as 20."
+ )
+
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(" Un configure cost from the interface r0 - r2.")
+
+ r0_ospf_cost = {
+ "r0": {"links": {"r2": {"ospf6": {"cost": 20, "del_action": True}}}}
+ }
+ result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that cost is updated in the ospf interface between r0"
+ "and r1 as 10 and r0 and r2 as 10"
+ )
+
+ input_dict = {
+ "r0": {"links": {"r1": {"ospf6": {"cost": 10}}, "r2": {"ospf6": {"cost": 10}}}}
+ }
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
new file mode 100644
index 0000000000..a84f1a1eb6
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+import json
+from copy import deepcopy
+from ipaddress import IPv4Address
+from lib.topotest import frr_unicode
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+import ipaddress
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ step,
+ create_route_maps,
+ shutdown_bringup_interface,
+ create_interfaces_cfg,
+ topo_daemons,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+from lib.ospf import (
+ verify_ospf6_neighbor,
+ config_ospf_interface,
+ clear_ospf,
+ verify_ospf6_rib,
+ create_router_ospf,
+ verify_ospf6_interface,
+ verify_ospf6_database,
+ config_ospf6_interface,
+)
+
+from ipaddress import IPv6Address
+
+# Global variables
+topo = None
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/ospfv3_single_area.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+"""
+TOPOLOGY =
+ Please view in a fixed-width font such as Courier.
+ +---+ A1 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A2
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A3 +---+
+
+TESTCASES =
+1. OSPF IFSM -Verify state change events on p2p network.
+2. OSPF Timers - Verify OSPF interface timer hello interval functionality
+3. OSPF Timers - Verify OSPF interface timer dead interval functionality
+4. Verify ospf show commands with json output.
+5. Verify NFSM events when ospf nbr changes with different MTU values.
+ """
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ ospf_convergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_convergence is True, "setup_module: Failed \n Error: {}".format(
+ ospf_convergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ospfv3_p2p_tc3_p0(request):
+ """OSPF IFSM -Verify state change events on p2p network."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+ step(
+ "Verify that OSPF is subscribed to multi cast services "
+ "(All SPF, all DR Routers)."
+ )
+ step("Verify that interface is enabled in ospf.")
+ step("Verify that config is successful.")
+ dut = "r0"
+ input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Delete the ip address")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"],
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Change the ip on the R0 interface")
+
+ topo_modify_change_ip = deepcopy(topo)
+ intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"]
+ topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] = str(
+ IPv6Address(frr_unicode(intf_ip.split("/")[0])) + 3
+ ) + "/{}".format(intf_ip.split("/")[1])
+
+ build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False)
+ step("Verify that interface is enabled in ospf.")
+ dut = "r0"
+ input_dict = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ospf6": {
+ "internetAddress": [
+ {
+ "type": "inet6",
+ "address": topo_modify_change_ip["routers"]["r0"][
+ "links"
+ ]["r3"]["ipv6"].split("/")[0],
+ }
+ ],
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Modify the mask on the R0 interface")
+ ip_addr = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"]
+ mask = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"]
+ step("Delete the ip address")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ipv6": ip_addr,
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Change the ip on the R0 interface")
+
+ topo_modify_change_ip = deepcopy(topo)
+ intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"]
+ topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] = str(
+ IPv6Address(frr_unicode(intf_ip.split("/")[0])) + 3
+ ) + "/{}".format(int(intf_ip.split("/")[1]) + 1)
+
+ build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False)
+ step("Verify that interface is enabled in ospf.")
+ dut = "r0"
+ input_dict = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ospf6": {
+ "internetAddress": [
+ {
+ "type": "inet6",
+ "address": topo_modify_change_ip["routers"]["r0"][
+ "links"
+ ]["r3"]["ipv6"].split("/")[0],
+ }
+ ],
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1 = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ipv6": topo_modify_change_ip["routers"]["r0"]["links"]["r3"][
+ "ipv6"
+ ],
+ "interface": topo_modify_change_ip["routers"]["r0"]["links"]["r3"][
+ "interface"
+ ],
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ build_config_from_json(tgen, topo, save_bkup=False)
+
+ step("Change the area id on the interface")
+ input_dict = {
+ "r0": {
+ "links": {
+ "r3": {
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "links": {
+ "r3": {
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "ospf6": {"area": "0.0.0.1"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ step("Verify that interface is enabled in ospf.")
+ dut = "r0"
+ input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "links": {
+ "r3": {
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "ospf6": {"area": "0.0.0.1"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "links": {
+ "r3": {
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that interface is enabled in ospf.")
+ dut = "r0"
+ input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify the all neighbors are up after clearing the process.")
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr, ospf="ospf6")
+
+ ospf_convergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, ospf_convergence
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 0bf4d8ece2..1d94fcae6b 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -1070,8 +1070,14 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
0, client->redist_v4_del_cnt);
vty_out(vty, "Redist:v6 %-12u%-12u%-12u\n", client->redist_v6_add_cnt,
0, client->redist_v6_del_cnt);
+ vty_out(vty, "VRF %-12u%-12u%-12u\n", client->vrfadd_cnt, 0,
+ client->vrfdel_cnt);
vty_out(vty, "Connected %-12u%-12u%-12u\n", client->ifadd_cnt, 0,
client->ifdel_cnt);
+ vty_out(vty, "Interface %-12u%-12u%-12u\n", client->ifup_cnt, 0,
+ client->ifdown_cnt);
+ vty_out(vty, "Intf Addr %-12u%-12u%-12u\n",
+ client->connected_rt_add_cnt, 0, client->connected_rt_del_cnt);
vty_out(vty, "BFD peer %-12u%-12u%-12u\n", client->bfd_peer_add_cnt,
client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
vty_out(vty, "NHT v4 %-12u%-12u%-12u\n",
@@ -1080,20 +1086,17 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
client->v6_nh_watch_add_cnt, 0, client->v6_nh_watch_rem_cnt);
vty_out(vty, "VxLAN SG %-12u%-12u%-12u\n", client->vxlan_sg_add_cnt,
0, client->vxlan_sg_del_cnt);
- vty_out(vty, "Interface Up Notifications: %u\n", client->ifup_cnt);
- vty_out(vty, "Interface Down Notifications: %u\n", client->ifdown_cnt);
- vty_out(vty, "VNI add notifications: %u\n", client->vniadd_cnt);
- vty_out(vty, "VNI delete notifications: %u\n", client->vnidel_cnt);
- vty_out(vty, "L3-VNI add notifications: %u\n", client->l3vniadd_cnt);
- vty_out(vty, "L3-VNI delete notifications: %u\n", client->l3vnidel_cnt);
- vty_out(vty, "MAC-IP add notifications: %u\n", client->macipadd_cnt);
- vty_out(vty, "MAC-IP delete notifications: %u\n", client->macipdel_cnt);
- vty_out(vty, "ES add notifications: %u\n", client->local_es_add_cnt);
- vty_out(vty, "ES delete notifications: %u\n", client->local_es_del_cnt);
- vty_out(vty, "ES-EVI add notifications: %u\n",
- client->local_es_evi_add_cnt);
- vty_out(vty, "ES-EVI delete notifications: %u\n",
- client->local_es_evi_del_cnt);
+ vty_out(vty, "VNI %-12u%-12u%-12u\n", client->vniadd_cnt, 0,
+ client->vnidel_cnt);
+ vty_out(vty, "L3-VNI %-12u%-12u%-12u\n", client->l3vniadd_cnt, 0,
+ client->l3vnidel_cnt);
+ vty_out(vty, "MAC-IP %-12u%-12u%-12u\n", client->macipadd_cnt, 0,
+ client->macipdel_cnt);
+ vty_out(vty, "ES %-12u%-12u%-12u\n", client->local_es_add_cnt,
+ 0, client->local_es_del_cnt);
+ vty_out(vty, "ES-EVI %-12u%-12u%-12u\n",
+ client->local_es_evi_add_cnt, 0, client->local_es_evi_del_cnt);
+ vty_out(vty, "Errors: %u\n", client->error_cnt);
TAILQ_FOREACH (info, &client->gr_info_queue, gr_info) {
vty_out(vty, "VRF : %s\n", vrf_id_to_name(info->vrf_id));