path: root/tests/topotests/lib
Diffstat (limited to 'tests/topotests/lib')
-rw-r--r--  tests/topotests/lib/bgp.py            412
-rw-r--r--  tests/topotests/lib/common_config.py  128
-rw-r--r--  tests/topotests/lib/ospf.py           810
-rw-r--r--  tests/topotests/lib/topogen.py          4
-rw-r--r--  tests/topotests/lib/topotest.py        63
5 files changed, 842 insertions, 575 deletions
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index a236a916b5..2f1f67439f 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -44,6 +44,7 @@ from lib.common_config import (
FRRCFG_FILE,
retry,
get_ipv6_linklocal_address,
+ get_frr_ipv6_linklocal
)
LOGDIR = "/tmp/topotests/"
@@ -265,6 +266,11 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
config_data.append("bgp router-id {}".format(router_id))
config_data.append("no bgp network import-check")
+ bgp_peer_grp_data = bgp_data.setdefault("peer-group", {})
+
+ if "peer-group" in bgp_data and bgp_peer_grp_data:
+ peer_grp_data = __create_bgp_peer_group(tgen, bgp_peer_grp_data, router)
+ config_data.extend(peer_grp_data)
bst_path = bgp_data.setdefault("bestpath", None)
if bst_path:
@@ -380,6 +386,7 @@ def __create_bgp_unicast_neighbor(
addr_data = addr_dict["unicast"]
if addr_data:
config_data.append("address-family {} unicast".format(addr_type))
+
advertise_network = addr_data.setdefault("advertise_networks", [])
for advertise_network_dict in advertise_network:
network = advertise_network_dict["network"]
@@ -404,14 +411,29 @@ def __create_bgp_unicast_neighbor(
config_data.append(cmd)
+ import_cmd = addr_data.setdefault("import", {})
+ if import_cmd:
+ try:
+ if import_cmd["delete"]:
+ config_data.append("no import vrf {}".format(import_cmd["vrf"]))
+ except KeyError:
+ config_data.append("import vrf {}".format(import_cmd["vrf"]))
+
max_paths = addr_data.setdefault("maximum_paths", {})
if max_paths:
ibgp = max_paths.setdefault("ibgp", None)
ebgp = max_paths.setdefault("ebgp", None)
+ del_cmd = max_paths.setdefault("delete", False)
if ibgp:
- config_data.append("maximum-paths ibgp {}".format(ibgp))
+ if del_cmd:
+ config_data.append("no maximum-paths ibgp {}".format(ibgp))
+ else:
+ config_data.append("maximum-paths ibgp {}".format(ibgp))
if ebgp:
- config_data.append("maximum-paths {}".format(ebgp))
+ if del_cmd:
+ config_data.append("no maximum-paths {}".format(ebgp))
+ else:
+ config_data.append("maximum-paths {}".format(ebgp))
aggregate_addresses = addr_data.setdefault("aggregate_address", [])
for aggregate_address in aggregate_addresses:
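As an illustration, a minimal sketch (hypothetical router and VRF names) of the input-dict shape the new "import" and "maximum_paths" branches above consume:

    input_dict = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            # emits "import vrf RED"; adding "delete": True gives the "no" form
                            "import": {"vrf": "RED"},
                            # with "delete": True these become "no maximum-paths ..."
                            "maximum_paths": {"ebgp": 2, "delete": True},
                        }
                    }
                }
            }
        }
    }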
@@ -649,6 +671,38 @@ def __create_l2vpn_evpn_address_family(
return config_data
+def __create_bgp_peer_group(topo, input_dict, router):
+ """
+ Helper API to create neighbor specific configuration
+
+ Parameters
+ ----------
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured
+ """
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_peer_group()")
+
+ for grp, grp_dict in input_dict.items():
+ config_data.append("neighbor {} peer-group".format(grp))
+ neigh_cxt = "neighbor {} ".format(grp)
+ update_source = grp_dict.setdefault("update-source", None)
+ remote_as = grp_dict.setdefault("remote-as", None)
+ capability = grp_dict.setdefault("capability", None)
+ if update_source:
+ config_data.append("{} update-source {}".format(neigh_cxt, update_source))
+
+ if remote_as:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+
+ if capability:
+ config_data.append("{} capability {}".format(neigh_cxt, capability))
+
+ logger.debug("Exiting lib API: __create_bgp_peer_group()")
+ return config_data
+
+
def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
"""
Helper API to create neighbor specific configuration
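For context on the new __create_bgp_peer_group() helper above, a minimal sketch (hypothetical names) of the "peer-group" block that __create_bgp_global() now passes to it, and the kind of lines it returns:

    bgp_data = {
        "peer-group": {
            "PG1": {
                "remote-as": "65001",
                "update-source": "lo",
                "capability": "extended-nexthop",
            }
        }
    }
    # __create_bgp_peer_group() would return lines like:
    #   neighbor PG1 peer-group
    #   neighbor PG1 update-source lo
    #   neighbor PG1 remote-as 65001
    #   neighbor PG1 capability extended-nexthop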
@@ -660,10 +714,9 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
* `input_dict` : Input dict data, required when configuring from testcase
* `router` : router id to be configured
"""
-
config_data = []
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
+ tgen = get_topogen()
bgp_data = input_dict["address_family"]
neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
@@ -672,35 +725,91 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
nh_details = topo[name]
if "vrfs" in topo[router] or type(nh_details["bgp"]) is list:
- remote_as = nh_details["bgp"][0]["local_as"]
+ for vrf_data in nh_details["bgp"]:
+ if "vrf" in nh_details["links"][dest_link] and "vrf" in vrf_data:
+ if nh_details["links"][dest_link]["vrf"] == vrf_data["vrf"]:
+ remote_as = vrf_data["local_as"]
+ break
+ else:
+ if "vrf" not in vrf_data:
+ remote_as = vrf_data["local_as"]
+ break
+
else:
remote_as = nh_details["bgp"]["local_as"]
update_source = None
- if dest_link in nh_details["links"].keys():
- ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
- # Loopback interface
- if "source_link" in peer and peer["source_link"] == "lo":
- update_source = topo[router]["links"]["lo"][addr_type].split("/")[0]
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ ip_addr = nh_details["links"][dest_link]["peer-interface"]
+ elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ intf = topo[name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, name, intf)
+ elif dest_link in nh_details["links"].keys():
+ try:
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
+ except KeyError:
+ intf = topo[name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, name, intf)
+ if "delete" in peer and peer["delete"]:
+ neigh_cxt = "no neighbor {}".format(ip_addr)
+ config_data.append("{}".format(neigh_cxt))
+ return config_data
+ else:
+ neigh_cxt = "neighbor {}".format(ip_addr)
- neigh_cxt = "neighbor {}".format(ip_addr)
+ if "peer-group" in peer:
+ config_data.append(
+ "neighbor {} interface peer-group {}".format(
+ ip_addr, peer["peer-group"]
+ )
+ )
+
+ # Loopback interface
+ if "source_link" in peer:
+ if peer["source_link"] == "lo":
+ update_source = topo[router]["links"]["lo"][addr_type].split("/")[0]
+ else:
+ update_source = topo[router]["links"][peer["source_link"]][
+ "interface"
+ ]
+ if "peer-group" not in peer:
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ config_data.append(
+ "{} interface remote-as {}".format(neigh_cxt, remote_as)
+ )
+ elif add_neigh:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
- if add_neigh:
- config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
if addr_type == "ipv6":
config_data.append("address-family ipv6 unicast")
config_data.append("{} activate".format(neigh_cxt))
+ if "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ config_data.append(
+ "{} update-source {}".format(
+ neigh_cxt, nh_details["links"][dest_link]["peer-interface"]
+ )
+ )
+ config_data.append(
+ "{} interface {}".format(
+ neigh_cxt, nh_details["links"][dest_link]["peer-interface"]
+ )
+ )
+
disable_connected = peer.setdefault("disable_connected_check", False)
keep_alive = peer.setdefault("keepalivetimer", 3)
hold_down = peer.setdefault("holddowntimer", 10)
password = peer.setdefault("password", None)
no_password = peer.setdefault("no_password", None)
+ capability = peer.setdefault("capability", None)
max_hop_limit = peer.setdefault("ebgp_multihop", 1)
+
graceful_restart = peer.setdefault("graceful-restart", None)
graceful_restart_helper = peer.setdefault("graceful-restart-helper", None)
graceful_restart_disable = peer.setdefault("graceful-restart-disable", None)
+ if capability:
+ config_data.append("{} capability {}".format(neigh_cxt, capability))
if update_source:
config_data.append(
@@ -718,7 +827,6 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
config_data.append(
"{} timers {} {}".format(neigh_cxt, keep_alive, hold_down)
)
-
if graceful_restart:
config_data.append("{} graceful-restart".format(neigh_cxt))
elif graceful_restart == False:
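A minimal sketch (hypothetical router names) of a neighbor entry exercising the new keys handled in the hunk above:

    neigh_data = {
        "r2": {
            "dest_link": {
                "r1": {
                    # "unnumbered" peers over the interface name; "link-local"
                    # resolves the peer's fe80:: address via get_frr_ipv6_linklocal()
                    "neighbor_type": "unnumbered",
                    "peer-group": "PG1",   # emits "neighbor <intf> interface peer-group PG1"
                    "capability": "extended-nexthop",
                    # "delete": True would emit only "no neighbor <addr>" and return early
                }
            }
        }
    }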
@@ -768,7 +876,7 @@ def __create_bgp_unicast_address_family(
config_data = []
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
+ tgen = get_topogen()
bgp_data = input_dict["address_family"]
neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
@@ -784,16 +892,34 @@ def __create_bgp_unicast_address_family(
for destRouterLink, data in sorted(nh_details["links"].items()):
if "type" in data and data["type"] == "loopback":
if dest_link == destRouterLink:
- ip_addr = nh_details["links"][destRouterLink][
- addr_type
- ].split("/")[0]
+ ip_addr = (
+ nh_details["links"][destRouterLink][addr_type]
+ .split("/")[0]
+ .lower()
+ )
# Physical interface
else:
- if dest_link in nh_details["links"].keys():
-
- ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
- if addr_type == "ipv4" and bgp_data["ipv6"]:
+ # check the neighbor type; if unnumbered nbr, use interface.
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ ip_addr = nh_details["links"][dest_link]["peer-interface"]
+ elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ intf = topo[peer_name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf)
+ elif dest_link in nh_details["links"].keys():
+ try:
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[
+ 0
+ ]
+ except KeyError:
+ intf = topo[peer_name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf)
+ if (
+ addr_type == "ipv4"
+ and bgp_data["ipv6"]
+ and check_address_types("ipv6")
+ and "ipv6" in nh_details["links"][dest_link]
+ ):
deactivate = nh_details["links"][dest_link]["ipv6"].split("/")[
0
]
@@ -822,6 +948,7 @@ def __create_bgp_unicast_address_family(
prefix_lists = peer.setdefault("prefix_lists", {})
route_maps = peer.setdefault("route_maps", {})
no_send_community = peer.setdefault("no_send_community", None)
+ capability = peer.setdefault("capability", None)
allowas_in = peer.setdefault("allowas-in", None)
# next-hop-self
@@ -841,6 +968,11 @@ def __create_bgp_unicast_address_family(
"no {} send-community {}".format(neigh_cxt, no_send_community)
)
+ # capability_ext_nh
+ if capability and addr_type == "ipv6":
+ config_data.append("address-family ipv4 unicast")
+ config_data.append("{} activate".format(neigh_cxt))
+
if "allowas_in" in peer:
allow_as_in = peer["allowas_in"]
config_data.append("{} allowas-in {}".format(neigh_cxt, allow_as_in))
@@ -1067,33 +1199,37 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
API will verify if BGP is converged within the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
state is established,
+
Parameters
----------
* `tgen`: topogen object
* `topo`: input json file data
* `dut`: device under test
- * `expected` : expected results from API, by-default True
Usage
-----
# To verify if BGP is converged for all the routers used in
topology
results = verify_bgp_convergence(tgen, topo, dut="r1")
+
Returns
-------
errormsg(str) or True
"""
- logger.debug("Entering lib API: verify_bgp_convergence()")
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
for router, rnode in tgen.routers().items():
- if dut is not None and dut != router:
+ if 'bgp' not in topo['routers'][router]:
continue
- if "bgp" not in topo["routers"][router]:
+ if dut is not None and dut != router:
continue
logger.info("Verifying BGP Convergence on router %s:", router)
- show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json",
+ isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
@@ -1115,100 +1251,6 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
# To find neighbor ip type
bgp_addr_type = bgp_data["address_family"]
- if "ipv4" in bgp_addr_type or "ipv6" in bgp_addr_type:
- for addr_type in bgp_addr_type.keys():
- if not check_address_types(addr_type):
- continue
- total_peer = 0
-
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- for bgp_neighbor in bgp_neighbors:
- total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
-
- for addr_type in bgp_addr_type.keys():
- if not check_address_types(addr_type):
- continue
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- no_of_peer = 0
- for bgp_neighbor, peer_data in bgp_neighbors.items():
- for dest_link in peer_data["dest_link"].keys():
- data = topo["routers"][bgp_neighbor]["links"]
- if dest_link in data:
- peer_details = peer_data["dest_link"][dest_link]
- # for link local neighbors
- if (
- "neighbor_type" in peer_details
- and peer_details["neighbor_type"] == "link-local"
- ):
- neighbor_ip = get_ipv6_linklocal_address(
- topo["routers"], bgp_neighbor, dest_link
- )
- elif "source_link" in peer_details:
- neighbor_ip = topo["routers"][bgp_neighbor][
- "links"
- ][peer_details["source_link"]][addr_type].split(
- "/"
- )[
- 0
- ]
- elif (
- "neighbor_type" in peer_details
- and peer_details["neighbor_type"] == "unnumbered"
- ):
- neighbor_ip = data[dest_link]["peer-interface"]
- else:
- neighbor_ip = data[dest_link][addr_type].split("/")[
- 0
- ]
- nh_state = None
-
- if addr_type == "ipv4":
- if "ipv4Unicast" in show_bgp_json[vrf]:
- ipv4_data = show_bgp_json[vrf]["ipv4Unicast"][
- "peers"
- ]
- nh_state = ipv4_data[neighbor_ip]["state"]
- else:
- if "ipv6Unicast" in show_bgp_json[vrf]:
- ipv6_data = show_bgp_json[vrf]["ipv6Unicast"][
- "peers"
- ]
- nh_state = ipv6_data[neighbor_ip]["state"]
- if nh_state == "Established":
- no_of_peer += 1
-
- if "l2vpn" in bgp_addr_type:
- if "neighbor" not in bgp_addr_type["l2vpn"]["evpn"]:
- if no_of_peer == total_peer:
- logger.info(
- "[DUT: %s] VRF: %s, BGP is Converged for %s address-family",
- router,
- vrf,
- addr_type,
- )
- else:
- errormsg = (
- "[DUT: %s] VRF: %s, BGP is not converged for %s address-family"
- % (router, vrf, addr_type)
- )
- return errormsg
- else:
- if no_of_peer == total_peer:
- logger.info(
- "[DUT: %s] VRF: %s, BGP is Converged for %s address-family",
- router,
- vrf,
- addr_type,
- )
- else:
- errormsg = (
- "[DUT: %s] VRF: %s, BGP is not converged for %s address-family"
- % (router, vrf, addr_type)
- )
- return errormsg
-
if "l2vpn" in bgp_addr_type:
total_evpn_peer = 0
@@ -1224,46 +1266,120 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
data = topo["routers"][bgp_neighbor]["links"]
for dest_link in dest_link_dict.keys():
if dest_link in data:
- peer_details = peer_data[_addr_type][dest_link]
+ peer_details = \
+ peer_data[_addr_type][dest_link]
- neighbor_ip = data[dest_link][_addr_type].split("/")[0]
+ neighbor_ip = \
+ data[dest_link][_addr_type].split(
+ "/")[0]
nh_state = None
- if (
- "ipv4Unicast" in show_bgp_json[vrf]
- or "ipv6Unicast" in show_bgp_json[vrf]
- ):
- errormsg = (
- "[DUT: %s] VRF: %s, "
- "ipv4Unicast/ipv6Unicast"
- " address-family present"
- " under l2vpn" % (router, vrf)
- )
+ if "ipv4Unicast" in show_bgp_json[vrf] or \
+ "ipv6Unicast" in show_bgp_json[vrf]:
+ errormsg = ("[DUT: %s] VRF: %s, "
+ "ipv4Unicast/ipv6Unicast"
+ " address-family present"
+ " under l2vpn" % (router,
+ vrf))
return errormsg
- l2VpnEvpn_data = show_bgp_json[vrf]["l2VpnEvpn"][
- "peers"
- ]
- nh_state = l2VpnEvpn_data[neighbor_ip]["state"]
+ l2VpnEvpn_data = \
+ show_bgp_json[vrf]["l2VpnEvpn"][
+ "peers"]
+ nh_state = \
+ l2VpnEvpn_data[neighbor_ip]["state"]
if nh_state == "Established":
no_of_evpn_peer += 1
if no_of_evpn_peer == total_evpn_peer:
- logger.info(
- "[DUT: %s] VRF: %s, BGP is Converged for " "epvn peers",
- router,
- vrf,
- )
+ logger.info("[DUT: %s] VRF: %s, BGP is Converged for "
+ "epvn peers", router, vrf)
+ result = True
else:
- errormsg = (
- "[DUT: %s] VRF: %s, BGP is not converged "
- "for evpn peers" % (router, vrf)
- )
+ errormsg = ("[DUT: %s] VRF: %s, BGP is not converged "
+ "for evpn peers" % (router, vrf))
return errormsg
+ else:
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
- logger.debug("Exiting API: verify_bgp_convergence()")
- return True
+ bgp_neighbors = \
+ bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += \
+ len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+ bgp_neighbors = \
+ bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for dest_link in peer_data["dest_link"].\
+ keys():
+ data = \
+ topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ peer_details = \
+ peer_data['dest_link'][dest_link]
+ # for link local neighbors
+ if "neighbor_type" in peer_details and \
+ peer_details["neighbor_type"] == \
+ 'link-local':
+ intf = topo["routers"][bgp_neighbor][
+ "links"][dest_link]["interface"]
+ neighbor_ip = get_frr_ipv6_linklocal(
+ tgen, bgp_neighbor, intf)
+ elif "source_link" in peer_details:
+ neighbor_ip = \
+ topo["routers"][bgp_neighbor][
+ "links"][peer_details[
+ 'source_link']][
+ addr_type].\
+ split("/")[0]
+ elif "neighbor_type" in peer_details and \
+ peer_details["neighbor_type"] == \
+ 'unnumbered':
+ neighbor_ip = \
+ data[dest_link]["peer-interface"]
+ else:
+ neighbor_ip = \
+ data[dest_link][addr_type].split(
+ "/")[0]
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json[vrf][
+ "ipv4Unicast"]["peers"]
+ nh_state = \
+ ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json[vrf][
+ "ipv6Unicast"]["peers"]
+ if neighbor_ip in ipv6_data:
+ nh_state = \
+ ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer and no_of_peer > 0:
+ logger.info("[DUT: %s] VRF: %s, BGP is Converged",
+ router, vrf)
+ result = True
+ else:
+ errormsg = ("[DUT: %s] VRF: %s, BGP is not converged"
+ % (router, vrf))
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
@retry(retry_timeout=16)
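Note that verify_bgp_convergence() now returns the accumulated result (True) on success or an error string on failure; a typical assertion sketch as used elsewhere in topotests:

    from lib.bgp import verify_bgp_convergence

    result = verify_bgp_convergence(tgen, topo)
    assert result is True, "BGP did not converge: {}".format(result)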
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index d659b8d52d..07bb5153ab 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -365,7 +365,7 @@ def create_common_configuration(
return True
-def kill_router_daemons(tgen, router, daemons):
+def kill_router_daemons(tgen, router, daemons, save_config=True):
"""
Router's current config would be saved to /etc/frr/ for each daemon
and daemon would be killed forcefully using SIGKILL.
@@ -379,9 +379,10 @@ def kill_router_daemons(tgen, router, daemons):
try:
router_list = tgen.routers()
- # Saving router config to /etc/frr, which will be loaded to router
- # when it starts
- router_list[router].vtysh_cmd("write memory")
+ if save_config:
+ # Saving router config to /etc/frr, which will be loaded to router
+ # when it starts
+ router_list[router].vtysh_cmd("write memory")
# Kill Daemons
result = router_list[router].killDaemons(daemons)
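With the new save_config flag, callers can skip the "write memory" step when the running configuration should not be persisted before the daemon is killed; a usage sketch (hypothetical router name):

    from lib.common_config import kill_router_daemons

    # SIGKILL bgpd on r1 without first saving the running config to /etc/frr
    kill_router_daemons(tgen, "r1", ["bgpd"], save_config=False)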
@@ -496,7 +497,7 @@ def reset_config_on_routers(tgen, routerName=None):
f.close()
run_cfg_file = "{}/{}/frr.sav".format(TMPDIR, rname)
init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname)
- command = "/usr/lib/frr/frr-reload.py --input {} --test {} > {}".format(
+ command = "/usr/lib/frr/frr-reload.py --test --test-reset --input {} {} > {}".format(
run_cfg_file, init_cfg_file, dname
)
result = call(command, shell=True, stderr=SUB_STDOUT, stdout=SUB_PIPE)
@@ -526,37 +527,9 @@ def reset_config_on_routers(tgen, routerName=None):
raise InvalidCLIError(out_data)
raise InvalidCLIError("Unknown error in %s", output)
- f = open(dname, "r")
delta = StringIO()
- delta.write("configure terminal\n")
- t_delta = f.read()
-
- # Don't disable debugs
- check_debug = True
-
- for line in t_delta.split("\n"):
- line = line.strip()
- if line == "Lines To Delete" or line == "===============" or not line:
- continue
-
- if line == "Lines To Add":
- check_debug = False
- continue
-
- if line == "============" or not line:
- continue
-
- # Leave debugs and log output alone
- if check_debug:
- if "debug" in line or "log file" in line:
- continue
-
- delta.write(line)
- delta.write("\n")
-
- f.close()
-
- delta.write("end\n")
+ with open(dname, "r") as f:
+ delta.write(f.read())
output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False)
@@ -636,6 +609,7 @@ def load_config_to_router(tgen, routerName, save_bkup=False):
return True
+
def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
"""
API to get the link local ipv6 address of a particular interface using
@@ -668,38 +642,48 @@ def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
else:
cmd = "show interface"
- ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
-
- # Fix newlines (make them all the same)
- ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
-
- interface = None
- ll_per_if_count = 0
- for line in ifaces:
- # Interface name
- m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
- if m:
- interface = m.group(1).split(" ")[0]
- ll_per_if_count = 0
-
- # Interface ip
- m1 = re_search("inet6 (fe80[:a-fA-F0-9]+[/0-9]+)", line)
- if m1:
- local = m1.group(1)
- ll_per_if_count += 1
- if ll_per_if_count > 1:
- linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
- else:
- linklocal += [[interface, local]]
-
- if linklocal:
- if intf:
- return [_linklocal[1] for _linklocal in linklocal if _linklocal[0] == intf][
- 0
- ].split("/")[0]
- return linklocal
- else:
- errormsg = "Link local ip missing on router {}"
+ linklocal = []
+ if vrf:
+ cmd = "show interface vrf {}".format(vrf)
+ else:
+ cmd = "show interface"
+ for chk_ll in range(0, 60):
+ sleep(1/4)
+ ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
+ # Fix newlines (make them all the same)
+ ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
+
+ interface = None
+ ll_per_if_count = 0
+ for line in ifaces:
+ # Interface name
+ m = re_search('Interface ([a-zA-Z0-9-]+) is', line)
+ if m:
+ interface = m.group(1).split(" ")[0]
+ ll_per_if_count = 0
+
+ # Interface ip
+ m1 = re_search('inet6 (fe80[:a-fA-F0-9]+[\/0-9]+)',
+ line)
+ if m1:
+ local = m1.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" %
+ (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+
+ try:
+ if linklocal:
+ if intf:
+ return [_linklocal[1] for _linklocal in linklocal if _linklocal[0]==intf][0].\
+ split("/")[0]
+ return linklocal
+ except IndexError:
+ continue
+
+ errormsg = "Link local ip missing on router {}".format(router)
return errormsg
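The rewritten helper now polls "show interface" up to 60 times, sleeping 0.25 s between attempts, before reporting the link-local address as missing; a usage sketch (hypothetical interface and VRF names):

    from lib.common_config import get_frr_ipv6_linklocal

    # returns the fe80:: address of r1-r2-eth0 (or the full per-interface list if intf is None)
    ll_addr = get_frr_ipv6_linklocal(tgen, "r1", intf="r1-r2-eth0", vrf="RED")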
@@ -1845,6 +1829,14 @@ def create_interfaces_cfg(tgen, topo, build=False):
else:
interface_data.append("ipv6 address {}".format(intf_addr))
+ # Wait for vrf interfaces to get link local address once they are up
+ if not destRouterLink == 'lo' and 'vrf' in topo[c_router][
+ 'links'][destRouterLink]:
+ vrf = topo[c_router]['links'][destRouterLink]['vrf']
+ intf = topo[c_router]['links'][destRouterLink]['interface']
+ ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf,
+ vrf = vrf)
+
if "ipv6-link-local" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6-link-local"]
@@ -1867,7 +1859,7 @@ def create_interfaces_cfg(tgen, topo, build=False):
)
if "ospf6" in data:
interface_data += _create_interfaces_ospf_cfg(
- "ospf6", c_data, data, ospf_keywords
+ "ospf6", c_data, data, ospf_keywords + ["area"]
)
result = create_common_configuration(
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index dc9fe0fcca..6aa7a2c0a9 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -28,6 +28,7 @@ from time import sleep
from lib.topolog import logger
from lib.topotest import frr_unicode
from ipaddress import IPv6Address
+
# Import common_config to use commomnly used APIs
from lib.common_config import (
create_common_configuration,
@@ -89,8 +90,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
logger.debug("Router %s: 'ospf' not present in input_dict", router)
continue
- result = __create_ospf_global(
- tgen, input_dict, router, build, load_config)
+ result = __create_ospf_global(tgen, input_dict, router, build, load_config)
if result is True:
ospf_data = input_dict[router]["ospf"]
@@ -100,7 +100,8 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
continue
result = __create_ospf_global(
- tgen, input_dict, router, build, load_config, ospf='ospf6')
+ tgen, input_dict, router, build, load_config, ospf="ospf6"
+ )
if result is True:
ospf_data = input_dict[router]["ospf6"]
@@ -172,7 +173,6 @@ def __create_ospf_global(
config_data.append(cmd)
-
# router id
router_id = ospf_data.setdefault("router_id", None)
del_router_id = ospf_data.setdefault("del_router_id", False)
@@ -187,8 +187,7 @@ def __create_ospf_global(
if del_log_adj_changes:
config_data.append("no log-adjacency-changes detail")
if log_adj_changes:
- config_data.append("log-adjacency-changes {}".format(
- log_adj_changes))
+ config_data.append("log-adjacency-changes {}".format(log_adj_changes))
# aggregation timer
aggr_timer = ospf_data.setdefault("aggr_timer", None)
@@ -196,8 +195,7 @@ def __create_ospf_global(
if del_aggr_timer:
config_data.append("no aggregation timer")
if aggr_timer:
- config_data.append("aggregation timer {}".format(
- aggr_timer))
+ config_data.append("aggregation timer {}".format(aggr_timer))
# maximum path information
ecmp_data = ospf_data.setdefault("maximum-paths", {})
@@ -245,12 +243,13 @@ def __create_ospf_global(
cmd = "no {}".format(cmd)
config_data.append(cmd)
- #def route information
+ # def route information
def_rte_data = ospf_data.setdefault("default-information", {})
if def_rte_data:
if "originate" not in def_rte_data:
- logger.debug("Router %s: 'originate key' not present in "
- "input_dict", router)
+ logger.debug(
+ "Router %s: 'originate key' not present in " "input_dict", router
+ )
else:
cmd = "default-information originate"
@@ -261,12 +260,10 @@ def __create_ospf_global(
cmd = cmd + " metric {}".format(def_rte_data["metric"])
if "metric-type" in def_rte_data:
- cmd = cmd + " metric-type {}".format(def_rte_data[
- "metric-type"])
+ cmd = cmd + " metric-type {}".format(def_rte_data["metric-type"])
if "route-map" in def_rte_data:
- cmd = cmd + " route-map {}".format(def_rte_data[
- "route-map"])
+ cmd = cmd + " route-map {}".format(def_rte_data["route-map"])
del_action = def_rte_data.setdefault("delete", False)
if del_action:
@@ -288,19 +285,19 @@ def __create_ospf_global(
config_data.append(cmd)
try:
- if "area" in input_dict[router]['links'][neighbor][
- 'ospf6']:
+ if "area" in input_dict[router]["links"][neighbor]["ospf6"]:
iface = input_dict[router]["links"][neighbor]["interface"]
cmd = "interface {} area {}".format(
- iface, input_dict[router]['links'][neighbor][
- 'ospf6']['area'])
- if input_dict[router]['links'][neighbor].setdefault(
- "delete", False):
+ iface,
+ input_dict[router]["links"][neighbor]["ospf6"]["area"],
+ )
+ if input_dict[router]["links"][neighbor].setdefault(
+ "delete", False
+ ):
cmd = "no {}".format(cmd)
config_data.append(cmd)
except KeyError:
- pass
-
+ pass
# summary information
summary_data = ospf_data.setdefault("summary-address", {})
@@ -420,6 +417,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=
True or False
"""
logger.debug("Enter lib config_ospf_interface")
+ result = False
if not input_dict:
input_dict = deepcopy(topo)
else:
@@ -502,7 +500,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=
# interface ospf mtu
if data_ospf_mtu:
cmd = "ip ospf mtu-ignore"
- if 'del_action' in ospf_data:
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
@@ -543,8 +541,7 @@ def clear_ospf(tgen, router, ospf=None):
version = "ip"
cmd = "clear {} ospf interface".format(version)
- logger.info(
- "Clearing ospf process on router %s.. using command '%s'", router, cmd)
+ logger.info("Clearing ospf process on router %s.. using command '%s'", router, cmd)
run_frr_cmd(rnode, cmd)
logger.debug("Exiting lib API: clear_ospf()")
@@ -774,7 +771,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expec
################################
# Verification procs
################################
-@retry(retry_timeout=20)
+@retry(retry_timeout=50)
def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
"""
This API is to verify ospf neighborship by running
@@ -825,105 +822,133 @@ def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
if input_dict:
for router, rnode in tgen.routers().items():
- if 'ospf6' not in topo['routers'][router]:
+ if "ospf6" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying OSPF neighborship on router %s:", router)
- show_ospf_json = run_frr_cmd(rnode,
- "show ipv6 ospf neighbor json", isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf neighbor json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
errormsg = "OSPF6 is not running"
return errormsg
ospf_data_list = input_dict[router]["ospf6"]
- ospf_nbr_list = ospf_data_list['neighbors']
+ ospf_nbr_list = ospf_data_list["neighbors"]
for ospf_nbr, nbr_data in ospf_nbr_list.items():
- data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id']
+
+ try:
+ data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
+ except KeyError:
+ data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
+ "router_id"
+ ]
+
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
- for switch in topo['switches']:
- if 'ospf6' in topo['switches'][switch]['links'][router]:
+ for switch in topo["switches"]:
+ if "ospf6" in topo["switches"][switch]["links"][router]:
neighbor_ip = data_ip
else:
continue
else:
- neighbor_ip = data_ip[router]['ipv6'].split("/")[0]
+ neighbor_ip = data_ip[router]["ipv6"].split("/")[0]
nh_state = None
neighbor_ip = neighbor_ip.lower()
nbr_rid = data_rid
- get_index_val = dict((d['neighborId'], dict( \
- d, index=index)) for (index, d) in enumerate( \
- show_ospf_json['neighbors']))
+ get_index_val = dict(
+ (d["neighborId"], dict(d, index=index))
+ for (index, d) in enumerate(show_ospf_json["neighbors"])
+ )
try:
- nh_state = get_index_val.get(neighbor_ip)['state']
- intf_state = get_index_val.get(neighbor_ip)['ifState']
+ nh_state = get_index_val.get(neighbor_ip)["state"]
+ intf_state = get_index_val.get(neighbor_ip)["ifState"]
except TypeError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from "\
- "{} ".format(router,
- nbr_rid, ospf_nbr)
+ errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
+ router, nbr_rid, ospf_nbr
+ )
return errormsg
- nbr_state = nbr_data.setdefault("state",None)
- nbr_role = nbr_data.setdefault("role",None)
+ nbr_state = nbr_data.setdefault("state", None)
+ nbr_role = nbr_data.setdefault("role", None)
if nbr_state:
if nbr_state == nh_state:
- logger.info("[DUT: {}] OSPF6 Nbr is {}:{} State {}".format
- (router, ospf_nbr, nbr_rid, nh_state))
+ logger.info(
+ "[DUT: {}] OSPF6 Nbr is {}:{} State {}".format(
+ router, ospf_nbr, nbr_rid, nh_state
+ )
+ )
result = True
else:
- errormsg = ("[DUT: {}] OSPF6 is not Converged, neighbor"
- " state is {} , Expected state is {}".format(router,
- nh_state, nbr_state))
+ errormsg = (
+ "[DUT: {}] OSPF6 is not Converged, neighbor"
+ " state is {} , Expected state is {}".format(
+ router, nh_state, nbr_state
+ )
+ )
return errormsg
if nbr_role:
if nbr_role == intf_state:
- logger.info("[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format(
- router, ospf_nbr, nbr_rid, nbr_role))
+ logger.info(
+ "[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format(
+ router, ospf_nbr, nbr_rid, nbr_role
+ )
+ )
else:
- errormsg = ("[DUT: {}] OSPF6 is not Converged with rid"
- "{}, role is {}, Expected role is {}".format(router,
- nbr_rid, intf_state, nbr_role))
+ errormsg = (
+ "[DUT: {}] OSPF6 is not Converged with rid"
+ "{}, role is {}, Expected role is {}".format(
+ router, nbr_rid, intf_state, nbr_role
+ )
+ )
return errormsg
continue
else:
for router, rnode in tgen.routers().items():
- if 'ospf6' not in topo['routers'][router]:
+ if "ospf6" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying OSPF6 neighborship on router %s:", router)
- show_ospf_json = run_frr_cmd(rnode,
- "show ipv6 ospf neighbor json", isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf neighbor json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
errormsg = "OSPF6 is not running"
return errormsg
ospf_data_list = topo["routers"][router]["ospf6"]
- ospf_neighbors = ospf_data_list['neighbors']
+ ospf_neighbors = ospf_data_list["neighbors"]
total_peer = 0
total_peer = len(ospf_neighbors.keys())
no_of_ospf_nbr = 0
- ospf_nbr_list = ospf_data_list['neighbors']
+ ospf_nbr_list = ospf_data_list["neighbors"]
no_of_peer = 0
for ospf_nbr, nbr_data in ospf_nbr_list.items():
- data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id']
+ try:
+ data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
+ except KeyError:
+ data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
+ "router_id"
+ ]
+
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
- for switch in topo['switches']:
- if 'ospf6' in topo['switches'][switch]['links'][router]:
+ for switch in topo["switches"]:
+ if "ospf6" in topo["switches"][switch]["links"][router]:
neighbor_ip = data_ip
else:
continue
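The KeyError fallback above lets neighbor entries be keyed by an arbitrary label as long as they carry an explicit "nbr" reference to the real router; a sketch (hypothetical names):

    topo["routers"]["r1"]["ospf6"]["neighbors"] = {
        "r2": {},                   # keyed by router name: router_id looked up directly
        "r2-link1": {"nbr": "r2"},  # arbitrary key: router_id taken from topo["routers"]["r2"]
    }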
@@ -933,26 +958,27 @@ def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
nh_state = None
neighbor_ip = neighbor_ip.lower()
nbr_rid = data_rid
- get_index_val = dict((d['neighborId'], dict( \
- d, index=index)) for (index, d) in enumerate( \
- show_ospf_json['neighbors']))
+ get_index_val = dict(
+ (d["neighborId"], dict(d, index=index))
+ for (index, d) in enumerate(show_ospf_json["neighbors"])
+ )
try:
- nh_state = get_index_val.get(neighbor_ip)['state']
- intf_state = get_index_val.get(neighbor_ip)['ifState']
+ nh_state = get_index_val.get(neighbor_ip)["state"]
+ intf_state = get_index_val.get(neighbor_ip)["ifState"]
except TypeError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from "\
- "{} ".format(router,
- nbr_rid, ospf_nbr)
+ errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
+ router, nbr_rid, ospf_nbr
+ )
return errormsg
- if nh_state == 'Full':
+ if nh_state == "Full":
no_of_peer += 1
if no_of_peer == total_peer:
logger.info("[DUT: {}] OSPF6 is Converged".format(router))
result = True
else:
- errormsg = ("[DUT: {}] OSPF6 is not Converged".format(router))
+ errormsg = "[DUT: {}] OSPF6 is not Converged".format(router)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1627,31 +1653,34 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
found_routes = []
missing_routes = []
- if "static_routes" in input_dict[routerInput] or \
- "prefix" in input_dict[routerInput]:
+ if (
+ "static_routes" in input_dict[routerInput]
+ or "prefix" in input_dict[routerInput]
+ ):
if "prefix" in input_dict[routerInput]:
static_routes = input_dict[routerInput]["prefix"]
else:
static_routes = input_dict[routerInput]["static_routes"]
-
for static_route in static_routes:
cmd = "{}".format(command)
cmd = "{} json".format(cmd)
- ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True)
+ ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True)
# Fix for PR 2644182
try:
- ospf_rib_json = ospf_rib_json['routes']
+ ospf_rib_json = ospf_rib_json["routes"]
except KeyError:
pass
# Verifying output dictionary ospf_rib_json is not empty
if bool(ospf_rib_json) is False:
- errormsg = "[DUT: {}] No routes found in OSPF6 route " \
+ errormsg = (
+ "[DUT: {}] No routes found in OSPF6 route "
"table".format(router)
+ )
return errormsg
network = static_route["network"]
@@ -1659,7 +1688,6 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
_tag = static_route.setdefault("tag", None)
_rtype = static_route.setdefault("routeType", None)
-
# Generating IPs for verification
ip_list = generate_ips(network, no_of_ip)
st_found = False
@@ -1668,7 +1696,7 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
_addr_type = validate_ip_address(st_rt)
- if _addr_type != 'ipv6':
+ if _addr_type != "ipv6":
continue
if st_rt in ospf_rib_json:
@@ -1681,17 +1709,26 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
next_hop = [next_hop]
for mnh in range(0, len(ospf_rib_json[st_rt])):
- if 'fib' in ospf_rib_json[st_rt][
- mnh]["nextHops"][0]:
- found_hops.append([rib_r[
- "ip"] for rib_r in ospf_rib_json[
- st_rt][mnh]["nextHops"]])
+ if (
+ "fib"
+ in ospf_rib_json[st_rt][mnh]["nextHops"][0]
+ ):
+ found_hops.append(
+ [
+ rib_r["ip"]
+ for rib_r in ospf_rib_json[st_rt][mnh][
+ "nextHops"
+ ]
+ ]
+ )
if found_hops[0]:
- missing_list_of_nexthops = \
- set(found_hops[0]).difference(next_hop)
- additional_nexthops_in_required_nhs = \
- set(next_hop).difference(found_hops[0])
+ missing_list_of_nexthops = set(
+ found_hops[0]
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops[0])
if additional_nexthops_in_required_nhs:
logger.info(
@@ -1699,13 +1736,18 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
"%s is not active for route %s in "
"RIB of router %s\n",
additional_nexthops_in_required_nhs,
- st_rt, dut)
+ st_rt,
+ dut,
+ )
errormsg = (
"Nexthop {} is not active"
" for route {} in RIB of router"
" {}\n".format(
- additional_nexthops_in_required_nhs,
- st_rt, dut))
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
return errormsg
else:
nh_found = True
@@ -1713,98 +1755,118 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
elif next_hop and fib is None:
if type(next_hop) is not list:
next_hop = [next_hop]
- found_hops = [rib_r['nextHop'] for rib_r in
- ospf_rib_json[st_rt][
- "nextHops"]]
+ found_hops = [
+ rib_r["nextHop"]
+ for rib_r in ospf_rib_json[st_rt]["nextHops"]
+ ]
if found_hops:
- missing_list_of_nexthops = \
- set(found_hops).difference(next_hop)
- additional_nexthops_in_required_nhs = \
- set(next_hop).difference(found_hops)
+ missing_list_of_nexthops = set(
+ found_hops
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops)
if additional_nexthops_in_required_nhs:
logger.info(
- "Missing nexthop %s for route"\
- " %s in RIB of router %s\n", \
- additional_nexthops_in_required_nhs, \
- st_rt, dut)
- errormsg=("Nexthop {} is Missing for "\
- "route {} in RIB of router {}\n".format(
+ "Missing nexthop %s for route"
+ " %s in RIB of router %s\n",
additional_nexthops_in_required_nhs,
- st_rt, dut))
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is Missing for "
+ "route {} in RIB of router {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
return errormsg
else:
nh_found = True
if _rtype:
- if "destinationType" not in ospf_rib_json[
- st_rt]:
- errormsg = ("[DUT: {}]: destinationType missing"
- "for route {} in OSPF RIB \n".\
- format(dut, st_rt))
+ if "destinationType" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: destinationType missing"
+ "for route {} in OSPF RIB \n".format(dut, st_rt)
+ )
return errormsg
- elif _rtype != ospf_rib_json[st_rt][
- "destinationType"]:
- errormsg = ("[DUT: {}]: destinationType mismatch"
- "for route {} in OSPF RIB \n".\
- format(dut, st_rt))
+ elif _rtype != ospf_rib_json[st_rt]["destinationType"]:
+ errormsg = (
+ "[DUT: {}]: destinationType mismatch"
+ "for route {} in OSPF RIB \n".format(dut, st_rt)
+ )
return errormsg
else:
- logger.info("DUT: {}]: Found destinationType {}"
- "for route {}".\
- format(dut, _rtype, st_rt))
+ logger.info(
+ "DUT: {}]: Found destinationType {}"
+ "for route {}".format(dut, _rtype, st_rt)
+ )
if tag:
- if "tag" not in ospf_rib_json[
- st_rt]:
- errormsg = ("[DUT: {}]: tag is not"
- " present for"
- " route {} in RIB \n".\
- format(dut, st_rt
- ))
+ if "tag" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: tag is not"
+ " present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
return errormsg
- if _tag != ospf_rib_json[
- st_rt]["tag"]:
- errormsg = ("[DUT: {}]: tag value {}"
- " is not matched for"
- " route {} in RIB \n".\
- format(dut, _tag, st_rt,
- ))
+ if _tag != ospf_rib_json[st_rt]["tag"]:
+ errormsg = (
+ "[DUT: {}]: tag value {}"
+ " is not matched for"
+ " route {} in RIB \n".format(
+ dut,
+ _tag,
+ st_rt,
+ )
+ )
return errormsg
if metric is not None:
- if "type2cost" not in ospf_rib_json[
- st_rt]:
- errormsg = ("[DUT: {}]: metric is"
- " not present for"
- " route {} in RIB \n".\
- format(dut, st_rt))
+ if "type2cost" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: metric is"
+ " not present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
return errormsg
- if metric != ospf_rib_json[
- st_rt]["type2cost"]:
- errormsg = ("[DUT: {}]: metric value "
- "{} is not matched for "
- "route {} in RIB \n".\
- format(dut, metric, st_rt,
- ))
+ if metric != ospf_rib_json[st_rt]["type2cost"]:
+ errormsg = (
+ "[DUT: {}]: metric value "
+ "{} is not matched for "
+ "route {} in RIB \n".format(
+ dut,
+ metric,
+ st_rt,
+ )
+ )
return errormsg
else:
missing_routes.append(st_rt)
if nh_found:
- logger.info("[DUT: {}]: Found next_hop {} for all OSPF"
- " routes in RIB".format(router, next_hop))
+ logger.info(
+ "[DUT: {}]: Found next_hop {} for all OSPF"
+ " routes in RIB".format(router, next_hop)
+ )
if len(missing_routes) > 0:
- errormsg = ("[DUT: {}]: Missing route in RIB, "
- "routes: {}".\
- format(dut, missing_routes))
+ errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format(
+ dut, missing_routes
+ )
return errormsg
if found_routes:
- logger.info("[DUT: %s]: Verified routes in RIB, found"
- " routes are: %s\n", dut, found_routes)
+ logger.info(
+ "[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n",
+ dut,
+ found_routes,
+ )
result = True
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1855,15 +1917,16 @@ def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
result = False
for router, rnode in tgen.routers().iteritems():
- if 'ospf6' not in topo['routers'][router]:
+ if "ospf6" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying OSPF interface on router %s:", router)
- show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf interface json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf interface json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
@@ -1873,32 +1936,49 @@ def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
# To find neighbor ip type
ospf_intf_data = input_dict[router]["links"]
for ospf_intf, intf_data in ospf_intf_data.items():
- intf = topo['routers'][router]['links'][ospf_intf]['interface']
- if intf in show_ospf_json:
- for intf_attribute in intf_data['ospf6']:
- if intf_data['ospf6'][intf_attribute] is not list:
- if intf_data['ospf6'][intf_attribute] == show_ospf_json[
- intf][intf_attribute]:
- logger.info("[DUT: %s] OSPF6 interface %s: %s is %s",
- router, intf, intf_attribute, intf_data['ospf6'][
- intf_attribute])
- elif intf_data['ospf6'][intf_attribute] is list:
+ intf = topo["routers"][router]["links"][ospf_intf]["interface"]
+ if intf in show_ospf_json:
+ for intf_attribute in intf_data["ospf6"]:
+ if intf_data["ospf6"][intf_attribute] is not list:
+ if (
+ intf_data["ospf6"][intf_attribute]
+ == show_ospf_json[intf][intf_attribute]
+ ):
+ logger.info(
+ "[DUT: %s] OSPF6 interface %s: %s is %s",
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf6"][intf_attribute],
+ )
+ elif intf_data["ospf6"][intf_attribute] is list:
for addr_list in len(show_ospf_json[intf][intf_attribute]):
- if show_ospf_json[intf][intf_attribute][addr_list][
- 'address'].split('/')[0] == intf_data['ospf6'][
- 'internetAddress'][0]['address']:
- break
+ if (
+ show_ospf_json[intf][intf_attribute][addr_list][
+ "address"
+ ].split("/")[0]
+ == intf_data["ospf6"]["internetAddress"][0]["address"]
+ ):
+ break
else:
- errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \
- Expected is {}".format(router, intf, intf_attribute,
- intf_data['ospf6'][intf_attribute], intf_data['ospf6'][
- intf_attribute])
+ errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \
+ Expected is {}".format(
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf6"][intf_attribute],
+ intf_data["ospf6"][intf_attribute],
+ )
return errormsg
else:
- errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \
- Expected is {}".format(router, intf, intf_attribute,
- intf_data['ospf6'][intf_attribute], intf_data['ospf6'][
- intf_attribute])
+ errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \
+ Expected is {}".format(
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf6"][intf_attribute],
+ intf_data["ospf6"][intf_attribute],
+ )
return errormsg
result = True
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1956,16 +2036,14 @@ def verify_ospf6_database(tgen, topo, dut, input_dict):
router = dut
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- if 'ospf' not in topo['routers'][dut]:
- errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
- dut)
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
return errormsg
rnode = tgen.routers()[dut]
logger.info("Verifying OSPF interface on router %s:", dut)
- show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", isjson=True)
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
errormsg = "OSPF is not running"
@@ -1973,167 +2051,209 @@ def verify_ospf6_database(tgen, topo, dut, input_dict):
# for inter and inter lsa's
ospf_db_data = input_dict.setdefault("areas", None)
- ospf_external_lsa = input_dict.setdefault(
- 'asExternalLinkStates', None)
+ ospf_external_lsa = input_dict.setdefault("asExternalLinkStates", None)
if ospf_db_data:
- for ospf_area, area_lsa in ospf_db_data.items():
- if ospf_area in show_ospf_json['areas']:
- if 'routerLinkStates' in area_lsa:
- for lsa in area_lsa['routerLinkStates']:
- for rtrlsa in show_ospf_json['areas'][ospf_area][
- 'routerLinkStates']:
- if lsa['lsaId'] == rtrlsa['lsaId'] and \
- lsa['advertisedRouter'] == rtrlsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Router "
- "LSA %s", router, ospf_area, lsa)
+ for ospf_area, area_lsa in ospf_db_data.items():
+ if ospf_area in show_ospf_json["areas"]:
+ if "routerLinkStates" in area_lsa:
+ for lsa in area_lsa["routerLinkStates"]:
+ for rtrlsa in show_ospf_json["areas"][ospf_area][
+ "routerLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == rtrlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == rtrlsa["advertisedRouter"]
+ ):
+ result = True
break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Router LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'networkLinkStates' in area_lsa:
- for lsa in area_lsa['networkLinkStates']:
- for netlsa in show_ospf_json['areas'][ospf_area][
- 'networkLinkStates']:
- if lsa in show_ospf_json['areas'][ospf_area][
- 'networkLinkStates']:
- if lsa['lsaId'] == netlsa['lsaId'] and \
- lsa['advertisedRouter'] == netlsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Network "
- "LSA %s", router, ospf_area, lsa)
- break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if "networkLinkStates" in area_lsa:
+ for lsa in area_lsa["networkLinkStates"]:
+ for netlsa in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]:
+ if (
+ lsa
+ in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]
+ ):
+ if (
+ lsa["lsaId"] == netlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == netlsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Network LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'summaryLinkStates' in area_lsa:
- for lsa in area_lsa['summaryLinkStates']:
- for t3lsa in show_ospf_json['areas'][ospf_area][
- 'summaryLinkStates']:
- if lsa['lsaId'] == t3lsa['lsaId'] and \
- lsa['advertisedRouter'] == t3lsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Summary "
- "LSA %s", router, ospf_area, lsa)
+ if "summaryLinkStates" in area_lsa:
+ for lsa in area_lsa["summaryLinkStates"]:
+ for t3lsa in show_ospf_json["areas"][ospf_area][
+ "summaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t3lsa["lsaId"]
+ and lsa["advertisedRouter"] == t3lsa["advertisedRouter"]
+ ):
+ result = True
break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Summary LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'nssaExternalLinkStates' in area_lsa:
- for lsa in area_lsa['nssaExternalLinkStates']:
- for t7lsa in show_ospf_json['areas'][ospf_area][
- 'nssaExternalLinkStates']:
- if lsa['lsaId'] == t7lsa['lsaId'] and \
- lsa['advertisedRouter'] == t7lsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Type7 "
- "LSA %s", router, ospf_area, lsa)
+ if "nssaExternalLinkStates" in area_lsa:
+ for lsa in area_lsa["nssaExternalLinkStates"]:
+ for t7lsa in show_ospf_json["areas"][ospf_area][
+ "nssaExternalLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t7lsa["lsaId"]
+ and lsa["advertisedRouter"] == t7lsa["advertisedRouter"]
+ ):
+ result = True
break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Type7 " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Type7 LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'asbrSummaryLinkStates' in area_lsa:
- for lsa in area_lsa['asbrSummaryLinkStates']:
- for t4lsa in show_ospf_json['areas'][ospf_area][
- 'asbrSummaryLinkStates']:
- if lsa['lsaId'] == t4lsa['lsaId'] and \
- lsa['advertisedRouter'] == t4lsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:ASBR Summary "
- "LSA %s", router, ospf_area, lsa)
+ if "asbrSummaryLinkStates" in area_lsa:
+ for lsa in area_lsa["asbrSummaryLinkStates"]:
+ for t4lsa in show_ospf_json["areas"][ospf_area][
+ "asbrSummaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t4lsa["lsaId"]
+ and lsa["advertisedRouter"] == t4lsa["advertisedRouter"]
+ ):
result = True
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
- " ASBR Summary LSA is {}".format(
- router, ospf_area, lsa)
- return errormsg
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " ASBR Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
- if 'linkLocalOpaqueLsa' in area_lsa:
- for lsa in area_lsa['linkLocalOpaqueLsa']:
- try:
- for lnklsa in show_ospf_json['areas'][ospf_area][
- 'linkLocalOpaqueLsa']:
- if lsa['lsaId'] in lnklsa['lsaId'] and \
- 'linkLocalOpaqueLsa' in show_ospf_json[
- 'areas'][ospf_area]:
- logger.info((
- "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
- "%s", ospf_area, lsa))
- result = True
- else:
- errormsg = ("[DUT: FRR] OSPF LSDB area: {} "
- "expected Opaque-LSA is {}, Found is {}".format(
- ospf_area, lsa, show_ospf_json))
- raise ValueError (errormsg)
- return errormsg
- except KeyError:
- errormsg = ("[DUT: FRR] linkLocalOpaqueLsa Not "
- "present")
- return errormsg
+ if "linkLocalOpaqueLsa" in area_lsa:
+ for lsa in area_lsa["linkLocalOpaqueLsa"]:
+ try:
+ for lnklsa in show_ospf_json["areas"][ospf_area][
+ "linkLocalOpaqueLsa"
+ ]:
+ if (
+ lsa["lsaId"] in lnklsa["lsaId"]
+ and "linkLocalOpaqueLsa"
+ in show_ospf_json["areas"][ospf_area]
+ ):
+ logger.info(
+ (
+ "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
+ "%s",
+ ospf_area,
+ lsa,
+ )
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: FRR] OSPF LSDB area: {} "
+ "expected Opaque-LSA is {}, Found is {}".format(
+ ospf_area, lsa, show_ospf_json
+ )
+ )
+ raise ValueError(errormsg)
+ return errormsg
+ except KeyError:
+ errormsg = "[DUT: FRR] linkLocalOpaqueLsa Not " "present"
+ return errormsg
if ospf_external_lsa:
- for lsa in ospf_external_lsa:
- try:
- for t5lsa in show_ospf_json['asExternalLinkStates']:
- if lsa['lsaId'] == t5lsa['lsaId'] and \
- lsa['advertisedRouter'] == t5lsa[
- 'advertisedRouter']:
- result = True
- break
- except KeyError:
- result = False
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB:External LSA %s",
- router, lsa)
- result = True
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB : expected" \
- " External LSA is {}".format(router, lsa)
- return errormsg
+ for lsa in ospf_external_lsa:
+ try:
+ for t5lsa in show_ospf_json["asExternalLinkStates"]:
+ if (
+ lsa["lsaId"] == t5lsa["lsaId"]
+ and lsa["advertisedRouter"] == t5lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ except KeyError:
+ result = False
+ if result:
+ logger.info("[DUT: %s] OSPF LSDB:External LSA %s", router, lsa)
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB : expected"
+ " External LSA is {}".format(router, lsa)
+ )
+ return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
-
-def config_ospf6_interface (tgen, topo, input_dict=None, build=False,
- load_config=True):
+def config_ospf6_interface(tgen, topo, input_dict=None, build=False, load_config=True):
"""
API to configure ospf on router.
@@ -2180,17 +2300,17 @@ def config_ospf6_interface (tgen, topo, input_dict=None, build=False,
"input_dict, passed input_dict %s", router,
str(input_dict))
continue
- ospf_data = input_dict[router]['links'][lnk]['ospf6']
+ ospf_data = input_dict[router]["links"][lnk]["ospf6"]
data_ospf_area = ospf_data.setdefault("area", None)
- data_ospf_auth = ospf_data.setdefault("authentication", None)
+ data_ospf_auth = ospf_data.setdefault("hash-algo", None)
data_ospf_dr_priority = ospf_data.setdefault("priority", None)
data_ospf_cost = ospf_data.setdefault("cost", None)
data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None)
try:
- intf = topo['routers'][router]['links'][lnk]['interface']
+ intf = topo["routers"][router]["links"][lnk]["interface"]
except KeyError:
- intf = topo['switches'][router]['links'][lnk]['interface']
+ intf = topo["switches"][router]["links"][lnk]["interface"]
# interface
cmd = "interface {}".format(intf)
@@ -2201,34 +2321,50 @@ def config_ospf6_interface (tgen, topo, input_dict=None, build=False,
cmd = "ipv6 ospf area {}".format(data_ospf_area)
config_data.append(cmd)
+ # interface ospf auth
+ if data_ospf_auth:
+ cmd = "ipv6 ospf6 authentication"
+
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+
+ if "hash-algo" in ospf_data:
+ cmd = "{} key-id {} hash-algo {} key {}".format(
+ cmd,
+ ospf_data["key-id"],
+ ospf_data["hash-algo"],
+ ospf_data["key"],
+ )
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
# interface ospf dr priority
if data_ospf_dr_priority:
- cmd = "ipv6 ospf priority {}".format(
- ospf_data["priority"])
- if 'del_action' in ospf_data:
+ cmd = "ipv6 ospf priority {}".format(ospf_data["priority"])
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
# interface ospf cost
if data_ospf_cost:
- cmd = "ipv6 ospf cost {}".format(
- ospf_data["cost"])
- if 'del_action' in ospf_data:
+ cmd = "ipv6 ospf cost {}".format(ospf_data["cost"])
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
# interface ospf mtu
if data_ospf_mtu:
cmd = "ipv6 ospf mtu-ignore"
- if 'del_action' in ospf_data:
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
if build:
return config_data
else:
- result = create_common_configuration(tgen, router, config_data,
- "interface_config",
- build=build)
+ result = create_common_configuration(
+ tgen, router, config_data, "interface_config", build=build
+ )
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
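A minimal sketch (hypothetical values) of the per-link "ospf6" dict that drives the new authentication branch in config_ospf6_interface():

    from lib.ospf import config_ospf6_interface

    input_dict = {
        "r1": {
            "links": {
                "r2": {
                    "ospf6": {
                        "hash-algo": "hmac-sha-256",
                        "key-id": "10",
                        "key": "ospf6",
                        # "del_action": True would prefix the command with "no"
                    }
                }
            }
        }
    }
    # emits: ipv6 ospf6 authentication key-id 10 hash-algo hmac-sha-256 key ospf6
    config_ospf6_interface(tgen, topo, input_dict)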
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index ade5933504..b998878118 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -801,8 +801,8 @@ class TopoRouter(TopoGear):
try:
return json.loads(output)
- except ValueError:
- logger.warning("vtysh_cmd: failed to convert json output")
+ except ValueError as error:
+ logger.warning("vtysh_cmd: %s: failed to convert json output: %s: %s", self.name, str(output), str(error))
return {}
def vtysh_multicmd(self, commands, pretty_output=True, daemon=None):
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index d1f60bfe0d..b516a67d5c 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1152,6 +1152,18 @@ class Router(Node):
self.reportCores = True
self.version = None
+ self.ns_cmd = "sudo nsenter -m -n -t {} ".format(self.pid)
+ try:
+ # Allow escaping from running inside docker
+ cgroup = open("/proc/1/cgroup").read()
+ m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
+ if m:
+ self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
+ except IOError:
+ pass
+ else:
+ logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
+
def _config_frr(self, **params):
"Configure FRR binaries"
self.daemondir = params.get("frrdir")
@@ -1223,25 +1235,28 @@ class Router(Node):
dmns = rundaemons.split("\n")
# Exclude empty string at end of list
for d in dmns[:-1]:
- daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
- if daemonpid.isdigit() and pid_exists(int(daemonpid)):
- daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0])
- logger.info("{}: stopping {}".format(self.name, daemonname))
- try:
- os.kill(int(daemonpid), signal.SIGTERM)
- except OSError as err:
- if err.errno == errno.ESRCH:
- logger.error(
- "{}: {} left a dead pidfile (pid={})".format(
- self.name, daemonname, daemonpid
+ # Only check if daemonfilepath starts with /
+ # Avoids hang on "-> Connection closed" in above self.cmd()
+ if d[0] == '/':
+ daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
+ if daemonpid.isdigit() and pid_exists(int(daemonpid)):
+ daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0])
+ logger.info("{}: stopping {}".format(self.name, daemonname))
+ try:
+ os.kill(int(daemonpid), signal.SIGTERM)
+ except OSError as err:
+ if err.errno == errno.ESRCH:
+ logger.error(
+ "{}: {} left a dead pidfile (pid={})".format(
+ self.name, daemonname, daemonpid
+ )
)
- )
- else:
- logger.info(
- "{}: {} could not kill pid {}: {}".format(
- self.name, daemonname, daemonpid, str(err)
+ else:
+ logger.info(
+ "{}: {} could not kill pid {}: {}".format(
+ self.name, daemonname, daemonpid, str(err)
+ )
)
- )
if not wait:
return errors
@@ -1350,7 +1365,7 @@ class Router(Node):
term = topo_terminal if topo_terminal else "xterm"
makeTerm(self, title=title if title else cmd, term=term, cmd=cmd)
else:
- nscmd = "sudo nsenter -m -n -t {} {}".format(self.pid, cmd)
+ nscmd = self.ns_cmd + cmd
if "TMUX" in os.environ:
self.cmd("tmux select-layout main-horizontal")
wcmd = "tmux split-window -h"
@@ -1451,11 +1466,13 @@ class Router(Node):
def startRouterDaemons(self, daemons=None):
"Starts all FRR daemons for this router."
+ asan_abort = g_extra_config["asan_abort"]
gdb_breakpoints = g_extra_config["gdb_breakpoints"]
gdb_daemons = g_extra_config["gdb_daemons"]
gdb_routers = g_extra_config["gdb_routers"]
valgrind_extra = g_extra_config["valgrind_extra"]
valgrind_memleaks = g_extra_config["valgrind_memleaks"]
+ strace_daemons = g_extra_config["strace_daemons"]
bundle_data = ""
@@ -1482,7 +1499,6 @@ class Router(Node):
os.path.join(self.daemondir, "bgpd") + " -v"
).split()[2]
logger.info("{}: running version: {}".format(self.name, self.version))
-
# If `daemons` was specified then some upper API called us with
# specific daemons, otherwise just use our own configuration.
daemons_list = []
@@ -1506,13 +1522,20 @@ class Router(Node):
else:
binary = os.path.join(self.daemondir, daemon)
- cmdenv = "ASAN_OPTIONS=log_path={0}.asan".format(daemon)
+ cmdenv = "ASAN_OPTIONS="
+ if asan_abort:
+ cmdenv = "abort_on_error=1:"
+ cmdenv += "log_path={0}/{1}.{2}.asan ".format(self.logdir, self.name, daemon)
+
if valgrind_memleaks:
this_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
supp_file = os.path.abspath(os.path.join(this_dir, "../../../tools/valgrind.supp"))
cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(daemon, self.logdir, self.name, supp_file)
if valgrind_extra:
cmdenv += "--gen-suppressions=all --expensive-definedness-checks=yes"
+ elif daemon in strace_daemons or "all" in strace_daemons:
+ cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(daemon, self.logdir, self.name)
+
cmdopt = "{} --log file:{}.log --log-level debug".format(
daemon_opts, daemon
)