Diffstat (limited to 'tests/topotests/lib')
| -rw-r--r-- | tests/topotests/lib/bgp.py | 109 |
| -rw-r--r-- | tests/topotests/lib/common_config.py | 750 |
| -rw-r--r-- | tests/topotests/lib/lutil.py | 4 |
| -rw-r--r-- | tests/topotests/lib/ospf.py | 247 |
| -rw-r--r-- | tests/topotests/lib/pim.py | 3450 |
| -rwxr-xr-x | tests/topotests/lib/send_bsr_packet.py | 58 |
| -rw-r--r-- | tests/topotests/lib/snmptest.py | 152 |
| -rwxr-xr-x | tests/topotests/lib/test/test_json.py | 229 |
| -rw-r--r-- | tests/topotests/lib/topogen.py | 10 |
| -rw-r--r-- | tests/topotests/lib/topojson.py | 38 |
| -rw-r--r-- | tests/topotests/lib/topotest.py | 95 |
11 files changed, 4828 insertions, 314 deletions
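The bulk of this change is the new PIM/IGMP library (lib/pim.py) plus iperf and tcpdump helpers in lib/common_config.py. As a rough orientation before the diff itself, a testcase would drive the new APIs along the following lines. This is a minimal sketch inferred only from the signatures and docstrings shown in this diff; the router/host names "r1", "r2", "i1", the interface name, and the group/RP addresses are made-up example values, not taken from any test in the tree.

from lib.common_config import start_topology, iperfSendIGMPJoin, kill_iperf
from lib.pim import create_pim_config, verify_pim_neighbors, verify_igmp_groups

def test_pim_sketch(tgen, topo):
    # Start the routers with pimd in addition to the always-loaded zebra/bgpd daemons.
    start_topology(tgen, daemon=["pimd"])

    # Configure r2 as static RP for 225.1.1.0/24 (example addresses).
    pim_input = {
        "r2": {
            "pim": {
                "rp": [{
                    "rp_addr": "1.0.2.17",
                    "group_addr_range": ["225.1.1.0/24"],
                }]
            }
        }
    }
    assert create_pim_config(tgen, topo, pim_input) is True

    # PIM neighborship must be up before receivers and traffic are started.
    assert verify_pim_neighbors(tgen, topo) is True

    # Host i1 sends an IGMP join for 225.1.1.1 via iperf, after which the group
    # should be learnt on r1's receiver-facing interface.
    assert iperfSendIGMPJoin(tgen, "i1", "225.1.1.1") is True
    assert verify_igmp_groups(tgen, "r1", "r1-i1-eth0", "225.1.1.1") is True

    # Stop the iperf receiver again.
    kill_iperf(tgen, dut="i1", action="remove_join")

The verify_* helpers are wrapped in the retry decorator and return True on success or an error string on failure, so asserting `is True` is the usual pattern.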
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index ddeaf55b33..22602cb460 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -21,6 +21,7 @@ from copy import deepcopy from time import sleep import traceback +import ipaddr import ipaddress import os import sys @@ -691,8 +692,8 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): config_data.append("{} activate".format(neigh_cxt)) disable_connected = peer.setdefault("disable_connected_check", False) - keep_alive = peer.setdefault("keepalivetimer", 60) - hold_down = peer.setdefault("holddowntimer", 180) + keep_alive = peer.setdefault("keepalivetimer", 3) + hold_down = peer.setdefault("holddowntimer", 10) password = peer.setdefault("password", None) no_password = peer.setdefault("no_password", None) max_hop_limit = peer.setdefault("ebgp_multihop", 1) @@ -1615,8 +1616,6 @@ def clear_bgp(tgen, addr_type, router, vrf=None): else: run_frr_cmd(rnode, "clear bgp *") - sleep(5) - logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) @@ -2115,8 +2114,8 @@ def verify_bgp_attributes( errormsg(str) or True """ - logger.debug("Entering lib API: verify_bgp_attributes()") - for router, rnode in tgen.routers().items(): + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + for router, rnode in tgen.routers().iteritems(): if router != dut: continue @@ -2129,7 +2128,9 @@ def verify_bgp_attributes( dict_to_test = [] tmp_list = [] - if "route_maps" in input_dict.values()[0]: + dict_list = list(input_dict.values())[0] + + if "route_maps" in dict_list: for rmap_router in input_dict.keys(): for rmap, values in input_dict[rmap_router]["route_maps"].items(): if rmap == rmap_name: @@ -2194,7 +2195,7 @@ def verify_bgp_attributes( ) return errormsg - logger.debug("Exiting lib API: verify_bgp_attributes()") + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return True @@ -2514,8 +2515,9 @@ def verify_best_path_as_per_admin_distance( return True -@retry(attempts=6, wait=2, return_is_str=True) -def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None): +@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2) +def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, +aspath=None, multi_nh=None): """ This API is to verify whether bgp rib has any matching route for a nexthop. 
@@ -2550,6 +2552,7 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None) additional_nexthops_in_required_nhs = [] list1 = [] list2 = [] + found_hops = [] for routerInput in input_dict.keys(): for router, rnode in router_list.items(): if router != dut: @@ -2616,44 +2619,73 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None) st_found = True found_routes.append(st_rt) - if next_hop: + if next_hop and multi_nh and st_found: if not isinstance(next_hop, list): next_hop = [next_hop] list1 = next_hop - found_hops = [ - rib_r["ip"] - for rib_r in rib_routes_json["routes"][st_rt][0][ - "nexthops" - ] - ] - list2 = found_hops - - missing_list_of_nexthops = set(list2).difference(list1) - additional_nexthops_in_required_nhs = set( - list1 - ).difference(list2) + for mnh in range( + 0, len(rib_routes_json["routes"][st_rt]) + ): + found_hops.append( + [ + rib_r["ip"] + for rib_r in rib_routes_json["routes"][ + st_rt + ][mnh]["nexthops"] + ] + ) + for mnh in found_hops: + for each_nh_in_multipath in mnh: + list2.append(each_nh_in_multipath) + if found_hops[0]: + missing_list_of_nexthops = set(list2).difference( + list1 + ) + additional_nexthops_in_required_nhs = set( + list1 + ).difference(list2) - if list2: - if additional_nexthops_in_required_nhs: - logger.info( - "Missing nexthop %s for route" - " %s in RIB of router %s\n", - additional_nexthops_in_required_nhs, - st_rt, - dut, - ) - errormsg = ( - "Nexthop {} is Missing for " - "route {} in RIB of router {}\n".format( + if list2: + if additional_nexthops_in_required_nhs: + logger.info( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", additional_nexthops_in_required_nhs, st_rt, dut, ) - ) return errormsg else: nh_found = True + + elif next_hop and multi_nh is None: + if not isinstance(next_hop, list): + next_hop = [next_hop] + list1 = next_hop + found_hops = [rib_r["ip"] for rib_r in + rib_routes_json["routes"][ + st_rt][0]["nexthops"]] + list2 = found_hops + missing_list_of_nexthops = \ + set(list2).difference(list1) + additional_nexthops_in_required_nhs = \ + set(list1).difference(list2) + + if list2: + if additional_nexthops_in_required_nhs: + logger.info("Missing nexthop %s for route"\ + " %s in RIB of router %s\n", \ + additional_nexthops_in_required_nhs, \ + st_rt, dut) + errormsg=("Nexthop {} is Missing for "\ + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, dut)) + return errormsg + else: + nh_found = True + if aspath: found_paths = rib_routes_json["routes"][st_rt][0][ "path" @@ -3676,7 +3708,6 @@ def verify_attributes_for_evpn_routes( """ API to verify rd and rt value using "sh bgp l2vpn evpn 10.1.1.1" command. - Parameters ---------- * `tgen`: topogen object @@ -3690,7 +3721,6 @@ def verify_attributes_for_evpn_routes( * `ipLen` : IP prefix length * `rd_peer` : Peer name from which RD will be auto-generated * `rt_peer` : Peer name from which RT will be auto-generated - Usage ----- input_dict_1 = { @@ -4067,7 +4097,6 @@ def verify_evpn_routes( """ API to verify evpn routes using "sh bgp l2vpn evpn" command. 
- Parameters ---------- * `tgen`: topogen object @@ -4078,7 +4107,6 @@ def verify_evpn_routes( * `route_type` : Route type 5 is supported as of now * `EthTag` : Ethernet tag, by-default is 0 * `next_hop` : Prefered nexthop for the evpn routes - Usage ----- input_dict_1 = { @@ -4091,7 +4119,6 @@ def verify_evpn_routes( } } result = verify_evpn_routes(tgen, topo, input_dict) - Returns ------- errormsg(str) or True diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index 6c24b6ddbb..3f360ef40a 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -31,7 +31,6 @@ from re import search as re_search from tempfile import mkdtemp import os -import io import sys import traceback import socket @@ -39,6 +38,7 @@ import ipaddress import platform if sys.version_info[0] > 2: + import io import configparser else: import StringIO @@ -151,8 +151,8 @@ class InvalidCLIError(Exception): def run_frr_cmd(rnode, cmd, isjson=False): """ - Execute frr show commands in priviledged mode - * `rnode`: router node on which commands needs to executed + Execute frr show commands in privileged mode + * `rnode`: router node on which command needs to be executed * `cmd`: Command to be executed on frr * `isjson`: If command is to get json data or not :return str: @@ -184,11 +184,11 @@ def apply_raw_config(tgen, input_dict): """ API to configure raw configuration on device. This can be used for any cli - which does has not been implemented in JSON. + which has not been implemented in JSON. Parameters ---------- - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict`: configuration that needs to be applied Usage @@ -232,8 +232,8 @@ def create_common_configuration( frr_json.conf and load to router Parameters ---------- - * `tgen`: tgen onject - * `data`: Congiguration data saved in a list. + * `tgen`: tgen object + * `data`: Configuration data saved in a list. * `router` : router id to be configured. * `config_type` : Syntactic information while writing configuration. Should be one of the value as mentioned in the config_map below. @@ -257,6 +257,8 @@ def create_common_configuration( "bgp": "! BGP Config\n", "vrf": "! VRF Config\n", "ospf": "! OSPF Config\n", + "ospf6": "! OSPF Config\n", + "pim": "! PIM Config\n", } ) @@ -292,8 +294,8 @@ def create_common_configuration( def kill_router_daemons(tgen, router, daemons): """ - Router's current config would be saved to /etc/frr/ for each deamon - and deamon would be killed forcefully using SIGKILL. + Router's current config would be saved to /etc/frr/ for each daemon + and daemon would be killed forcefully using SIGKILL. 
* `tgen` : topogen object * `router`: Device under test * `daemons`: list of daemons to be killed @@ -389,6 +391,8 @@ def check_router_status(tgen): daemons.append("bgpd") if "zebra" in result: daemons.append("zebra") + if "pimd" in result: + daemons.append("pimd") rnode.startDaemons(daemons) @@ -593,13 +597,13 @@ def load_config_to_router(tgen, routerName, save_bkup=False): def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None): """ - API to get the link local ipv6 address of a perticular interface using + API to get the link local ipv6 address of a particular interface using FRR command 'show interface' - * `tgen`: tgen onject - * `router` : router for which hightest interface should be + * `tgen`: tgen object + * `router` : router for which highest interface should be calculated - * `intf` : interface for which linklocal address needs to be taken + * `intf` : interface for which link-local address needs to be taken * `vrf` : VRF name Usage @@ -688,7 +692,7 @@ def generate_support_bundle(): def start_topology(tgen, daemon=None): """ Starting topology, create tmp files which are loaded to routers - to start deamons and then start routers + to start daemons and then start routers * `tgen` : topogen object """ @@ -696,7 +700,7 @@ def start_topology(tgen, daemon=None): # Starting topology tgen.start_topology() - # Starting deamons + # Starting daemons router_list = tgen.routers() ROUTER_LIST = sorted( @@ -735,28 +739,41 @@ def start_topology(tgen, daemon=None): except IOError as err: logger.error("I/O error({0}): {1}".format(err.errno, err.strerror)) - # Loading empty zebra.conf file to router, to start the zebra deamon + # Loading empty zebra.conf file to router, to start the zebra daemon router.load_config( TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname) ) - # Loading empty bgpd.conf file to router, to start the bgp deamon + # Loading empty bgpd.conf file to router, to start the bgp daemon router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname)) if daemon and "ospfd" in daemon: - # Loading empty ospf.conf file to router, to start the bgp deamon + # Loading empty ospf.conf file to router, to start the bgp daemon router.load_config( TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(TMPDIR, rname) ) - # Starting routers + + if daemon and "ospf6d" in daemon: + # Loading empty ospf.conf file to router, to start the bgp daemon + router.load_config( + TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(TMPDIR, rname) + ) + + if daemon and "pimd" in daemon: + # Loading empty pimd.conf file to router, to start the pim deamon + router.load_config( + TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(TMPDIR, rname) + ) + + # Starting routers logger.info("Starting all routers once topology is created") tgen.start_router() def stop_router(tgen, router): """ - Router"s current config would be saved to /etc/frr/ for each deamon - and router and its deamons would be stopped. + Router"s current config would be saved to /tmp/topotest/<suite>/<router> for each daemon + and router and its daemons would be stopped. 
* `tgen` : topogen object * `router`: Device under test @@ -774,8 +791,8 @@ def stop_router(tgen, router): def start_router(tgen, router): """ - Router will started and config would be loaded from /etc/frr/ for each - deamon + Router will be started and config would be loaded from /tmp/topotest/<suite>/<router> for each + daemon * `tgen` : topogen object * `router`: Device under test @@ -786,8 +803,8 @@ def start_router(tgen, router): try: router_list = tgen.routers() - # Router and its deamons would be started and config would - # be loaded to router for each deamon from /etc/frr + # Router and its daemons would be started and config would + # be loaded to router for each daemon from /etc/frr router_list[router].start() # Waiting for router to come up @@ -835,9 +852,204 @@ def topo_daemons(tgen, topo): if "ospf" in topo["routers"][rtr] and "ospfd" not in daemon_list: daemon_list.append("ospfd") + if "ospf6" in topo["routers"][rtr] and "ospf6d" not in daemon_list: + daemon_list.append("ospf6d") + + for val in topo["routers"][rtr]["links"].values(): + if "pim" in val and "pimd" not in daemon_list: + daemon_list.append("pimd") + break + return daemon_list +def add_interfaces_to_vlan(tgen, input_dict): + """ + Add interfaces to VLAN, we need vlan pakcage to be installed on machine + + * `tgen`: tgen onject + * `input_dict` : interfaces to be added to vlans + + input_dict= { + "r1":{ + "vlan":{ + VLAN_1: [{ + intf_r1_s1: { + "ip": "10.1.1.1", + "subnet": "255.255.255.0 + } + }] + } + } + } + + add_interfaces_to_vlan(tgen, input_dict) + + """ + + router_list = tgen.routers() + for dut in input_dict.keys(): + rnode = tgen.routers()[dut] + + if "vlan" in input_dict[dut]: + for vlan, interfaces in input_dict[dut]["vlan"].items(): + for intf_dict in interfaces: + for interface, data in intf_dict.items(): + # Adding interface to VLAN + cmd = "vconfig add {} {}".format(interface, vlan) + logger.info("[DUT: %s]: Running command: %s", dut, cmd) + rnode.run(cmd) + + vlan_intf = "{}.{}".format(interface, vlan) + + ip = data["ip"] + subnet = data["subnet"] + + # Bringing interface up + cmd = "ip link set up {}".format(vlan_intf) + logger.info("[DUT: %s]: Running command: %s", dut, cmd) + rnode.run(cmd) + + # Assigning IP address + cmd = "ifconfig {} {} netmask {}".format(vlan_intf, ip, subnet) + logger.info("[DUT: %s]: Running command: %s", dut, cmd) + rnode.run(cmd) + + +def tcpdump_capture_start( + tgen, + router, + intf, + protocol=None, + grepstr=None, + timeout=0, + options=None, + cap_file=None, + background=True, +): + """ + API to capture network packets using tcp dump. + + Packages used : + + Parameters + ---------- + * `tgen`: topogen object. + * `router`: router on which ping has to be performed. + * `intf` : interface for capture. + * `protocol` : protocol for which packet needs to be captured. + * `grepstr` : string to filter out tcp dump output. + * `timeout` : Time for which packet needs to be captured. + * `options` : options for TCP dump, all tcpdump options can be used. + * `cap_file` : filename to store capture dump. + * `background` : Make tcp dump run in back ground. 
+ + Usage + ----- + tcpdump_result = tcpdump_dut(tgen, 'r2', intf, protocol='tcp', timeout=20, + options='-A -vv -x > r2bgp.txt ') + Returns + ------- + 1) True for successful capture + 2) errormsg - when tcp dump fails + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[router] + + if timeout > 0: + cmd = "timeout {}".format(timeout) + else: + cmd = "" + + cmdargs = "{} tcpdump".format(cmd) + + if intf: + cmdargs += " -i {}".format(str(intf)) + if protocol: + cmdargs += " {}".format(str(protocol)) + if options: + cmdargs += " -s 0 {}".format(str(options)) + + if cap_file: + file_name = os.path.join(LOGDIR, tgen.modname, router, cap_file) + cmdargs += " -w {}".format(str(file_name)) + # Remove existing capture file + rnode.run("rm -rf {}".format(file_name)) + + if grepstr: + cmdargs += ' | grep "{}"'.format(str(grepstr)) + + logger.info("Running tcpdump command: [%s]", cmdargs) + if not background: + rnode.run(cmdargs) + else: + rnode.run("nohup {} & /dev/null 2>&1".format(cmdargs)) + + # Check if tcpdump process is running + if background: + result = rnode.run("pgrep tcpdump") + logger.debug("ps -ef | grep tcpdump \n {}".format(result)) + + if not result: + errormsg = "tcpdump is not running {}".format("tcpdump") + return errormsg + else: + logger.info("Packet capture started on %s: interface %s", router, intf) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def tcpdump_capture_stop(tgen, router): + """ + API to capture network packets using tcp dump. + + Packages used : + + Parameters + ---------- + * `tgen`: topogen object. + * `router`: router on which ping has to be performed. + * `intf` : interface for capture. + * `protocol` : protocol for which packet needs to be captured. + * `grepstr` : string to filter out tcp dump output. + * `timeout` : Time for which packet needs to be captured. + * `options` : options for TCP dump, all tcpdump options can be used. + * `cap2file` : filename to store capture dump. + * `bakgrnd` : Make tcp dump run in back ground. + + Usage + ----- + tcpdump_result = tcpdump_dut(tgen, 'r2', intf, protocol='tcp', timeout=20, + options='-A -vv -x > r2bgp.txt ') + Returns + ------- + 1) True for successful capture + 2) errormsg - when tcp dump fails + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[router] + + # Check if tcpdump process is running + result = rnode.run("ps -ef | grep tcpdump") + logger.debug("ps -ef | grep tcpdump \n {}".format(result)) + + if not re_search(r"{}".format("tcpdump"), result): + errormsg = "tcpdump is not running {}".format("tcpdump") + return errormsg + else: + ppid = tgen.net.nameToNode[rnode.name].pid + rnode.run("set +m; pkill -P %s tcpdump &> /dev/null" % ppid) + logger.info("Stopped tcpdump capture") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + ############################################# # Common APIs, will be used by all protocols ############################################# @@ -1137,11 +1349,11 @@ def generate_ips(network, no_of_ips): """ Returns list of IPs. 
based on start_ip and no_of_ips + * `network` : from here the ip will start generating, start_ip will be * `no_of_ips` : these many IPs will be generated """ - ipaddress_list = [] if type(network) is not list: network = [network] @@ -1152,14 +1364,20 @@ def generate_ips(network, no_of_ips): mask = int(start_ipaddr.split("/")[1]) else: logger.debug("start_ipaddr {} must have a / in it".format(start_ipaddr)) - assert(0) + assert 0 addr_type = validate_ip_address(start_ip) if addr_type == "ipv4": - start_ip = ipaddress.IPv4Address(frr_unicode(start_ip)) + if start_ip == "0.0.0.0" and mask == 0 and no_of_ips == 1: + ipaddress_list.append("{}/{}".format(start_ip, mask)) + return ipaddress_list + start_ip = ipaddress.IPv4Address(unicode(start_ip)) step = 2 ** (32 - mask) if addr_type == "ipv6": - start_ip = ipaddress.IPv6Address(frr_unicode(start_ip)) + if start_ip == "0::0" and mask == 0 and no_of_ips == 1: + ipaddress_list.append("{}/{}".format(start_ip, mask)) + return ipaddress_list + start_ip = ipaddress.IPv6Address(unicode(start_ip)) step = 2 ** (128 - mask) next_ip = start_ip @@ -1181,7 +1399,7 @@ def find_interface_with_greater_ip(topo, router, loopback=True, interface=True): it will return highest IP from loopback IPs otherwise from physical interface IPs. * `topo` : json file data - * `router` : router for which hightest interface should be calculated + * `router` : router for which highest interface should be calculated """ link_data = topo["routers"][router]["links"] @@ -1287,7 +1505,6 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict _wait = kwargs.pop("wait", wait) _attempts = kwargs.pop("attempts", attempts) _attempts = int(_attempts) - expected = True if _attempts < 0: raise ValueError("attempts must be 0 or greater") @@ -1297,12 +1514,10 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict _return_is_str = kwargs.pop("return_is_str", return_is_str) _return_is_dict = kwargs.pop("return_is_str", return_is_dict) + _expected = kwargs.setdefault("expected", True) + kwargs.pop("expected") for i in range(1, _attempts + 1): try: - _expected = kwargs.setdefault("expected", True) - if _expected is False: - expected = _expected - kwargs.pop("expected") ret = func(*args, **kwargs) logger.debug("Function returned %s", ret) if _return_is_str and isinstance(ret, bool) and _expected: @@ -1314,11 +1529,11 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict if _return_is_dict and isinstance(ret, dict): return ret - if _attempts == i and expected: + if _attempts == i: generate_support_bundle() return ret except Exception as err: - if _attempts == i and expected: + if _attempts == i: generate_support_bundle() logger.info("Max number of attempts (%r) reached", _attempts) raise @@ -1360,6 +1575,17 @@ def step(msg, reset=False): _step(msg, reset) +def do_countdown(secs): + """ + Countdown timer display + """ + for i in range(secs, 0, -1): + sys.stdout.write("{} ".format(str(i))) + sys.stdout.flush() + sleep(1) + return + + ############################################# # These APIs, will used by testcase ############################################# @@ -1378,6 +1604,25 @@ def create_interfaces_cfg(tgen, topo, build=False): ------- True or False """ + + def _create_interfaces_ospf_cfg(ospf, c_data, data, ospf_keywords): + interface_data = [] + ip_ospf = "ipv6 ospf6" if ospf == "ospf6" else "ip ospf" + for keyword in ospf_keywords: + if keyword in data[ospf]: + intf_ospf_value = 
c_data["links"][destRouterLink][ospf][keyword] + if "delete" in data and data["delete"]: + interface_data.append( + "no {} {}".format(ip_ospf, keyword.replace("_", "-")) + ) + else: + interface_data.append( + "{} {} {}".format( + ip_ospf, keyword.replace("_", "-"), intf_ospf_value + ) + ) + return interface_data + result = False topo = deepcopy(topo) @@ -1424,66 +1669,26 @@ def create_interfaces_cfg(tgen, topo, build=False): else: interface_data.append("ipv6 address {}\n".format(intf_addr)) + ospf_keywords = [ + "hello_interval", + "dead_interval", + "network", + "priority", + "cost", + ] if "ospf" in data: - ospf_data = data["ospf"] - if "area" in ospf_data: - intf_ospf_area = c_data["links"][destRouterLink]["ospf"]["area"] - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf area") - else: - interface_data.append( - "ip ospf area {}".format(intf_ospf_area) - ) - - if "hello_interval" in ospf_data: - intf_ospf_hello = c_data["links"][destRouterLink]["ospf"][ - "hello_interval" - ] - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf " " hello-interval") - else: - interface_data.append( - "ip ospf " " hello-interval {}".format(intf_ospf_hello) - ) - - if "dead_interval" in ospf_data: - intf_ospf_dead = c_data["links"][destRouterLink]["ospf"][ - "dead_interval" - ] - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf" " dead-interval") - else: - interface_data.append( - "ip ospf " " dead-interval {}".format(intf_ospf_dead) - ) - - if "network" in ospf_data: - intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][ - "network" - ] - if "delete" in data and data["delete"]: - interface_data.append( - "no ip ospf" " network {}".format(intf_ospf_nw) - ) - else: - interface_data.append( - "ip ospf" " network {}".format(intf_ospf_nw) - ) - - if "priority" in ospf_data: - intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][ - "priority" - ] + interface_data += _create_interfaces_ospf_cfg( + "ospf", c_data, data, ospf_keywords + ["area"] + ) + if "ospf6" in data: + interface_data += _create_interfaces_ospf_cfg( + "ospf6", c_data, data, ospf_keywords + ) - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf" " priority") - else: - interface_data.append( - "ip ospf" " priority {}".format(intf_ospf_nw) - ) result = create_common_configuration( tgen, c_router, interface_data, "interface_config", build=build ) + except InvalidCLIError: # Traceback errormsg = traceback.format_exc() @@ -2222,9 +2427,9 @@ def shutdown_bringup_interface(tgen, dut, intf_name, ifaceaction=False): ----- dut = "r3" intf = "r3-r1-eth0" - # Shut down ineterface + # Shut down interface shutdown_bringup_interface(tgen, dut, intf, False) - # Bring up ineterface + # Bring up interface shutdown_bringup_interface(tgen, dut, intf, True) Returns ------- @@ -2233,13 +2438,58 @@ def shutdown_bringup_interface(tgen, dut, intf_name, ifaceaction=False): router_list = tgen.routers() if ifaceaction: - logger.info("Bringing up interface : {}".format(intf_name)) + logger.info("Bringing up interface {} : {}".format(dut, intf_name)) else: - logger.info("Shutting down interface : {}".format(intf_name)) + logger.info("Shutting down interface {} : {}".format(dut, intf_name)) interface_set_status(router_list[dut], intf_name, ifaceaction) +def stop_router(tgen, router): + """ + Router's current config would be saved to /tmp/topotest/<suite>/<router> + for each daemon and router and its daemons would be stopped. 
+ + * `tgen` : topogen object + * `router`: Device under test + """ + + router_list = tgen.routers() + + # Saving router config to /etc/frr, which will be loaded to router + # when it starts + router_list[router].vtysh_cmd("write memory") + + # Stop router + router_list[router].stop() + + +def start_router(tgen, router): + """ + Router will be started and config would be loaded from + /tmp/topotest/<suite>/<router> for each daemon + + * `tgen` : topogen object + * `router`: Device under test + """ + + logger.debug("Entering lib API: start_router") + + try: + router_list = tgen.routers() + + # Router and its daemons would be started and config would + # be loaded to router for each daemon from /etc/frr + router_list[router].start() + + except Exception as e: + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: start_router()") + + def addKernelRoute( tgen, router, intf, group_addr_range, next_hop=None, src=None, del_action=None ): @@ -2250,7 +2500,7 @@ def addKernelRoute( ----------- * `tgen` : Topogen object * `router`: router for which kernal routes needs to be added - * `intf`: interface name, for which kernal routes needs to be added + * `intf`: interface name, for which kernel routes needs to be added * `bindToAddress`: bind to <host>, an interface or multicast address @@ -2313,7 +2563,7 @@ def configure_vxlan(tgen, input_dict): """ Add and configure vxlan - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict` : data for vxlan config Usage: @@ -2414,7 +2664,7 @@ def configure_brctl(tgen, topo, input_dict): """ Add and configure brctl - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict` : data for brctl config Usage: @@ -2508,7 +2758,7 @@ def configure_interface_mac(tgen, input_dict): """ Add and configure brctl - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict` : data for mac config input_mac= { @@ -2562,7 +2812,7 @@ def verify_rib( tag=None, metric=None, fib=None, - count_only=False + count_only=False, ): """ Data will be read from input_dict or input JSON file, API will generate @@ -2754,8 +3004,10 @@ def verify_rib( "Nexthops are missing for " "route {} in RIB of router {}: " "expected {}, found {}\n".format( - st_rt, dut, len(next_hop), - len(found_hops) + st_rt, + dut, + len(next_hop), + len(found_hops), ) ) return errormsg @@ -2802,7 +3054,11 @@ def verify_rib( errormsg = ( "[DUT: {}]: tag value {}" " is not matched for" - " route {} in RIB \n".format(dut, _tag, st_rt,) + " route {} in RIB \n".format( + dut, + _tag, + st_rt, + ) ) return errormsg @@ -2819,7 +3075,11 @@ def verify_rib( errormsg = ( "[DUT: {}]: metric value " "{} is not matched for " - "route {} in RIB \n".format(dut, metric, st_rt,) + "route {} in RIB \n".format( + dut, + metric, + st_rt, + ) ) return errormsg @@ -2868,7 +3128,9 @@ def verify_rib( for advertise_network_dict in advertise_network: if "vrf" in advertise_network_dict: - cmd = "{} vrf {} json".format(command, advertise_network_dict["vrf"]) + cmd = "{} vrf {} json".format( + command, advertise_network_dict["vrf"] + ) else: cmd = "{} json".format(command) @@ -2947,6 +3209,7 @@ def verify_rib( logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return True + @retry(attempts=6, wait=2, return_is_str=True) def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None): """ @@ -3327,7 +3590,12 @@ def verify_prefix_lists(tgen, input_dict): for addr_type in prefix_lists_addr: if not check_address_types(addr_type): continue - + # show ip 
prefix list + if addr_type == "ipv4": + cmd = "show ip prefix-list" + else: + cmd = "show {} prefix-list".format(addr_type) + show_prefix_list = run_frr_cmd(rnode, cmd) for prefix_list in prefix_lists_addr[addr_type].keys(): if prefix_list in show_prefix_list: errormsg = ( @@ -3550,7 +3818,6 @@ def verify_cli_json(tgen, input_dict): """ API to verify if JSON is available for clis command. - Parameters ---------- * `tgen`: topogen object @@ -3720,7 +3987,6 @@ def verify_vrf_vni(tgen, input_dict): """ API to verify vrf vni details using "show vrf vni json" command. - Parameters ---------- * `tgen`: topogen object @@ -3852,3 +4118,269 @@ def required_linux_kernel_version(required_version): ) return error_msg return True + + +def iperfSendIGMPJoin( + tgen, server, bindToAddress, l4Type="UDP", join_interval=1, inc_step=0, repeat=0 +): + """ + Run iperf to send IGMP join and traffic + + Parameters: + ----------- + * `tgen` : Topogen object + * `l4Type`: string, one of [ TCP, UDP ] + * `server`: iperf server, from where IGMP join would be sent + * `bindToAddress`: bind to <host>, an interface or multicast + address + * `join_interval`: seconds between periodic bandwidth reports + * `inc_step`: increamental steps, by default 0 + * `repeat`: Repetition of group, by default 0 + + returns: + -------- + errormsg or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[server] + + iperfArgs = "iperf -s " + + # UDP/TCP + if l4Type == "UDP": + iperfArgs += "-u " + + iperfCmd = iperfArgs + # Group address range to cover + if bindToAddress: + if type(bindToAddress) is not list: + Address = [] + start = ipaddress.IPv4Address(frr_unicode(bindToAddress)) + + Address = [start] + next_ip = start + + count = 1 + while count < repeat: + next_ip += inc_step + Address.append(next_ip) + count += 1 + bindToAddress = Address + + for bindTo in bindToAddress: + iperfArgs = iperfCmd + iperfArgs += "-B %s " % bindTo + + # Join interval + if join_interval: + iperfArgs += "-i %d " % join_interval + + iperfArgs += " &>/dev/null &" + # Run iperf command to send IGMP join + logger.debug("[DUT: {}]: Running command: [{}]".format(server, iperfArgs)) + output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs)) + + # Check if iperf process is running + if output: + pid = output.split()[1] + rnode.run("touch /var/run/frr/iperf_server.pid") + rnode.run("echo %s >> /var/run/frr/iperf_server.pid" % pid) + else: + errormsg = "IGMP join is not sent for {}. 
Error: {}".format(bindTo, output) + logger.error(output) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def iperfSendTraffic( + tgen, + client, + bindToAddress, + ttl, + time=0, + l4Type="UDP", + inc_step=0, + repeat=0, + mappedAddress=None, +): + """ + Run iperf to send IGMP join and traffic + + Parameters: + ----------- + * `tgen` : Topogen object + * `l4Type`: string, one of [ TCP, UDP ] + * `client`: iperf client, from where iperf traffic would be sent + * `bindToAddress`: bind to <host>, an interface or multicast + address + * `ttl`: time to live + * `time`: time in seconds to transmit for + * `inc_step`: increamental steps, by default 0 + * `repeat`: Repetition of group, by default 0 + * `mappedAddress`: Mapped Interface ip address + + returns: + -------- + errormsg or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[client] + + iperfArgs = "iperf -c " + + iperfCmd = iperfArgs + # Group address range to cover + if bindToAddress: + if type(bindToAddress) is not list: + Address = [] + start = ipaddress.IPv4Address(frr_unicode(bindToAddress)) + + Address = [start] + next_ip = start + + count = 1 + while count < repeat: + next_ip += inc_step + Address.append(next_ip) + count += 1 + bindToAddress = Address + + for bindTo in bindToAddress: + iperfArgs = iperfCmd + iperfArgs += "%s " % bindTo + + # Mapped Interface IP + if mappedAddress: + iperfArgs += "-B %s " % mappedAddress + + # UDP/TCP + if l4Type == "UDP": + iperfArgs += "-u -b 0.012m " + + # TTL + if ttl: + iperfArgs += "-T %d " % ttl + + # Time + if time: + iperfArgs += "-t %d " % time + + iperfArgs += " &>/dev/null &" + + # Run iperf command to send multicast traffic + logger.debug("[DUT: {}]: Running command: [{}]".format(client, iperfArgs)) + output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs)) + + # Check if iperf process is running + if output: + pid = output.split()[1] + rnode.run("touch /var/run/frr/iperf_client.pid") + rnode.run("echo %s >> /var/run/frr/iperf_client.pid" % pid) + else: + errormsg = "Multicast traffic is not sent for {}. 
Error {}".format( + bindTo, output + ) + logger.error(output) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def kill_iperf(tgen, dut=None, action=None): + """ + Killing iperf process if running for any router in topology + Parameters: + ----------- + * `tgen` : Topogen object + * `dut` : Any iperf hostname to send igmp prune + * `action`: to kill igmp join iperf action is remove_join + to kill traffic iperf action is remove_traffic + + Usage + ---- + kill_iperf(tgen, dut ="i6", action="remove_join") + + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + router_list = tgen.routers() + for router, rnode in router_list.items(): + # Run iperf command to send IGMP join + pid_client = rnode.run("cat /var/run/frr/iperf_client.pid") + pid_server = rnode.run("cat /var/run/frr/iperf_server.pid") + if action == "remove_join": + pids = pid_server + elif action == "remove_traffic": + pids = pid_client + else: + pids = "\n".join([pid_client, pid_server]) + for pid in pids.split("\n"): + pid = pid.strip() + if pid.isdigit(): + cmd = "set +m; kill -9 %s &> /dev/null" % pid + logger.debug("[DUT: {}]: Running command: [{}]".format(router, cmd)) + rnode.run(cmd) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + +def verify_ip_nht(tgen, input_dict): + """ + Running "show ip nht" command and verifying given nexthop resolution + Parameters + ---------- + * `tgen` : topogen object + * `input_dict`: data to verify nexthop + Usage + ----- + input_dict_4 = { + "r1": { + nh: { + "Address": nh, + "resolvedVia": "connected", + "nexthops": { + "nexthop1": { + "Interface": intf + } + } + } + } + } + result = verify_ip_nht(tgen, input_dict_4) + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: verify_ip_nht()") + + for router in input_dict.keys(): + if router not in tgen.routers(): + continue + + rnode = tgen.routers()[router] + nh_list = input_dict[router] + + if validate_ip_address(nh_list.keys()[0]) is "ipv6": + show_ip_nht = run_frr_cmd(rnode, "show ipv6 nht") + else: + show_ip_nht = run_frr_cmd(rnode, "show ip nht") + + for nh in nh_list: + if nh in show_ip_nht: + logger.info("Nexthop %s is resolved on %s", nh, router) + return True + else: + errormsg = "Nexthop {} is resolved on {}".format(nh, router) + return errormsg + + logger.debug("Exiting lib API: verify_ip_nht()") + return False diff --git a/tests/topotests/lib/lutil.py b/tests/topotests/lib/lutil.py index 9cbea67af1..0b6a946fda 100644 --- a/tests/topotests/lib/lutil.py +++ b/tests/topotests/lib/lutil.py @@ -23,6 +23,7 @@ import time import datetime import json import math +import time from lib.topolog import logger from mininet.net import Mininet @@ -194,8 +195,9 @@ Total %-4d %-4d %d\n\ if op != "wait": self.l_line += 1 self.log( - "(#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:" + "%s (#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:" % ( + time.asctime(), self.l_total + 1, self.l_filename, self.l_line, diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py index 9f3d4841b0..5bc9f14fea 100644 --- a/tests/topotests/lib/ospf.py +++ b/tests/topotests/lib/ospf.py @@ -62,7 +62,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru "r1": { "ospf": { "router_id": "22.22.22.22", - "area": [{ "id":0.0.0.0, "type": "nssa"}] + "area": [{ "id": "0.0.0.0", "type": "nssa"}] } } @@ -94,7 +94,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru 
return result -def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True): +def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True, ospf="ospf"): """ Helper API to create ospf global configuration. @@ -105,6 +105,33 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True * `router` : router to be configured. * `build` : Only for initial setup phase this is set as True. * `load_config` : Loading the config to router this is set as True. + * `ospf` : either 'ospf' or 'ospf6' + + Usage + ----- + input_dict = { + "routers": { + "r1": { + "links": { + "r3": { + "ipv6": "2013:13::1/64", + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf6": { + "router_id": "1.1.1.1", + "neighbors": { + "r3": { + "area": "1.1.1.1" + } + } + } + } + } Returns ------- @@ -115,17 +142,17 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True logger.debug("Entering lib API: __create_ospf_global()") try: - ospf_data = input_dict[router]["ospf"] + ospf_data = input_dict[router][ospf] del_ospf_action = ospf_data.setdefault("delete", False) if del_ospf_action: - config_data = ["no router ospf"] + config_data = ["no router {}".format(ospf)] result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config + tgen, router, config_data, ospf, build, load_config ) return result config_data = [] - cmd = "router ospf" + cmd = "router {}".format(ospf) config_data.append(cmd) @@ -133,9 +160,9 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True router_id = ospf_data.setdefault("router_id", None) del_router_id = ospf_data.setdefault("del_router_id", False) if del_router_id: - config_data.append("no ospf router-id") + config_data.append("no {} router-id".format(ospf)) if router_id: - config_data.append("ospf router-id {}".format(router_id)) + config_data.append("{} router-id {}".format(ospf, router_id)) # redistribute command redistribute_data = ospf_data.setdefault("redistribute", {}) @@ -154,6 +181,7 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) + # area information area_data = ospf_data.setdefault("area", {}) if area_data: @@ -172,9 +200,20 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config - ) + + # area interface information for ospf6d only + if ospf == "ospf6": + area_iface = ospf_data.setdefault("neighbors", {}) + if area_iface: + for neighbor in area_iface: + if "area" in area_iface[neighbor]: + iface = input_dict[router]["links"][neighbor]["interface"] + cmd = "interface {} area {}".format( + iface, area_iface[neighbor]["area"] + ) + if area_iface[neighbor].setdefault("delete", False): + cmd = "no {}".format(cmd) + config_data.append(cmd) # summary information summary_data = ospf_data.setdefault("summary-address", {}) @@ -200,8 +239,9 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) + result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config + tgen, router, config_data, ospf, build, load_config ) except InvalidCLIError: @@ -238,7 +278,7 @@ def create_router_ospf6(tgen, topo, 
input_dict=None, build=False, load_config=Tr ------- True or False """ - logger.debug("Entering lib API: create_router_ospf()") + logger.debug("Entering lib API: create_router_ospf6()") result = False if not input_dict: @@ -247,67 +287,15 @@ def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=Tr topo = topo["routers"] input_dict = deepcopy(input_dict) for router in input_dict.keys(): - if "ospf" not in input_dict[router]: - logger.debug("Router %s: 'ospf' not present in input_dict", router) + if "ospf6" not in input_dict[router]: + logger.debug("Router %s: 'ospf6' not present in input_dict", router) continue - result = __create_ospf_global(tgen, input_dict, router, build, load_config) - - logger.debug("Exiting lib API: create_router_ospf()") - return result - - -def __create_ospf6_global(tgen, input_dict, router, build=False, load_config=True): - """ - Helper API to create ospf global configuration. - - Parameters - ---------- - * `tgen` : Topogen object - * `input_dict` : Input dict data, required when configuring from testcase - * `router` : router id to be configured. - * `build` : Only for initial setup phase this is set as True. - - Returns - ------- - True or False - """ - - result = False - logger.debug("Entering lib API: __create_ospf_global()") - try: - - ospf_data = input_dict[router]["ospf6"] - del_ospf_action = ospf_data.setdefault("delete", False) - if del_ospf_action: - config_data = ["no ipv6 router ospf"] - result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config - ) - return result - - config_data = [] - cmd = "router ospf" - - config_data.append(cmd) - - router_id = ospf_data.setdefault("router_id", None) - del_router_id = ospf_data.setdefault("del_router_id", False) - if del_router_id: - config_data.append("no ospf router-id") - if router_id: - config_data.append("ospf router-id {}".format(router_id)) - - result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config + result = __create_ospf_global( + tgen, input_dict, router, build, load_config, "ospf6" ) - except InvalidCLIError: - # Traceback - errormsg = traceback.format_exc() - logger.error(errormsg) - return errormsg - logger.debug("Exiting lib API: create_ospf_global()") + logger.debug("Exiting lib API: create_router_ospf6()") return result @@ -330,7 +318,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config= "links": { "r2": { "ospf": { - "authentication": 'message-digest', + "authentication": "message-digest", "authentication-key": "ospf", "message-digest-key": "10" } @@ -379,6 +367,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config= if data_ospf_area: cmd = "ip ospf area {}".format(data_ospf_area) config_data.append(cmd) + # interface ospf auth if data_ospf_auth: if data_ospf_auth == "null": @@ -464,6 +453,32 @@ def clear_ospf(tgen, router): logger.debug("Exiting lib API: clear_ospf()") +def redistribute_ospf(tgen, topo, dut, route_type, **kwargs): + """ + Redstribution of routes inside ospf. + + Parameters + ---------- + * `tgen`: Topogen object + * `topo` : json file data + * `dut`: device under test + * `route_type`: "static" or "connected" or .... 
+ * `kwargs`: pass extra information (see below) + + Usage + ----- + redistribute_ospf(tgen, topo, "r0", "static", delete=True) + redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4") + """ + + ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": route_type}]}}} + for k, v in kwargs.items(): + ospf_red[dut]["ospf"]["redistribute"][0][k] = v + + result = create_router_ospf(tgen, topo, ospf_red) + assert result is True, "Testcase : Failed \n Error: {}".format(result) + + ################################ # Verification procs ################################ @@ -525,7 +540,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): logger.info("Verifying OSPF neighborship on router %s:", router) show_ospf_json = run_frr_cmd( - rnode, "show ip ospf neighbor all json", isjson=True + rnode, "show ip ospf neighbor all json", isjson=True ) # Verifying output dictionary show_ospf_json is empty or not @@ -658,6 +673,70 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): return result +################################ +# Verification procs +################################ +@retry(attempts=40, wait=2, return_is_str=True) +def verify_ospf6_neighbor(tgen, topo): + """ + This API is to verify ospf neighborship by running + show ip ospf neighbour command, + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + + Usage + ----- + Check FULL neighbors. + verify_ospf_neighbor(tgen, topo) + + result = verify_ospf_neighbor(tgen, topo) + + Returns + ------- + True or False (Error Message) + """ + + logger.debug("Entering lib API: verify_ospf6_neighbor()") + result = False + for router, rnode in tgen.routers().items(): + if "ospf6" not in topo["routers"][router]: + continue + + logger.info("Verifying OSPF6 neighborship on router %s:", router) + show_ospf_json = run_frr_cmd( + rnode, "show ipv6 ospf6 neighbor json", isjson=True + ) + + if not show_ospf_json: + return "OSPF6 is not running" + + ospf_nbr_list = topo["routers"][router]["ospf6"]["neighbors"] + no_of_peer = 0 + for ospf_nbr in ospf_nbr_list: + ospf_nbr_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"] + for neighbor in show_ospf_json["neighbors"]: + if neighbor["neighborId"] == ospf_nbr_rid: + nh_state = neighbor["state"] + break + else: + return "[DUT: {}] OSPF6 peer {} missing".format(router, data_rid) + + if nh_state == "Full": + no_of_peer += 1 + + if no_of_peer == len(ospf_nbr_list): + logger.info("[DUT: {}] OSPF6 is Converged".format(router)) + result = True + else: + return "[DUT: {}] OSPF6 is not Converged".format(router) + + logger.debug("Exiting API: verify_ospf6_neighbor()") + return result + + @retry(attempts=21, wait=2, return_is_str=True) def verify_ospf_rib( tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None @@ -847,19 +926,23 @@ def verify_ospf_rib( if "routeType" not in ospf_rib_json[st_rt]: errormsg = ( "[DUT: {}]: routeType missing" - "for route {} in OSPF RIB \n".format(dut, st_rt) + " for route {} in OSPF RIB \n".format( + dut, st_rt + ) ) return errormsg elif _rtype != ospf_rib_json[st_rt]["routeType"]: errormsg = ( "[DUT: {}]: routeType mismatch" - "for route {} in OSPF RIB \n".format(dut, st_rt) + " for route {} in OSPF RIB \n".format( + dut, st_rt + ) ) return errormsg else: logger.info( - "DUT: {}]: Found routeType {}" - "for route {}".format(dut, _rtype, st_rt) + "[DUT: {}]: Found routeType {}" + " for route {}".format(dut, _rtype, st_rt) ) if tag: if "tag" not in ospf_rib_json[st_rt]: @@ -874,7 +957,11 
@@ def verify_ospf_rib( errormsg = ( "[DUT: {}]: tag value {}" " is not matched for" - " route {} in RIB \n".format(dut, _tag, st_rt,) + " route {} in RIB \n".format( + dut, + _tag, + st_rt, + ) ) return errormsg @@ -891,7 +978,11 @@ def verify_ospf_rib( errormsg = ( "[DUT: {}]: metric value " "{} is not matched for " - "route {} in RIB \n".format(dut, metric, st_rt,) + "route {} in RIB \n".format( + dut, + metric, + st_rt, + ) ) return errormsg diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py new file mode 100644 index 0000000000..294f60bf68 --- /dev/null +++ b/tests/topotests/lib/pim.py @@ -0,0 +1,3450 @@ +# Copyright (c) 2019 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. + +import sys +import os +import re +import datetime +import traceback +import pytest +from time import sleep +from copy import deepcopy +from lib.topolog import logger + +# Import common_config to use commomnly used APIs +from lib.common_config import ( + create_common_configuration, + InvalidCLIError, + retry, + run_frr_cmd, +) + +#### +CWD = os.path.dirname(os.path.realpath(__file__)) + + +def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True): + """ + API to configure pim on router + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from + testcase + * `build` : Only for initial setup phase this is set as True. + + Usage + ----- + input_dict = { + "r1": { + "pim": { + "disable" : ["l1-i1-eth1"], + "rp": [{ + "rp_addr" : "1.0.3.17". + "keep-alive-timer": "100" + "group_addr_range": ["224.1.1.0/24", "225.1.1.0/24"] + "prefix-list": "pf_list_1" + "delete": True + }] + } + } + } + + + Returns + ------- + True or False + """ + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + result = False + if not input_dict: + input_dict = deepcopy(topo) + else: + topo = topo["routers"] + input_dict = deepcopy(input_dict) + for router in input_dict.keys(): + result = _enable_disable_pim(tgen, topo, input_dict, router, build) + + if "pim" not in input_dict[router]: + logger.debug("Router %s: 'pim' is not present in " "input_dict", router) + continue + + if result is True: + if "rp" not in input_dict[router]["pim"]: + continue + + result = _create_pim_config( + tgen, topo, input_dict, router, build, load_config + ) + if result is not True: + return False + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def _create_pim_config(tgen, topo, input_dict, router, build=False, load_config=False): + """ + Helper API to create pim configuration. 
+ + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `router` : router id to be configured. + * `build` : Only for initial setup phase this is set as True. + + Returns + ------- + True or False + """ + + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + try: + + pim_data = input_dict[router]["pim"] + + for dut in tgen.routers(): + if "pim" not in input_dict[router]: + continue + + for destLink, data in topo[dut]["links"].items(): + if "pim" not in data: + continue + + if "rp" in pim_data: + config_data = [] + rp_data = pim_data["rp"] + + for rp_dict in deepcopy(rp_data): + # ip address of RP + if "rp_addr" not in rp_dict and build: + logger.error( + "Router %s: 'ip address of RP' not " + "present in input_dict/JSON", + router, + ) + + return False + rp_addr = rp_dict.setdefault("rp_addr", None) + + # Keep alive Timer + keep_alive_timer = rp_dict.setdefault("keep_alive_timer", None) + + # Group Address range to cover + if "group_addr_range" not in rp_dict and build: + logger.error( + "Router %s:'Group Address range to cover'" + " not present in input_dict/JSON", + router, + ) + + return False + group_addr_range = rp_dict.setdefault("group_addr_range", None) + + # Group prefix-list filter + prefix_list = rp_dict.setdefault("prefix_list", None) + + # Delete rp config + del_action = rp_dict.setdefault("delete", False) + + if keep_alive_timer: + cmd = "ip pim rp keep-alive-timer {}".format(keep_alive_timer) + config_data.append(cmd) + + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if rp_addr: + if group_addr_range: + if type(group_addr_range) is not list: + group_addr_range = [group_addr_range] + + for grp_addr in group_addr_range: + cmd = "ip pim rp {} {}".format(rp_addr, grp_addr) + config_data.append(cmd) + + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if prefix_list: + cmd = "ip pim rp {} prefix-list {}".format( + rp_addr, prefix_list + ) + config_data.append(cmd) + + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + result = create_common_configuration( + tgen, dut, config_data, "pim", build, load_config + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def create_igmp_config(tgen, topo, input_dict=None, build=False): + """ + API to configure igmp on router + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from + testcase + * `build` : Only for initial setup phase this is set as True. 
+ + Usage + ----- + input_dict = { + "r1": { + "igmp": { + "interfaces": { + "r1-r0-eth0" :{ + "igmp":{ + "version": "2", + "delete": True + "query": { + "query-interval" : 100, + "query-max-response-time": 200 + } + } + } + } + } + } + } + + Returns + ------- + True or False + """ + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + result = False + if not input_dict: + input_dict = deepcopy(topo) + else: + topo = topo["routers"] + input_dict = deepcopy(input_dict) + for router in input_dict.keys(): + if "igmp" not in input_dict[router]: + logger.debug("Router %s: 'igmp' is not present in " "input_dict", router) + continue + + igmp_data = input_dict[router]["igmp"] + + if "interfaces" in igmp_data: + config_data = [] + intf_data = igmp_data["interfaces"] + + for intf_name in intf_data.keys(): + cmd = "interface {}".format(intf_name) + config_data.append(cmd) + protocol = "igmp" + del_action = intf_data[intf_name]["igmp"].setdefault("delete", False) + cmd = "ip igmp" + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + del_attr = intf_data[intf_name]["igmp"].setdefault("delete_attr", False) + for attribute, data in intf_data[intf_name]["igmp"].items(): + if attribute == "version": + cmd = "ip {} {} {}".format(protocol, attribute, data) + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if attribute == "join": + for group in data: + cmd = "ip {} {} {}".format(protocol, attribute, group) + if del_attr: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if attribute == "query": + for query, value in data.items(): + if query != "delete": + cmd = "ip {} {} {}".format(protocol, query, value) + + if "delete" in intf_data[intf_name][protocol]["query"]: + cmd = "no {}".format(cmd) + + config_data.append(cmd) + try: + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=build + ) + except InvalidCLIError: + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def _enable_disable_pim(tgen, topo, input_dict, router, build=False): + """ + Helper API to enable or disable pim on interfaces + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `router` : router id to be configured. + * `build` : Only for initial setup phase this is set as True. 
+ + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + try: + config_data = [] + + enable_flag = True + # Disable pim on interface + if "pim" in input_dict[router]: + if "disable" in input_dict[router]["pim"]: + enable_flag = False + interfaces = input_dict[router]["pim"]["disable"] + + if type(interfaces) is not list: + interfaces = [interfaces] + + for interface in interfaces: + cmd = "interface {}".format(interface) + config_data.append(cmd) + config_data.append("no ip pim") + + # Enable pim on interface + if enable_flag: + for destRouterLink, data in sorted(topo[router]["links"].items()): + if "pim" in data and data["pim"] == "enable": + + # Loopback interfaces + if "type" in data and data["type"] == "loopback": + interface_name = destRouterLink + else: + interface_name = data["interface"] + + cmd = "interface {}".format(interface_name) + config_data.append(cmd) + config_data.append("ip pim") + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=build + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def add_rp_interfaces_and_pim_config(tgen, topo, interface, rp, rp_mapping): + """ + Add physical interfaces tp RP for all the RPs + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `interface` : RP interface + * `rp` : rp for given topology + * `rp_mapping` : dictionary of all groups and RPs + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + + for group, rp_list in rp_mapping.items(): + for _rp in rp_list: + config_data.append("interface {}".format(interface)) + config_data.append("ip address {}".format(_rp)) + config_data.append("ip pim") + + result = create_common_configuration( + tgen, rp, config_data, "interface_config" + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def find_rp_details(tgen, topo): + """ + Find who is RP in topology and returns list of RPs + + Parameters: + ----------- + * `tgen` : Topogen object + * `topo` : json file data + + returns: + -------- + errormsg or True + """ + + rp_details = {} + + router_list = tgen.routers() + topo_data = topo["routers"] + + for router in router_list.keys(): + + if "pim" not in topo_data[router]: + continue + + pim_data = topo_data[router]["pim"] + if "rp" in pim_data: + rp_data = pim_data["rp"] + for rp_dict in rp_data: + # ip address of RP + rp_addr = rp_dict["rp_addr"] + + for link, data in topo["routers"][router]["links"].items(): + if data["ipv4"].split("/")[0] == rp_addr: + rp_details[router] = rp_addr + + return rp_details + + +def configure_pim_force_expire(tgen, topo, input_dict, build=False): + """ + Helper API to create pim configuration. + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `build` : Only for initial setup phase this is set as True. 
+ + Usage + ----- + input_dict ={ + "l1": { + "pim": { + "force_expire":{ + "10.0.10.1": ["255.1.1.1"] + } + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + + Returns + ------- + True or False + """ + + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + try: + + for dut in input_dict.keys(): + if "pim" not in input_dict[dut]: + continue + + pim_data = input_dict[dut]["pim"] + + if "force_expire" in pim_data: + config_data = [] + force_expire_data = pim_data["force_expire"] + + for source, groups in force_expire_data.items(): + if type(groups) is not list: + groups = [groups] + + for group in groups: + cmd = "ip pim force-expire source {} group {}".format( + source, group + ) + config_data.append(cmd) + + result = create_common_configuration( + tgen, dut, config_data, "pim", build=build + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +############################################# +# Verification APIs +############################################# +@retry(attempts=6, wait=2, return_is_str=True) +def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None): + """ + Verify all PIM neighbors are up and running, config is verified + using "show ip pim neighbor" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : dut info + * `iface` : link for which PIM nbr need to check + * `nbr_ip` : neighbor ip of interface + + Usage + ----- + result = verify_pim_neighbors(tgen, topo, dut, iface=ens192, nbr_ip=20.1.1.2) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if dut is not None and dut != router: + continue + + rnode = tgen.routers()[router] + show_ip_pim_neighbor_json = rnode.vtysh_cmd( + "show ip pim neighbor json", isjson=True + ) + + for destLink, data in topo["routers"][router]["links"].items(): + if iface is not None and iface != data["interface"]: + continue + + if "type" in data and data["type"] == "loopback": + continue + + if "pim" not in data: + continue + + if "pim" in data and data["pim"] == "enable": + local_interface = data["interface"] + + if "-" in destLink: + # Spliting and storing destRouterLink data in tempList + tempList = destLink.split("-") + + # destRouter + destLink = tempList.pop(0) + + # Current Router Link + tempList.insert(0, router) + curRouter = "-".join(tempList) + else: + curRouter = router + if destLink not in topo["routers"]: + continue + data = topo["routers"][destLink]["links"][curRouter] + if "type" in data and data["type"] == "loopback": + continue + + if "pim" not in data: + continue + + logger.info("[DUT: %s]: Verifying PIM neighbor status:", router) + + if "pim" in data and data["pim"] == "enable": + pim_nh_intf_ip = data["ipv4"].split("/")[0] + + # Verifying PIM neighbor + if local_interface in show_ip_pim_neighbor_json: + if show_ip_pim_neighbor_json[local_interface]: + if ( + show_ip_pim_neighbor_json[local_interface][pim_nh_intf_ip][ + "neighbor" + ] + != pim_nh_intf_ip + ): + errormsg = ( + "[DUT %s]: Local interface: %s, PIM" + " neighbor check failed " + "Expected neighbor: %s, Found neighbor:" + " %s" + % ( + router, + local_interface, + pim_nh_intf_ip, + show_ip_pim_neighbor_json[local_interface][ + 
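The destLink handling above recovers the peer router name and rebuilds the key for the reverse side of the link. A short sketch of that string manipulation (the link key is only an example of the naming convention):

# Sketch: "r2-link1" seen on router r1 names peer r2; the reverse lookup key is
# rebuilt by swapping in the local router name, as in verify_pim_neighbors().
router, destLink = "r1", "r2-link1"
tempList = destLink.split("-")
destRouter = tempList.pop(0)     # "r2"
tempList.insert(0, router)       # ["r1", "link1"]
curRouter = "-".join(tempList)   # "r1-link1"
print(destRouter, curRouter)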
pim_nh_intf_ip + ]["neighbor"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: Local interface: %s, Found" + " expected PIM neighbor %s", + router, + local_interface, + pim_nh_intf_ip, + ) + else: + errormsg = ( + "[DUT %s]: Local interface: %s, and" + "interface ip: %s is not found in " + "PIM neighbor " % (router, local_interface, pim_nh_intf_ip) + ) + return errormsg + else: + errormsg = ( + "[DUT %s]: Local interface: %s, is not " + "present in PIM neighbor " % (router, local_interface) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_igmp_groups(tgen, dut, interface, group_addresses): + """ + Verify IGMP groups are received from an intended interface + by running "show ip igmp groups" command + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `interface`: interface, from which IGMP groups would be received + * `group_addresses`: IGMP group address + + Usage + ----- + dut = "r1" + interface = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_igmp_groups(tgen, dut, interface, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying IGMP groups received:", dut) + show_ip_igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + if interface in show_ip_igmp_json: + show_ip_igmp_json = show_ip_igmp_json[interface]["groups"] + else: + errormsg = ( + "[DUT %s]: Verifying IGMP group received" + " from interface %s [FAILED]!! " % (dut, interface) + ) + return errormsg + + found = False + for grp_addr in group_addresses: + for index in show_ip_igmp_json: + if index["group"] == grp_addr: + found = True + break + if found is not True: + errormsg = ( + "[DUT %s]: Verifying IGMP group received" + " from interface %s [FAILED]!! " + " Expected not found: %s" % (dut, interface, grp_addr) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying IGMP group %s received " + "from interface %s [PASSED]!! 
", + dut, + grp_addr, + interface, + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_upstream_iif( + tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1 +): + """ + Verify upstream inbound interface is updated correctly + by running "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `src_address`: source address + * `group_addresses`: IGMP group address + * `joinState`: upstream join state + * `refCount`: refCount value + + Usage + ----- + dut = "r1" + iif = "r1-r0-eth0" + src_address = "*" + group_address = "225.1.1.1" + result = verify_upstream_iif(tgen, dut, iif, src_address, group_address, + state, refCount) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info( + "[DUT: %s]: Verifying upstream Inbound Interface" " for IGMP groups received:", + dut, + ) + show_ip_pim_upstream_json = run_frr_cmd( + rnode, "show ip pim upstream json", isjson=True + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + if type(iif) is not list: + iif = [iif] + + for grp_addr in group_addresses: + # Verify group address + if grp_addr not in show_ip_pim_upstream_json: + errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % ( + dut, + grp_addr, + ) + return errormsg + group_addr_json = show_ip_pim_upstream_json[grp_addr] + + # Verify source address + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + # Verify Inbound Interface + found = False + for in_interface in iif: + if group_addr_json[src_address]["inboundInterface"] == in_interface: + if refCount > 0: + logger.info( + "[DUT %s]: Verifying refCount " + "for (%s,%s) [PASSED]!! " + " Found Expected: %s", + dut, + src_address, + grp_addr, + group_addr_json[src_address]["refCount"], + ) + found = True + if found: + if joinState is None: + if group_addr_json[src_address]["joinState"] != "Joined": + errormsg = ( + "[DUT %s]: Verifying iif " + "(Inbound Interface) for (%s,%s) and" + " joinState :%s [FAILED]!! " + " Expected: %s, Found: %s" + % ( + dut, + src_address, + grp_addr, + group_addr_json[src_address]["joinState"], + in_interface, + group_addr_json[src_address]["inboundInterface"], + ) + ) + return errormsg + + elif group_addr_json[src_address]["joinState"] != joinState: + errormsg = ( + "[DUT %s]: Verifying iif " + "(Inbound Interface) for (%s,%s) and" + " joinState :%s [FAILED]!! " + " Expected: %s, Found: %s" + % ( + dut, + src_address, + grp_addr, + group_addr_json[src_address]["joinState"], + in_interface, + group_addr_json[src_address]["inboundInterface"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying iif(Inbound Interface)" + " for (%s,%s) and joinState is %s [PASSED]!! " + " Found Expected: (%s)", + dut, + src_address, + grp_addr, + group_addr_json[src_address]["joinState"], + group_addr_json[src_address]["inboundInterface"], + ) + if not found: + errormsg = ( + "[DUT %s]: Verifying iif " + "(Inbound Interface) for (%s, %s) " + "[FAILED]!! 
" + " Expected: %s, Found: %s" + % ( + dut, + src_address, + grp_addr, + in_interface, + group_addr_json[src_address]["inboundInterface"], + ) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=6, wait=2, return_is_str=True) +def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses): + """ + Verify join state is updated correctly and join timer is + running with the help of "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `src_address`: source address + * `group_addresses`: IGMP group address + + Usage + ----- + dut = "r1" + iif = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_join_state_and_timer(tgen, dut, iif, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + errormsg = "" + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info( + "[DUT: %s]: Verifying Join state and Join Timer" " for IGMP groups received:", + dut, + ) + show_ip_pim_upstream_json = run_frr_cmd( + rnode, "show ip pim upstream json", isjson=True + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + # Verify group address + if grp_addr not in show_ip_pim_upstream_json: + errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % ( + dut, + grp_addr, + ) + return errormsg + + group_addr_json = show_ip_pim_upstream_json[grp_addr] + + # Verify source address + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + # Verify join state + joinState = group_addr_json[src_address]["joinState"] + if joinState != "Joined": + error = ( + "[DUT %s]: Verifying join state for" + " (%s,%s) [FAILED]!! " + " Expected: %s, Found: %s" + % (dut, src_address, grp_addr, "Joined", joinState) + ) + errormsg = errormsg + "\n" + str(error) + else: + logger.info( + "[DUT %s]: Verifying join state for" + " (%s,%s) [PASSED]!! " + " Found Expected: %s", + dut, + src_address, + grp_addr, + joinState, + ) + + # Verify join timer + joinTimer = group_addr_json[src_address]["joinTimer"] + if not re.match(r"(\d{2}):(\d{2}):(\d{2})", joinTimer): + error = ( + "[DUT %s]: Verifying join timer for" + " (%s,%s) [FAILED]!! " + " Expected: %s, Found: %s", + dut, + src_address, + grp_addr, + "join timer should be running", + joinTimer, + ) + errormsg = errormsg + "\n" + str(error) + else: + logger.info( + "[DUT %s]: Verifying join timer is running" + " for (%s,%s) [PASSED]!! 
" + " Found Expected: %s", + dut, + src_address, + grp_addr, + joinTimer, + ) + + if errormsg != "": + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=41, wait=2, return_is_dict=True) +def verify_ip_mroutes( + tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0 +): + """ + Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes + by running "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `src_address`: source address + * `group_addresses`: IGMP group address + * `iif`: Incoming interface + * `oil`: Outgoing interface + * `return_uptime`: If True, return uptime dict, default is False + * `mwait`: Wait time, default is 0 + + + Usage + ----- + dut = "r1" + group_address = "225.1.1.1" + result = verify_ip_mroutes(tgen, dut, src_address, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + if return_uptime: + logger.info("Sleeping for %s sec..", mwait) + sleep(mwait) + + logger.info("[DUT: %s]: Verifying ip mroutes", dut) + show_ip_mroute_json = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + if return_uptime: + uptime_dict = {} + + if bool(show_ip_mroute_json) == False: + error_msg = "[DUT %s]: mroutes are not present or flushed out !!" % (dut) + return error_msg + + if not isinstance(group_addresses, list): + group_addresses = [group_addresses] + + if not isinstance(iif, list) and iif is not "none": + iif = [iif] + + if not isinstance(oil, list) and oil is not "none": + oil = [oil] + + for grp_addr in group_addresses: + if grp_addr not in show_ip_mroute_json: + errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % ( + dut, + src_address, + grp_addr, + ) + return errormsg + else: + if return_uptime: + uptime_dict[grp_addr] = {} + + group_addr_json = show_ip_mroute_json[grp_addr] + + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % ( + dut, + src_address, + grp_addr, + ) + return errormsg + else: + if return_uptime: + uptime_dict[grp_addr][src_address] = {} + + mroutes = group_addr_json[src_address] + + if mroutes["installed"] != 0: + logger.info( + "[DUT %s]: mroute (%s,%s) is installed", dut, src_address, grp_addr + ) + + if "oil" not in mroutes: + if oil == "none" and mroutes["iif"] in iif: + logger.info( + "[DUT %s]: Verifying (%s, %s) mroute," + " [PASSED]!! Found Expected: " + "(iif: %s, oil: %s, installed: (%s,%s))", + dut, + src_address, + grp_addr, + mroutes["iif"], + oil, + src_address, + grp_addr, + ) + else: + errormsg = ( + "[DUT %s]: Verifying (%s, %s) mroute," + " [FAILED]!! " + "Expected: (oil: %s, installed:" + " (%s,%s)) Found: ( oil: none, " + "installed: (%s,%s))" + % ( + dut, + src_address, + grp_addr, + oil, + src_address, + grp_addr, + src_address, + grp_addr, + ) + ) + + return errormsg + + else: + found = False + for route, data in mroutes["oil"].items(): + if route in oil and route != "pimreg": + if ( + data["source"] == src_address + and data["group"] == grp_addr + and data["inboundInterface"] in iif + and data["outboundInterface"] in oil + ): + if return_uptime: + + uptime_dict[grp_addr][src_address] = data["upTime"] + + logger.info( + "[DUT %s]: Verifying (%s, %s)" + " mroute, [PASSED]!! 
" + "Found Expected: " + "(iif: %s, oil: %s, installed:" + " (%s,%s)", + dut, + src_address, + grp_addr, + data["inboundInterface"], + data["outboundInterface"], + data["source"], + data["group"], + ) + found = True + break + else: + continue + + if not found: + errormsg = ( + "[DUT %s]: Verifying (%s, %s)" + " mroute [FAILED]!! " + "Expected in: (iif: %s, oil: %s," + " installed: (%s,%s)) Found: " + "(iif: %s, oil: %s, " + "installed: (%s,%s))" + % ( + dut, + src_address, + grp_addr, + iif, + oil, + src_address, + grp_addr, + data["inboundInterface"], + data["outboundInterface"], + data["source"], + data["group"], + ) + ) + return errormsg + + else: + errormsg = "[DUT %s]: mroute (%s,%s) is not installed" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True if return_uptime == False else uptime_dict + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_rp_info( + tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None +): + """ + Verify pim rp info by running "show ip pim rp-info" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo`: JSON file handler + * `dut`: device under test + * `group_addresses`: IGMP group address + * `oif`: outbound interface name + * `rp`: RP address + * `source`: Source of RP + * `iamrp`: User defined RP + + Usage + ----- + dut = "r1" + result = verify_pim_rp_info(tgen, topo, dut, group_address, + rp=rp, source="BSR") + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying ip rp info", dut) + show_ip_rp_info_json = run_frr_cmd(rnode, "show ip pim rp-info json", isjson=True) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + if type(oif) is not list: + oif = [oif] + + for grp_addr in group_addresses: + if rp is None: + rp_details = find_rp_details(tgen, topo) + + if dut in rp_details: + iamRP = True + else: + iamRP = False + else: + show_ip_route_json = run_frr_cmd( + rnode, "show ip route connected json", isjson=True + ) + for _rp in show_ip_route_json.keys(): + if rp == _rp.split("/")[0]: + iamRP = True + break + else: + iamRP = False + + if rp not in show_ip_rp_info_json: + errormsg = "[DUT %s]: Verifying rp-info" "for rp_address %s [FAILED]!! " % ( + dut, + rp, + ) + return errormsg + else: + group_addr_json = show_ip_rp_info_json[rp] + + for rp_json in group_addr_json: + if oif is not None: + found = False + if rp_json["outboundInterface"] not in oif: + errormsg = ( + "[DUT %s]: Verifying OIF " + "for group %s and RP %s [FAILED]!! " + "Expected interfaces: (%s)," + " Found: (%s)" + % (dut, grp_addr, rp, oif, rp_json["outboundInterface"]) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying OIF " + "for group %s and RP %s [PASSED]!! " + "Found Expected: (%s)" + % (dut, grp_addr, rp, rp_json["outboundInterface"]) + ) + + if source is not None: + if rp_json["source"] != source: + errormsg = ( + "[DUT %s]: Verifying SOURCE " + "for group %s and RP %s [FAILED]!! " + "Expected: (%s)," + " Found: (%s)" % (dut, grp_addr, rp, source, rp_json["source"]) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying SOURCE " + "for group %s and RP %s [PASSED]!! 
" + "Found Expected: (%s)" % (dut, grp_addr, rp, rp_json["source"]) + ) + + if rp_json["group"] == grp_addr and iamrp is not None: + if iamRP: + if rp_json["iAmRP"]: + logger.info( + "[DUT %s]: Verifying group " + "and iAmRP [PASSED]!!" + " Found Expected: (%s, %s:%s)", + dut, + grp_addr, + "iAmRP", + rp_json["iAmRP"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying group" + "%s and iAmRP [FAILED]!! " + "Expected: (iAmRP: %s)," + " Found: (iAmRP: %s)" + % (dut, grp_addr, "true", rp_json["iAmRP"]) + ) + return errormsg + + if not iamRP: + if rp_json["iAmRP"] == False: + logger.info( + "[DUT %s]: Verifying group " + "and iAmNotRP [PASSED]!!" + " Found Expected: (%s, %s:%s)", + dut, + grp_addr, + "iAmRP", + rp_json["iAmRP"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying group" + "%s and iAmRP [FAILED]!! " + "Expected: (iAmRP: %s)," + " Found: (iAmRP: %s)" + % (dut, grp_addr, "false", rp_json["iAmRP"]) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_state( + tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None +): + """ + Verify pim state by running "show ip pim state" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `oil`: outbound interface + * `group_addresses`: IGMP group address + * `src_address`: source address, default = None + * installed_fl` : Installed flag + + Usage + ----- + dut = "r1" + iif = "r1-r3-eth1" + oil = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_pim_state(tgen, dut, iif, oil, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying pim state", dut) + show_pim_state_json = run_frr_cmd(rnode, "show ip pim state json", isjson=True) + + if installed_fl is None: + installed_fl = 1 + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + if src_address is None: + src_address = "*" + pim_state_json = show_pim_state_json[grp_addr][src_address] + else: + pim_state_json = show_pim_state_json[grp_addr][src_address] + + if pim_state_json["Installed"] == installed_fl: + logger.info( + "[DUT %s]: group %s is installed flag: %s", + dut, + grp_addr, + pim_state_json["Installed"], + ) + for interface, data in pim_state_json[iif].items(): + if interface != oil: + continue + + # Verify iif, oil and installed state + if ( + data["group"] == grp_addr + and data["installed"] == installed_fl + and data["inboundInterface"] == iif + and data["outboundInterface"] == oil + ): + logger.info( + "[DUT %s]: Verifying pim state for group" + " %s [PASSED]!! Found Expected: " + "(iif: %s, oil: %s, installed: %s) ", + dut, + grp_addr, + data["inboundInterface"], + data["outboundInterface"], + data["installed"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying pim state for group" + " %s, [FAILED]!! 
Expected: " + "(iif: %s, oil: %s, installed: %s) ", + "Found: (iif: %s, oil: %s, installed: %s)" + % ( + dut, + grp_addr, + iif, + oil, + "1", + data["inboundInterface"], + data["outboundInterface"], + data["installed"], + ), + ) + return errormsg + else: + errormsg = "[DUT %s]: %s install flag value not as expected" % ( + dut, + grp_addr, + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def verify_pim_interface_traffic(tgen, input_dict): + """ + Verify ip pim interface traffice by running + "show ip pim interface traffic" cli + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict(dict)`: defines DUT, what and from which interfaces + traffic needs to be verified + Usage + ----- + input_dict = { + "r1": { + "r1-r0-eth0": { + "helloRx": 0, + "helloTx": 1, + "joinRx": 0, + "joinTx": 0 + } + } + } + + result = verify_pim_interface_traffic(tgen, input_dict) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + output_dict = {} + for dut in input_dict.keys(): + if dut not in tgen.routers(): + continue + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying pim interface traffic", dut) + show_pim_intf_traffic_json = run_frr_cmd( + rnode, "show ip pim interface traffic json", isjson=True + ) + + output_dict[dut] = {} + for intf, data in input_dict[dut].items(): + interface_json = show_pim_intf_traffic_json[intf] + for state in data: + + # Verify Tx/Rx + if state in interface_json: + output_dict[dut][state] = interface_json[state] + else: + errormsg = ( + "[DUT %s]: %s is not present" + "for interface %s [FAILED]!! " % (dut, state, intf) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return output_dict + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None): + """ + Verify all PIM interface are up and running, config is verified + using "show ip pim interface" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : device under test + * `interface` : interface name + * `interface_ip` : interface ip address + + Usage + ----- + result = verify_pim_interfacetgen, topo, dut, interface=ens192, interface_ip=20.1.1.1) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if router != dut: + continue + + logger.info("[DUT: %s]: Verifying PIM interface status:", dut) + + rnode = tgen.routers()[dut] + show_ip_pim_interface_json = rnode.\ + vtysh_cmd("show ip pim interface json", isjson=True) + + logger.info("show_ip_pim_interface_json: \n %s", + show_ip_pim_interface_json) + + if interface_ip: + if interface in show_ip_pim_interface_json: + pim_intf_json = show_ip_pim_interface_json[interface] + if pim_intf_json["address"] != interface_ip: + errormsg = ("[DUT %s]: PIM interface " + "ip is not correct " + "[FAILED]!! Expected : %s, Found : %s" + %(dut, pim_intf_json["address"],interface_ip)) + return errormsg + else: + logger.info("[DUT %s]: PIM interface " + "ip is correct " + "[Passed]!! 
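verify_pim_interface_traffic() above does not judge the counters itself; it returns the current values so a test can compare two snapshots. A hedged sketch of that before/after pattern, with invented counter values:

# Sketch: compare two snapshots as returned by verify_pim_interface_traffic()
# above (one dict per DUT, keyed by counter name). Values are illustrative.
before = {"r1": {"helloTx": 10, "joinTx": 2}}
after = {"r1": {"helloTx": 12, "joinTx": 2}}
print("hello sent since snapshot:", after["r1"]["helloTx"] > before["r1"]["helloTx"])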
Expected : %s, Found : %s" + %(dut, pim_intf_json["address"],interface_ip)) + return True + else: + for destLink, data in topo["routers"][dut]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + pim_interface = data["interface"] + pim_intf_ip = data["ipv4"].split("/")[0] + + if pim_interface in show_ip_pim_interface_json: + pim_intf_json = show_ip_pim_interface_json\ + [pim_interface] + + # Verifying PIM interface + if pim_intf_json["address"] != pim_intf_ip and \ + pim_intf_json["state"] != "up": + errormsg = ("[DUT %s]: PIM interface: %s " + "PIM interface ip: %s, status check " + "[FAILED]!! Expected : %s, Found : %s" + %(dut, pim_interface, pim_intf_ip, + pim_interface, pim_intf_json["state"])) + return errormsg + + logger.info("[DUT %s]: PIM interface: %s, " + "interface ip: %s, status: %s" + " [PASSED]!!", + dut, pim_interface, pim_intf_ip, + pim_intf_json["state"]) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def clear_ip_pim_interface_traffic(tgen, topo): + """ + Clear ip pim interface traffice by running + "clear ip pim interface traffic" cli + + Parameters + ---------- + * `tgen`: topogen object + Usage + ----- + + result = clear_ip_pim_interface_traffic(tgen, topo) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for dut in tgen.routers(): + if "pim" not in topo["routers"][dut]: + continue + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Clearing pim interface traffic", dut) + result = run_frr_cmd(rnode, "clear ip pim interface traffic") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +def clear_ip_pim_interfaces(tgen, dut): + """ + Clear ip pim interface by running + "clear ip pim interfaces" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: Device Under Test + Usage + ----- + + result = clear_ip_pim_interfaces(tgen, dut) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + nh_before_clear = {} + nh_after_clear = {} + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verify pim neighbor before pim" " neighbor clear", dut) + # To add uptime initially + sleep(10) + run_json_before = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True) + + for key, value in run_json_before.items(): + if bool(value): + for _key, _value in value.items(): + nh_before_clear[key] = _value["upTime"] + + # Clearing PIM neighbors + logger.info("[DUT: %s]: Clearing pim interfaces", dut) + run_frr_cmd(rnode, "clear ip pim interfaces") + + logger.info("[DUT: %s]: Verify pim neighbor after pim" " neighbor clear", dut) + + found = False + + # Waiting for maximum 60 sec + fail_intf = [] + for retry in range(1, 13): + logger.info("[DUT: %s]: Waiting for 5 sec for PIM neighbors" " to come up", dut) + sleep(5) + run_json_after = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True) + found = True + for pim_intf in nh_before_clear.keys(): + if pim_intf not in run_json_after or not run_json_after[pim_intf]: + found = False + fail_intf.append(pim_intf) + + if found is True: + break + else: + errormsg = ( + "[DUT: %s]: pim neighborship is not formed for %s" + "after clear_ip_pim_interfaces %s [FAILED!!]", + dut, + fail_intf, + ) + return errormsg + + for key, value in run_json_after.items(): + if bool(value): + for 
_key, _value in value.items(): + nh_after_clear[key] = _value["upTime"] + + # Verify uptime for neighbors + for pim_intf in nh_before_clear.keys(): + d1 = datetime.datetime.strptime(nh_before_clear[pim_intf], "%H:%M:%S") + d2 = datetime.datetime.strptime(nh_after_clear[pim_intf], "%H:%M:%S") + if d2 >= d1: + errormsg = ( + "[DUT: %s]: PIM neighborship is not cleared for", + " interface %s [FAILED!!]", + dut, + pim_intf, + ) + + logger.info("[DUT: %s]: PIM neighborship is cleared [PASSED!!]") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +def clear_ip_igmp_interfaces(tgen, dut): + """ + Clear ip igmp interfaces by running + "clear ip igmp interfaces" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + + Usage + ----- + dut = "r1" + result = clear_ip_igmp_interfaces(tgen, dut) + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + group_before_clear = {} + group_after_clear = {} + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: IGMP group uptime before clear" " igmp groups:", dut) + igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True) + + total_groups_before_clear = igmp_json["totalGroups"] + + for key, value in igmp_json.items(): + if type(value) is not dict: + continue + + groups = value["groups"] + group = groups[0]["group"] + uptime = groups[0]["uptime"] + group_before_clear[group] = uptime + + logger.info("[DUT: %s]: Clearing ip igmp interfaces", dut) + result = run_frr_cmd(rnode, "clear ip igmp interfaces") + + # Waiting for maximum 60 sec + for retry in range(1, 13): + logger.info( + "[DUT: %s]: Waiting for 5 sec for igmp interfaces" " to come up", dut + ) + sleep(5) + igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True) + + total_groups_after_clear = igmp_json["totalGroups"] + + if total_groups_before_clear == total_groups_after_clear: + break + + for key, value in igmp_json.items(): + if type(value) is not dict: + continue + + groups = value["groups"] + group = groups[0]["group"] + uptime = groups[0]["uptime"] + group_after_clear[group] = uptime + + # Verify uptime for groups + for group in group_before_clear.keys(): + d1 = datetime.datetime.strptime(group_before_clear[group], "%H:%M:%S") + d2 = datetime.datetime.strptime(group_after_clear[group], "%H:%M:%S") + if d2 >= d1: + errormsg = ("[DUT: %s]: IGMP group is not cleared", " [FAILED!!]", dut) + + logger.info("[DUT: %s]: IGMP group is cleared [PASSED!!]") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +@retry(attempts=10, wait=2, return_is_str=True) +def clear_ip_mroute_verify(tgen, dut): + """ + Clear ip mroute by running "clear ip mroute" cli and verify + mroutes are up again after mroute clear + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: Device Under Test + Usage + ----- + + result = clear_ip_mroute_verify(tgen, dut) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + mroute_before_clear = {} + mroute_after_clear = {} + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: IP mroutes uptime before clear", dut) + mroute_json_1 = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + for group in mroute_json_1.keys(): + mroute_before_clear[group] = {} + for key in mroute_json_1[group].keys(): + for _key, _value in 
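The clear helpers above decide whether an uptime was actually reset by parsing the HH:MM:SS strings and requiring the post-clear value to be smaller than the pre-clear one. A self-contained sketch of that comparison with sample uptimes:

import datetime

# Sketch: the uptime comparison used by the clear_ip_* helpers above; the
# value read after the clear must be strictly lower than the value before it.
before = datetime.datetime.strptime("00:05:12", "%H:%M:%S")
after = datetime.datetime.strptime("00:00:07", "%H:%M:%S")
print("cleared" if after < before else "not cleared")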
mroute_json_1[group][key]["oil"].items(): + if _key != "pimreg": + mroute_before_clear[group][key] = _value["upTime"] + + logger.info("[DUT: %s]: Clearing ip mroute", dut) + result = run_frr_cmd(rnode, "clear ip mroute") + + # RFC 3376: 8.2. Query Interval - Default: 125 seconds + # So waiting for maximum 130 sec to get the igmp report + for retry in range(1, 26): + logger.info("[DUT: %s]: Waiting for 2 sec for mroutes" " to come up", dut) + sleep(5) + keys_json1 = mroute_json_1.keys() + mroute_json_2 = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + if bool(mroute_json_2): + keys_json2 = mroute_json_2.keys() + + for group in mroute_json_2.keys(): + flag = False + for key in mroute_json_2[group].keys(): + if "oil" not in mroute_json_2[group]: + continue + + for _key, _value in mroute_json_2[group][key]["oil"].items(): + if _key != "pimreg" and keys_json1 == keys_json2: + break + flag = True + if flag: + break + else: + continue + + for group in mroute_json_2.keys(): + mroute_after_clear[group] = {} + for key in mroute_json_2[group].keys(): + for _key, _value in mroute_json_2[group][key]["oil"].items(): + if _key != "pimreg": + mroute_after_clear[group][key] = _value["upTime"] + + # Verify uptime for mroute + for group in mroute_before_clear.keys(): + for source in mroute_before_clear[group].keys(): + if set(mroute_before_clear[group]) != set(mroute_after_clear[group]): + errormsg = ( + "[DUT: %s]: mroute (%s, %s) has not come" + " up after mroute clear [FAILED!!]" % (dut, source, group) + ) + return errormsg + + d1 = datetime.datetime.strptime( + mroute_before_clear[group][source], "%H:%M:%S" + ) + d2 = datetime.datetime.strptime( + mroute_after_clear[group][source], "%H:%M:%S" + ) + if d2 >= d1: + errormsg = "[DUT: %s]: IP mroute is not cleared" " [FAILED!!]" % (dut) + + logger.info("[DUT: %s]: IP mroute is cleared [PASSED!!]", dut) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +def clear_ip_mroute(tgen, dut=None): + """ + Clear ip mroute by running "clear ip mroute" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test, default None + + Usage + ----- + clear_ip_mroute(tgen, dut) + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + router_list = tgen.routers() + for router, rnode in router_list.items(): + if dut is not None and router != dut: + continue + + logger.debug("[DUT: %s]: Clearing ip mroute", router) + rnode.vtysh_cmd("clear ip mroute") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + +def reconfig_interfaces(tgen, topo, senderRouter, receiverRouter, packet=None): + """ + Configure interface ip for sender and receiver routers + as per bsr packet + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `senderRouter` : Sender router + * `receiverRouter` : Receiver router + * `packet` : BSR packet in raw format + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + + src_ip = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["src_ip"] + dest_ip = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["dest_ip"] + + for destLink, data in topo["routers"][senderRouter]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + sender_interface = data["interface"] + sender_interface_ip = 
data["ipv4"] + + config_data.append("interface {}".format(sender_interface)) + config_data.append("no ip address {}".format(sender_interface_ip)) + config_data.append("ip address {}".format(src_ip)) + + result = create_common_configuration( + tgen, senderRouter, config_data, "interface_config" + ) + if result is not True: + return False + + config_data = [] + links = topo["routers"][destLink]["links"] + pim_neighbor = {key: links[key] for key in [senderRouter]} + + data = pim_neighbor[senderRouter] + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + receiver_interface = data["interface"] + receiver_interface_ip = data["ipv4"] + + config_data.append("interface {}".format(receiver_interface)) + config_data.append("no ip address {}".format(receiver_interface_ip)) + config_data.append("ip address {}".format(dest_ip)) + + result = create_common_configuration( + tgen, receiverRouter, config_data, "interface_config" + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: reconfig_interfaces()") + return result + + +def add_rp_interfaces_and_pim_config(tgen, topo, interface, rp, rp_mapping): + """ + Add physical interfaces tp RP for all the RPs + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `interface` : RP interface + * `rp` : rp for given topology + * `rp_mapping` : dictionary of all groups and RPs + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + + for group, rp_list in rp_mapping.items(): + for _rp in rp_list: + config_data.append("interface {}".format(interface)) + config_data.append("ip address {}".format(_rp)) + config_data.append("ip pim") + + result = create_common_configuration( + tgen, rp, config_data, "interface_config" + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: add_rp_interfaces_and_pim_config()") + return result + + +def scapy_send_bsr_raw_packet( + tgen, topo, senderRouter, receiverRouter, packet=None, interval=1, count=1 +): + """ + Using scapy Raw() method to send BSR raw packet from one FRR + to other + + Parameters: + ----------- + * `tgen` : Topogen object + * `topo` : json file data + * `senderRouter` : Sender router + * `receiverRouter` : Receiver router + * `packet` : BSR packet in raw format + * `interval` : Interval between the packets + * `count` : Number of packets to be sent + + returns: + -------- + errormsg or True + """ + + global CWD + result = "" + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[senderRouter] + + for destLink, data in topo["routers"][senderRouter]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + sender_interface = data["interface"] + + packet = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["data"] + + if interval > 1 or count > 1: + cmd = ( + "nohup /usr/bin/python {}/send_bsr_packet.py '{}' '{}' " + "--interval={} --count={} &".format( + CWD, packet, sender_interface, interval, count + ) + ) + else: + cmd = ( + "/usr/bin/python {}/send_bsr_packet.py '{}' '{}' " + "--interval={} 
--count={}".format( + CWD, packet, sender_interface, interval, count + ) + ) + + logger.info("Scapy cmd: \n %s", cmd) + result = rnode.run(cmd) + + if result == "": + return result + + logger.debug("Exiting lib API: scapy_send_bsr_raw_packet") + return True + + +def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None): + """ + Find which RP is having lowest prioriy and returns rp IP + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `bsr`: BSR address + * 'grp': Group Address + + Usage + ----- + dut = "r1" + result = verify_pim_rp_info(tgen, dut, bsr) + + Returns: + dictionary: group and RP, which has to be installed as per + lowest priority or highest priority + """ + + rp_details = {} + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Fetching rp details from bsrp-info", dut) + bsrp_json = run_frr_cmd(rnode, "show ip pim bsrp-info json", isjson=True) + + if grp not in bsrp_json: + return {} + + for group, rp_data in bsrp_json.items(): + if group == "BSR Address" and bsrp_json["BSR Address"] == bsr: + continue + + if group != grp: + continue + + rp_priority = {} + rp_hash = {} + + for rp, value in rp_data.items(): + if rp == "Pending RP count": + continue + rp_priority[value["Rp Address"]] = value["Rp Priority"] + rp_hash[value["Rp Address"]] = value["Hash Val"] + + priority_dict = dict(zip(rp_priority.values(), rp_priority.keys())) + hash_dict = dict(zip(rp_hash.values(), rp_hash.keys())) + + # RP with lowest priority + if len(priority_dict) != 1: + rp_p, lowest_priority = sorted(rp_priority.items(), key=lambda x: x[1])[0] + rp_details[group] = rp_p + + # RP with highest hash value + if len(priority_dict) == 1: + rp_h, highest_hash = sorted(rp_hash.items(), key=lambda x: x[1])[-1] + rp_details[group] = rp_h + + # RP with highest IP address + if len(priority_dict) == 1 and len(hash_dict) == 1: + rp_details[group] = sorted(rp_priority.keys())[-1] + + return rp_details + + +@retry(attempts=6, wait=2, return_is_str=True) +def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None): + """ + Verify pim rp info by running "show ip pim rp-info" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo`: JSON file handler + * `dut`: device under test + * `grp_addr`: IGMP group address + * 'rp_source': source from which rp installed + * 'rpadd': rp address + + Usage + ----- + dut = "r1" + group_address = "225.1.1.1" + rp_source = "BSR" + result = verify_pim_rp_and_source(tgen, topo, dut, group_address, rp_source) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying ip rp info", dut) + show_ip_rp_info_json = run_frr_cmd(rnode, "show ip pim rp-info json", isjson=True) + + if rpadd != None: + rp_json = show_ip_rp_info_json[rpadd] + if rp_json[0]["group"] == grp_addr: + if rp_json[0]["source"] == rp_source: + logger.info( + "[DUT %s]: Verifying Group and rp_source [PASSED]" + "Found Expected: %s, %s" + % (dut, rp_json[0]["group"], rp_json[0]["source"]) + ) + return True + else: + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected (%s, %s) " + "Found (%s, %s)" + % ( + dut, + grp_addr, + rp_source, + rp_json[0]["group"], + rp_json[0]["source"], + ) + ) + return errormsg + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected: %s, %s but not found" % (dut, grp_addr, rp_source) + ) + return 
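The selection logic above prefers the RP advertising the numerically lowest priority and only falls back to the hash value (and finally the highest address) when priorities do not differ. A compact sketch of the priority step, with made-up RP addresses:

# Sketch: choose the RP with the lowest priority, as find_rp_from_bsrp_info()
# above does when more than one distinct priority is present.
rp_priority = {"1.0.2.17": 10, "1.0.3.17": 20}
best_rp, best_priority = sorted(rp_priority.items(), key=lambda x: x[1])[0]
print(best_rp, best_priority)  # 1.0.2.17 10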
errormsg + + for rp in show_ip_rp_info_json: + rp_json = show_ip_rp_info_json[rp] + logger.info("%s", rp_json) + if rp_json[0]["group"] == grp_addr: + if rp_json[0]["source"] == rp_source: + logger.info( + "[DUT %s]: Verifying Group and rp_source [PASSED]" + "Found Expected: %s, %s" + % (dut, rp_json[0]["group"], rp_json[0]["source"]) + ) + return True + else: + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected (%s, %s) " + "Found (%s, %s)" + % ( + dut, + grp_addr, + rp_source, + rp_json[0]["group"], + rp_json[0]["source"], + ) + ) + return errormsg + + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected: %s, %s but not found" % (dut, grp_addr, rp_source) + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return errormsg + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_bsr(tgen, topo, dut, bsr_ip): + """ + Verify all PIM interface are up and running, config is verified + using "show ip pim interface" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : device under test + * 'bsr' : bsr ip to be verified + + Usage + ----- + result = verify_pim_bsr(tgen, topo, dut, bsr_ip) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if router != dut: + continue + + logger.info("[DUT: %s]: Verifying PIM bsr status:", dut) + + rnode = tgen.routers()[dut] + pim_bsr_json = rnode.vtysh_cmd("show ip pim bsr json", isjson=True) + + logger.info("show_ip_pim_bsr_json: \n %s", pim_bsr_json) + + # Verifying PIM bsr + if pim_bsr_json["bsr"] != bsr_ip: + errormsg = ( + "[DUT %s]:" + "bsr status: not found" + "[FAILED]!! 
Expected : %s, Found : %s" + % (dut, bsr_ip, pim_bsr_json["bsr"]) + ) + return errormsg + + logger.info( + "[DUT %s]:" " bsr status: found, Address :%s" " [PASSED]!!", + dut, + pim_bsr_json["bsr"], + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None): + """ + Verify IP PIM upstream rpf, config is verified + using "show ip pim neighbor" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : devuce under test + * `interface` : upstream interface + * `group_addresses` : list of group address for which upstream info + needs to be checked + * `rp` : RP address + + Usage + ----- + result = verify_ip_pim_upstream_rpf(gen, topo, dut, interface, + group_addresses, rp=None) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if "pim" in topo["routers"][dut]: + + logger.info("[DUT: %s]: Verifying ip pim upstream rpf:", dut) + + rnode = tgen.routers()[dut] + show_ip_pim_upstream_rpf_json = rnode.vtysh_cmd( + "show ip pim upstream-rpf json", isjson=True + ) + + logger.info( + "show_ip_pim_upstream_rpf_json: \n %s", show_ip_pim_upstream_rpf_json + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + for destLink, data in topo["routers"][dut]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" not in topo["routers"][destLink]: + continue + + # Verify RP info + if rp is None: + rp_details = find_rp_details(tgen, topo) + else: + rp_details = {dut: ip} + rp_details[dut] = rp + + if dut in rp_details: + pim_nh_intf_ip = topo["routers"][dut]["links"]["lo"]["ipv4"].split( + "/" + )[0] + else: + if destLink not in interface: + continue + + links = topo["routers"][destLink]["links"] + pim_neighbor = {key: links[key] for key in [dut]} + + data = pim_neighbor[dut] + if "pim" in data and data["pim"] == "enable": + pim_nh_intf_ip = data["ipv4"].split("/")[0] + + upstream_rpf_json = show_ip_pim_upstream_rpf_json[grp_addr]["*"] + + # Verifying ip pim upstream rpf + if ( + upstream_rpf_json["rpfInterface"] == interface + and upstream_rpf_json["ribNexthop"] != pim_nh_intf_ip + ): + errormsg = ( + "[DUT %s]: Verifying group: %s, " + "rpf interface: %s, " + " rib Nexthop check [FAILED]!!" + "Expected: %s, Found: %s" + % ( + dut, + grp_addr, + interface, + pim_nh_intf_ip, + upstream_rpf_json["ribNexthop"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying group: %s," + " rpf interface: %s, " + " rib Nexthop: %s [PASSED]!!", + dut, + grp_addr, + interface, + pim_nh_intf_ip, + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def enable_disable_pim_unicast_bsm(tgen, router, intf, enable=True): + """ + Helper API to enable or disable pim bsm on interfaces + + Parameters + ---------- + * `tgen` : Topogen object + * `router` : router id to be configured. 
+ * `intf` : Interface to be configured + * `enable` : this flag denotes if config should be enabled or disabled + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + cmd = "interface {}".format(intf) + config_data.append(cmd) + + if enable == True: + config_data.append("ip pim unicast-bsm") + else: + config_data.append("no ip pim unicast-bsm") + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=False + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def enable_disable_pim_bsm(tgen, router, intf, enable=True): + """ + Helper API to enable or disable pim bsm on interfaces + + Parameters + ---------- + * `tgen` : Topogen object + * `router` : router id to be configured. + * `intf` : Interface to be configured + * `enable` : this flag denotes if config should be enabled or disabled + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + cmd = "interface {}".format(intf) + config_data.append(cmd) + + if enable is True: + config_data.append("ip pim bsm") + else: + config_data.append("no ip pim bsm") + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=False + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None): + """ + Verify ip pim join by running "show ip pim join" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo`: JSON file handler + * `dut`: device under test + * `interface`: interface name, from which PIM join would come + * `group_addresses`: IGMP group address + * `src_address`: Source address + + Usage + ----- + dut = "r1" + interface = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_ip_pim_join(tgen, dut, star, group_address, interface) + + Returns + ------- + errormsg(str) or True + """ + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying pim join", dut) + show_pim_join_json = run_frr_cmd(rnode, "show ip pim join json", isjson=True) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + # Verify if IGMP is enabled in DUT + if "igmp" not in topo["routers"][dut]: + pim_join = True + else: + pim_join = False + + interface_json = show_pim_join_json[interface] + + grp_addr = grp_addr.split("/")[0] + for source, data in interface_json[grp_addr].items(): + + # Verify pim join + if pim_join: + if data["group"] == grp_addr and data["channelJoinName"] == "JOIN": + logger.info( + "[DUT %s]: Verifying pim join for group: %s" + "[PASSED]!! Found Expected: (%s)", + dut, + grp_addr, + data["channelJoinName"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying pim join for group: %s" + "[FAILED]!! 
Expected: (%s) " + "Found: (%s)" % (dut, grp_addr, "JOIN", data["channelJoinName"]) + ) + return errormsg + + if not pim_join: + if data["group"] == grp_addr and data["channelJoinName"] == "NOINFO": + logger.info( + "[DUT %s]: Verifying pim join for group: %s" + "[PASSED]!! Found Expected: (%s)", + dut, + grp_addr, + data["channelJoinName"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying pim join for group: %s" + "[FAILED]!! Expected: (%s) " + "Found: (%s)" + % (dut, grp_addr, "NOINFO", data["channelJoinName"]) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_dict=True) +def verify_igmp_config(tgen, input_dict, stats_return=False): + """ + Verify igmp interface details, verifying following configs: + timerQueryInterval + timerQueryResponseIntervalMsec + lastMemberQueryCount + timerLastMemberQueryMsec + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict` : Input dict data, required to verify + timer + * `stats_return`: If user wants API to return statistics + + Usage + ----- + input_dict ={ + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "query": { + "query-interval" : 200, + "query-max-response-time" : 100 + }, + "statistics": { + "queryV2" : 2, + "reportV2" : 1 + } + } + } + } + } + } + } + result = verify_igmp_config(tgen, input_dict, stats_return) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for dut in input_dict.keys(): + rnode = tgen.routers()[dut] + + for interface, data in input_dict[dut]["igmp"]["interfaces"].items(): + + statistics = False + report = False + if "statistics" in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"]: + statistics = True + cmd = "show ip igmp statistics" + else: + cmd = "show ip igmp" + + logger.info( + "[DUT: %s]: Verifying IGMP interface %s detail:", dut, interface + ) + + if statistics: + if ( + "report" + in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"][ + "statistics" + ] + ): + report = True + + if statistics and report: + show_ip_igmp_intf_json = run_frr_cmd( + rnode, "{} json".format(cmd, interface), isjson=True + ) + intf_detail_json = show_ip_igmp_intf_json["global"] + else: + show_ip_igmp_intf_json = run_frr_cmd( + rnode, "{} interface {} json".format(cmd, interface), isjson=True + ) + + if not report: + if interface not in show_ip_igmp_intf_json: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " is not present in CLI output " + "[FAILED]!! " % (dut, interface) + ) + return errormsg + + else: + intf_detail_json = show_ip_igmp_intf_json[interface] + + if stats_return: + igmp_stats = {} + + if "statistics" in data["igmp"]: + if stats_return: + igmp_stats["statistics"] = {} + for query, value in data["igmp"]["statistics"].items(): + if query == "queryV2": + # Verifying IGMP interface queryV2 statistics + if stats_return: + igmp_stats["statistics"][query] = intf_detail_json[ + "queryV2" + ] + + else: + if intf_detail_json["queryV2"] != value: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " queryV2 statistics verification " + "[FAILED]!! 
Expected : %s," + " Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["queryV2"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "queryV2 statistics is %s", + dut, + interface, + value, + ) + + if query == "reportV2": + # Verifying IGMP interface timerV2 statistics + if stats_return: + igmp_stats["statistics"][query] = intf_detail_json[ + "reportV2" + ] + + else: + if intf_detail_json["reportV2"] <= value: + errormsg = ( + "[DUT %s]: IGMP reportV2 " + "statistics verification " + "[FAILED]!! Expected : %s " + "or more, Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["reportV2"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP reportV2 " "statistics is %s", + dut, + intf_detail_json["reportV2"], + ) + + if "query" in data["igmp"]: + for query, value in data["igmp"]["query"].items(): + if query == "query-interval": + # Verifying IGMP interface query interval timer + if intf_detail_json["timerQueryInterval"] != value: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " query-interval verification " + "[FAILED]!! Expected : %s," + " Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["timerQueryInterval"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " "query-interval is %s", + dut, + interface, + value, + ) + + if query == "query-max-response-time": + # Verifying IGMP interface query max response timer + if ( + intf_detail_json["timerQueryResponseIntervalMsec"] + != value * 100 + ): + errormsg = ( + "[DUT %s]: IGMP interface: %s " + "query-max-response-time " + "verification [FAILED]!!" + " Expected : %s, Found : %s" + % ( + dut, + interface, + value * 1000, + intf_detail_json["timerQueryResponseIntervalMsec"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "query-max-response-time is %s ms", + dut, + interface, + value * 100, + ) + + if query == "last-member-query-count": + # Verifying IGMP interface last member query count + if intf_detail_json["lastMemberQueryCount"] != value: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + "last-member-query-count " + "verification [FAILED]!!" + " Expected : %s, Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["lastMemberQueryCount"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "last-member-query-count is %s ms", + dut, + interface, + value * 1000, + ) + + if query == "last-member-query-interval": + # Verifying IGMP interface last member query interval + if ( + intf_detail_json["timerLastMemberQueryMsec"] + != value * 100 * intf_detail_json["lastMemberQueryCount"] + ): + errormsg = ( + "[DUT %s]: IGMP interface: %s " + "last-member-query-interval " + "verification [FAILED]!!" + " Expected : %s, Found : %s" + % ( + dut, + interface, + value * 1000, + intf_detail_json["timerLastMemberQueryMsec"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "last-member-query-interval is %s ms", + dut, + interface, + value * intf_detail_json["lastMemberQueryCount"] * 100, + ) + + if "version" in data["igmp"]: + # Verifying IGMP interface state is up + if intf_detail_json["state"] != "up": + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " state: %s verification " + "[FAILED]!!" 
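The timer checks above rely on a unit conversion: query-max-response-time and last-member-query-interval are configured in tenths of a second, while the JSON fields report milliseconds, so the expected value is the configured number multiplied by 100 (and additionally by the last-member query count for the latter). A small sketch of that conversion, assuming the deciseconds semantics of the FRR CLI:

# Sketch: the unit conversion verify_igmp_config() above depends on; a CLI
# value of 100 (tenths of a second) should show up as 10000 milliseconds in
# timerQueryResponseIntervalMsec.
configured = 100
expected_msec = configured * 100
print(expected_msec)  # 10000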
% (dut, interface, intf_detail_json["state"]) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " "state: %s", + dut, + interface, + intf_detail_json["state"], + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True if stats_return == False else igmp_stats + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_config(tgen, input_dict): + """ + Verify pim interface details, verifying following configs: + drPriority + helloPeriod + helloReceived + helloSend + drAddress + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict` : Input dict data, required to verify + timer + + Usage + ----- + input_dict ={ + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "pim": { + "drPriority" : 10, + "helloPeriod" : 5 + } + } + } + } + } + } + } + result = verify_pim_config(tgen, input_dict) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for dut in input_dict.keys(): + rnode = tgen.routers()[dut] + + for interface, data in input_dict[dut]["pim"]["interfaces"].items(): + + logger.info("[DUT: %s]: Verifying PIM interface %s detail:", dut, interface) + + show_ip_igmp_intf_json = run_frr_cmd( + rnode, "show ip pim interface {} json".format(interface), isjson=True + ) + + if interface not in show_ip_igmp_intf_json: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " is not present in CLI output " + "[FAILED]!! " % (dut, interface) + ) + return errormsg + + intf_detail_json = show_ip_igmp_intf_json[interface] + + for config, value in data.items(): + if config == "helloPeriod": + # Verifying PIM interface helloPeriod + if intf_detail_json["helloPeriod"] != value: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " helloPeriod verification " + "[FAILED]!! Expected : %s," + " Found : %s" + % (dut, interface, value, intf_detail_json["helloPeriod"]) + ) + return errormsg + + logger.info( + "[DUT %s]: PIM interface: %s " "helloPeriod is %s", + dut, + interface, + value, + ) + + if config == "drPriority": + # Verifying PIM interface drPriority + if intf_detail_json["drPriority"] != value: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " drPriority verification " + "[FAILED]!! Expected : %s," + " Found : %s" + % (dut, interface, value, intf_detail_json["drPriority"]) + ) + return errormsg + + logger.info( + "[DUT %s]: PIM interface: %s " "drPriority is %s", + dut, + interface, + value, + ) + + if config == "drAddress": + # Verifying PIM interface drAddress + if intf_detail_json["drAddress"] != value: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " drAddress verification " + "[FAILED]!! 
Expected : %s," + " Found : %s" + % (dut, interface, value, intf_detail_json["drAddress"]) + ) + return errormsg + + logger.info( + "[DUT %s]: PIM interface: %s " "drAddress is %s", + dut, + interface, + value, + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=21, wait=2, return_is_dict=True) +def verify_multicast_traffic(tgen, input_dict, return_traffic=False): + """ + Verify multicast traffic by running + "show multicast traffic count json" cli + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict(dict)`: defines DUT, what and for which interfaces + traffic needs to be verified + * `return_traffic`: returns traffic stats + Usage + ----- + input_dict = { + "r1": { + "traffic_received": ["r1-r0-eth0"], + "traffic_sent": ["r1-r0-eth0"] + } + } + + result = verify_multicast_traffic(tgen, input_dict) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + traffic_dict = {} + for dut in input_dict.keys(): + if dut not in tgen.routers(): + continue + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying multicast " "traffic", dut) + + show_multicast_traffic_json = run_frr_cmd( + rnode, "show ip multicast count json", isjson=True + ) + + for traffic_type, interfaces in input_dict[dut].items(): + traffic_dict[traffic_type] = {} + if traffic_type == "traffic_received": + for interface in interfaces: + traffic_dict[traffic_type][interface] = {} + interface_json = show_multicast_traffic_json[interface] + + if interface_json["pktsIn"] == 0 and interface_json["bytesIn"] == 0: + errormsg = ( + "[DUT %s]: Multicast traffic is " + "not received on interface %s " + "PktsIn: %s, BytesIn: %s " + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsIn"], + interface_json["bytesIn"], + ) + ) + return errormsg + + elif ( + interface_json["pktsIn"] != 0 and interface_json["bytesIn"] != 0 + ): + + traffic_dict[traffic_type][interface][ + "pktsIn" + ] = interface_json["pktsIn"] + traffic_dict[traffic_type][interface][ + "bytesIn" + ] = interface_json["bytesIn"] + + logger.info( + "[DUT %s]: Multicast traffic is " + "received on interface %s " + "PktsIn: %s, BytesIn: %s " + "[PASSED]!!" + % ( + dut, + interface, + interface_json["pktsIn"], + interface_json["bytesIn"], + ) + ) + + else: + errormsg = ( + "[DUT %s]: Multicast traffic interface %s:" + " Miss-match in " + "PktsIn: %s, BytesIn: %s" + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsIn"], + interface_json["bytesIn"], + ) + ) + return errormsg + + if traffic_type == "traffic_sent": + traffic_dict[traffic_type] = {} + for interface in interfaces: + traffic_dict[traffic_type][interface] = {} + interface_json = show_multicast_traffic_json[interface] + + if ( + interface_json["pktsOut"] == 0 + and interface_json["bytesOut"] == 0 + ): + errormsg = ( + "[DUT %s]: Multicast traffic is " + "not received on interface %s " + "PktsIn: %s, BytesIn: %s" + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsOut"], + interface_json["bytesOut"], + ) + ) + return errormsg + + elif ( + interface_json["pktsOut"] != 0 + and interface_json["bytesOut"] != 0 + ): + + traffic_dict[traffic_type][interface][ + "pktsOut" + ] = interface_json["pktsOut"] + traffic_dict[traffic_type][interface][ + "bytesOut" + ] = interface_json["bytesOut"] + + logger.info( + "[DUT %s]: Multicast traffic is " + "received on interface %s " + "PktsOut: %s, BytesOut: %s " + "[PASSED]!!" 
+ % ( + dut, + interface, + interface_json["pktsOut"], + interface_json["bytesOut"], + ) + ) + else: + errormsg = ( + "[DUT %s]: Multicast traffic interface %s:" + " Miss-match in " + "PktsOut: %s, BytesOut: %s " + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsOut"], + interface_json["bytesOut"], + ) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True if return_traffic == False else traffic_dict + + +def get_refCount_for_mroute(tgen, dut, iif, src_address, group_addresses): + """ + Verify upstream inbound interface is updated correctly + by running "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `src_address`: source address + * `group_addresses`: IGMP group address + + Usage + ----- + dut = "r1" + iif = "r1-r0-eth0" + src_address = "*" + group_address = "225.1.1.1" + result = get_refCount_for_mroute(tgen, dut, iif, src_address, + group_address) + + Returns + ------- + refCount(int) + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + refCount = 0 + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying refCount for mroutes: ", dut) + show_ip_pim_upstream_json = run_frr_cmd( + rnode, "show ip pim upstream json", isjson=True + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + # Verify group address + if grp_addr not in show_ip_pim_upstream_json: + errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % ( + dut, + grp_addr, + ) + return errormsg + group_addr_json = show_ip_pim_upstream_json[grp_addr] + + # Verify source address + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + # Verify Inbound Interface + if group_addr_json[src_address]["inboundInterface"] == iif: + refCount = group_addr_json[src_address]["refCount"] + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return refCount + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag): + """ + Verify flag state for mroutes and make sure (*, G)/(S, G) are having + coorect flags by running "show ip mroute" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `src_address`: source address + * `group_addresses`: IGMP group address + * `flag`: flag state, needs to be verified + + Usage + ----- + dut = "r1" + flag = "SC" + group_address = "225.1.1.1" + result = verify_multicast_flag_state(tgen, dut, src_address, + group_address, flag) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying flag state for mroutes", dut) + show_ip_mroute_json = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + if bool(show_ip_mroute_json) == False: + error_msg = "[DUT %s]: mroutes are not present or flushed out !!" 
% (dut) + return error_msg + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + if grp_addr not in show_ip_mroute_json: + errormsg = ( + "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! ", + dut, + src_address, + grp_addr, + ) + return errormsg + else: + group_addr_json = show_ip_mroute_json[grp_addr] + + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % ( + dut, + src_address, + grp_addr, + ) + return errormsg + else: + mroutes = group_addr_json[src_address] + + if mroutes["installed"] != 0: + logger.info( + "[DUT %s]: mroute (%s,%s) is installed", dut, src_address, grp_addr + ) + + if mroutes["flags"] != flag: + errormsg = ( + "[DUT %s]: Verifying flag for (%s, %s) " + "mroute [FAILED]!! " + "Expected: %s Found: %s" + % (dut, src_address, grp_addr, flag, mroutes["flags"]) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying flag for (%s, %s)" + " mroute, [PASSED]!! " + "Found Expected: %s", + dut, + src_address, + grp_addr, + mroutes["flags"], + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip): + """ + Verify all IGMP interface are up and running, config is verified + using "show ip igmp interface" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : device under test + * `igmp_iface` : interface name + * `interface_ip` : interface ip address + + Usage + ----- + result = verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if router != dut: + continue + + logger.info("[DUT: %s]: Verifying PIM interface status:", + dut) + + rnode = tgen.routers()[dut] + show_ip_igmp_interface_json = \ + run_frr_cmd(rnode, "show ip igmp interface json", isjson=True) + + if igmp_iface in show_ip_igmp_interface_json: + igmp_intf_json = show_ip_igmp_interface_json[igmp_iface] + # Verifying igmp interface + if igmp_intf_json["address"] != interface_ip: + errormsg = ("[DUT %s]: igmp interface ip is not correct " + "[FAILED]!! Expected : %s, Found : %s" + %(dut, igmp_intf_json["address"], interface_ip)) + return errormsg + + logger.info("[DUT %s]: igmp interface: %s, " + "interface ip: %s" + " [PASSED]!!", + dut, igmp_iface, interface_ip) + else: + errormsg = ("[DUT %s]: igmp interface: %s " + "igmp interface ip: %s, is not present " + %(dut, igmp_iface, interface_ip)) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True diff --git a/tests/topotests/lib/send_bsr_packet.py b/tests/topotests/lib/send_bsr_packet.py new file mode 100755 index 0000000000..c226899324 --- /dev/null +++ b/tests/topotests/lib/send_bsr_packet.py @@ -0,0 +1,58 @@ +# +# Copyright (c) 2019 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. 
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +import sys +import argparse +from scapy.all import Raw, sendp +import binascii + + +def send_packet(packet, iface, interval, count): + """ + Read BSR packet in Raw format and send it to specified interface + + Parameter: + --------- + * `packet` : BSR packet in raw format + * `interface` : Interface from which packet would be send + * `interval` : Interval between the packets + * `count` : Number of packets to be sent + """ + + data = binascii.a2b_hex(packet) + p = Raw(load=data) + p.show() + sendp(p, inter=int(interval), iface=iface, count=int(count)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Send BSR Raw packet") + parser.add_argument("packet", help="Packet in raw format") + parser.add_argument("iface", help="Packet send to this ineterface") + parser.add_argument("--interval", help="Interval between packets", default=0) + parser.add_argument( + "--count", help="Number of times packet is sent repetitively", default=0 + ) + args = parser.parse_args() + + if not args.packet or not args.iface: + sys.exit(1) + + send_packet(args.packet, args.iface, args.interval, args.count) diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py new file mode 100644 index 0000000000..5112500e0b --- /dev/null +++ b/tests/topotests/lib/snmptest.py @@ -0,0 +1,152 @@ +# +# topogen.py +# Library of helper functions for NetDEF Topology Tests +# +# Copyright (c) 2020 by Volta Networks +# +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +SNMP library to test snmp walks and gets + +Basic usage instructions: + +* define an SnmpTester class giving a router, address, community and version +* use test_oid or test_walk to check values in MIBS +* see tests/topotest/simple-snmp-test/test_simple_snmp.py for example +""" + +from topolog import logger + + +class SnmpTester(object): + "A helper class for testing SNMP" + + def __init__(self, router, iface, community, version): + self.community = community + self.version = version + self.router = router + self.iface = iface + logger.info( + "created SNMP tester: SNMPv{0} community:{1}".format( + self.version, self.community + ) + ) + + def _snmp_config(self): + """ + Helper function to build a string with SNMP + configuration for commands. 
+ """ + return "-v {0} -c {1} {2}".format(self.version, self.community, self.iface) + + @staticmethod + def _get_snmp_value(snmp_output): + tokens = snmp_output.strip().split() + + num_value_tokens = len(tokens) - 3 + + # this copes with the emptys string return + if num_value_tokens == 0: + return tokens[2] + + if num_value_tokens > 1: + output = "" + index = 3 + while index < len(tokens) - 1: + output += "{} ".format(tokens[index]) + index += 1 + output += "{}".format(tokens[index]) + return output + # third token is the value of the object + return tokens[3] + + @staticmethod + def _get_snmp_oid(snmp_output): + tokens = snmp_output.strip().split() + + # third token onwards is the value of the object + return tokens[0].split(".", 1)[1] + + @staticmethod + def _get_snmp_oid(snmp_output): + tokens = snmp_output.strip().split() + +# if len(tokens) > 5: +# return None + + + # third token is the value of the object + return tokens[0].split('.',1)[1] + + def _parse_multiline(self, snmp_output): + results = snmp_output.strip().split("\r\n") + + out_dict = {} + out_list = [] + for response in results: + out_dict[self._get_snmp_oid(response)] = self._get_snmp_value(response) + out_list.append(self._get_snmp_value(response)) + + return out_dict, out_list + + def get(self, oid): + cmd = "snmpget {0} {1}".format(self._snmp_config(), oid) + + result = self.router.cmd(cmd) + if "not found" in result: + return None + return self._get_snmp_value(result) + + def get_next(self, oid): + cmd = "snmpgetnext {0} {1}".format(self._snmp_config(), oid) + + result = self.router.cmd(cmd) + print("get_next: {}".format(result)) + if "not found" in result: + return None + return self._get_snmp_value(result) + + def walk(self, oid): + cmd = "snmpwalk {0} {1}".format(self._snmp_config(), oid) + + result = self.router.cmd(cmd) + return self._parse_multiline(result) + + def test_oid(self, oid, value): + print("oid: {}".format(self.get_next(oid))) + return self.get_next(oid) == value + + def test_oid_walk(self, oid, values, oids=None): + results_dict, results_list = self.walk(oid) + print("test_oid_walk: {} {}".format(oid, results_dict)) + if oids is not None: + index = 0 + for oid in oids: + # avoid key error for missing keys + if not oid in results_dict.keys(): + print("FAIL: missing oid key {}".format(oid)) + return False + if results_dict[oid] != values[index]: + print("FAIL{} {} |{}| == |{}|".format(oid, index, results_dict[oid], values[index])) + return False + index += 1 + return True + + # Return true if 'values' is a subset of 'results_list' + print("test {} == {}".format(results_list[: len(values)], values)) + return results_list[: len(values)] == values diff --git a/tests/topotests/lib/test/test_json.py b/tests/topotests/lib/test/test_json.py index b85e193d3b..7b3c8593cc 100755 --- a/tests/topotests/lib/test/test_json.py +++ b/tests/topotests/lib/test/test_json.py @@ -107,16 +107,25 @@ def test_json_intersect_multilevel_true(): dcomplete = { "i1": "item1", "i2": "item2", - "i3": {"i100": "item100",}, + "i3": { + "i100": "item100", + }, "i4": { - "i41": {"i411": "item411",}, - "i42": {"i421": "item421", "i422": "item422",}, + "i41": { + "i411": "item411", + }, + "i42": { + "i421": "item421", + "i422": "item422", + }, }, } dsub1 = { "i1": "item1", - "i3": {"i100": "item100",}, + "i3": { + "i100": "item100", + }, "i10": None, } dsub2 = { @@ -126,10 +135,36 @@ def test_json_intersect_multilevel_true(): } dsub3 = { "i2": "item2", - "i4": {"i41": {"i411": "item411",}, "i42": {"i422": "item422", "i450": None,}}, + 
"i4": { + "i41": { + "i411": "item411", + }, + "i42": { + "i422": "item422", + "i450": None, + }, + }, + } + dsub4 = { + "i2": "item2", + "i4": { + "i41": {}, + "i42": { + "i450": None, + }, + }, + } + dsub5 = { + "i2": "item2", + "i3": { + "i100": "item100", + }, + "i4": { + "i42": { + "i450": None, + } + }, } - dsub4 = {"i2": "item2", "i4": {"i41": {}, "i42": {"i450": None,}}} - dsub5 = {"i2": "item2", "i3": {"i100": "item100",}, "i4": {"i42": {"i450": None,}}} assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -144,17 +179,26 @@ def test_json_intersect_multilevel_false(): dcomplete = { "i1": "item1", "i2": "item2", - "i3": {"i100": "item100",}, + "i3": { + "i100": "item100", + }, "i4": { - "i41": {"i411": "item411",}, - "i42": {"i421": "item421", "i422": "item422",}, + "i41": { + "i411": "item411", + }, + "i42": { + "i421": "item421", + "i422": "item422", + }, }, } # Incorrect sub-level value dsub1 = { "i1": "item1", - "i3": {"i100": "item00",}, + "i3": { + "i100": "item00", + }, "i10": None, } # Inexistent sub-level @@ -166,14 +210,41 @@ def test_json_intersect_multilevel_false(): # Inexistent sub-level value dsub3 = { "i1": "item1", - "i3": {"i100": None,}, + "i3": { + "i100": None, + }, } # Inexistent sub-sub-level value - dsub4 = {"i4": {"i41": {"i412": "item412",}, "i42": {"i421": "item421",}}} + dsub4 = { + "i4": { + "i41": { + "i412": "item412", + }, + "i42": { + "i421": "item421", + }, + } + } # Invalid sub-sub-level value - dsub5 = {"i4": {"i41": {"i411": "item411",}, "i42": {"i421": "item420000",}}} + dsub5 = { + "i4": { + "i41": { + "i411": "item411", + }, + "i42": { + "i421": "item420000", + }, + } + } # sub-sub-level should be value - dsub6 = {"i4": {"i41": {"i411": "item411",}, "i42": "foobar",}} + dsub6 = { + "i4": { + "i41": { + "i411": "item411", + }, + "i42": "foobar", + } + } assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None @@ -187,7 +258,15 @@ def test_json_with_list_sucess(): "Test successful json comparisons that have lists." dcomplete = { - "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "list": [ + { + "i1": "item 1", + "i2": "item 2", + }, + { + "i10": "item 10", + }, + ], "i100": "item 100", } @@ -197,12 +276,19 @@ def test_json_with_list_sucess(): } # Test list correct list items dsub2 = { - "list": [{"i1": "item 1",},], + "list": [ + { + "i1": "item 1", + }, + ], "i100": "item 100", } # Test list correct list size dsub3 = { - "list": [{}, {},], + "list": [ + {}, + {}, + ], } assert json_cmp(dcomplete, dsub1) is None @@ -214,7 +300,15 @@ def test_json_with_list_failure(): "Test failed json comparisons that have lists." dcomplete = { - "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "list": [ + { + "i1": "item 1", + "i2": "item 2", + }, + { + "i10": "item 10", + }, + ], "i100": "item 100", } @@ -224,12 +318,20 @@ def test_json_with_list_failure(): } # Test list incorrect list items dsub2 = { - "list": [{"i1": "item 2",},], + "list": [ + { + "i1": "item 2", + }, + ], "i100": "item 100", } # Test list correct list size dsub3 = { - "list": [{}, {}, {},], + "list": [ + {}, + {}, + {}, + ], } assert json_cmp(dcomplete, dsub1) is not None @@ -241,20 +343,52 @@ def test_json_list_start_success(): "Test JSON encoded data that starts with a list that should succeed." 
dcomplete = [ - {"id": 100, "value": "abc",}, - {"id": 200, "value": "abcd",}, - {"id": 300, "value": "abcde",}, + { + "id": 100, + "value": "abc", + }, + { + "id": 200, + "value": "abcd", + }, + { + "id": 300, + "value": "abcde", + }, ] - dsub1 = [{"id": 100, "value": "abc",}] + dsub1 = [ + { + "id": 100, + "value": "abc", + } + ] - dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abcd",}] + dsub2 = [ + { + "id": 100, + "value": "abc", + }, + { + "id": 200, + "value": "abcd", + }, + ] - dsub3 = [{"id": 300, "value": "abcde",}] + dsub3 = [ + { + "id": 300, + "value": "abcde", + } + ] dsub4 = [] - dsub5 = [{"id": 100,}] + dsub5 = [ + { + "id": 100, + } + ] assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -272,13 +406,44 @@ def test_json_list_start_failure(): {"id": 300, "value": "abcde"}, ] - dsub1 = [{"id": 100, "value": "abcd",}] + dsub1 = [ + { + "id": 100, + "value": "abcd", + } + ] - dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abc",}] + dsub2 = [ + { + "id": 100, + "value": "abc", + }, + { + "id": 200, + "value": "abc", + }, + ] - dsub3 = [{"id": 100, "value": "abc",}, {"id": 350, "value": "abcde",}] + dsub3 = [ + { + "id": 100, + "value": "abc", + }, + { + "id": 350, + "value": "abcde", + }, + ] - dsub4 = [{"value": "abcx",}, {"id": 300, "value": "abcde",}] + dsub4 = [ + { + "value": "abcx", + }, + { + "id": 300, + "value": "abcde", + }, + ] assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index eaf7f90479..f958cc11d3 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -336,7 +336,9 @@ class Topogen(object): for gear in self.gears.values(): errors += gear.stop() if len(errors) > 0: - logger.error("Errors found post shutdown - details follow: {}".format(errors)) + logger.error( + "Errors found post shutdown - details follow: {}".format(errors) + ) self.net.stop() @@ -552,6 +554,8 @@ class TopoRouter(TopoGear): RD_SHARP = 14 RD_BABEL = 15 RD_PBRD = 16 + RD_PATH = 17 + RD_SNMP = 18 RD = { RD_ZEBRA: "zebra", RD_RIP: "ripd", @@ -569,6 +573,8 @@ class TopoRouter(TopoGear): RD_SHARP: "sharpd", RD_BABEL: "babeld", RD_PBRD: "pbrd", + RD_PATH: "pathd", + RD_SNMP: "snmpd", } def __init__(self, tgen, cls, name, **params): @@ -653,7 +659,7 @@ class TopoRouter(TopoGear): Possible daemon values are: TopoRouter.RD_ZEBRA, TopoRouter.RD_RIP, TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6, TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP, - TopoRouter.RD_PIM, TopoRouter.RD_PBR. + TopoRouter.RD_PIM, TopoRouter.RD_PBR, TopoRouter.RD_SNMP. 
""" daemonstr = self.RD.get(daemon) self.logger.info('loading "{}" configuration: {}'.format(daemonstr, source)) diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py index f2fafa5e2a..fcc6c19868 100644 --- a/tests/topotests/lib/topojson.py +++ b/tests/topotests/lib/topojson.py @@ -43,8 +43,9 @@ from lib.common_config import ( create_vrf_cfg, ) +from lib.pim import create_pim_config, create_igmp_config from lib.bgp import create_router_bgp -from lib.ospf import create_router_ospf +from lib.ospf import create_router_ospf, create_router_ospf6 ROUTER_LIST = [] @@ -68,20 +69,18 @@ def build_topo_from_json(tgen, topo): topo["switches"].keys(), key=lambda x: int(re_search("\d+", x).group(0)) ) - listRouters = ROUTER_LIST[:] - listSwitches = SWITCH_LIST[:] + listRouters = sorted(ROUTER_LIST[:]) + listSwitches = sorted(SWITCH_LIST[:]) listAllRouters = deepcopy(listRouters) dictSwitches = {} for routerN in ROUTER_LIST: logger.info("Topo: Add router {}".format(routerN)) tgen.add_router(routerN) - listRouters.append(routerN) for switchN in SWITCH_LIST: logger.info("Topo: Add switch {}".format(switchN)) dictSwitches[switchN] = tgen.add_switch(switchN) - listSwitches.append(switchN) if "ipv4base" in topo: ipv4Next = ipaddress.IPv4Address(topo["link_ip_start"]["ipv4"]) @@ -96,33 +95,25 @@ def build_topo_from_json(tgen, topo): for router in listRouters: topo["routers"][router]["nextIfname"] = 0 + router_count = 0 while listRouters != []: curRouter = listRouters.pop(0) # Physical Interfaces if "links" in topo["routers"][curRouter]: - - def link_sort(x): - if x == "lo": - return 0 - elif "link" in x: - return int(x.split("-link")[1]) - else: - return int(re_search("\d+", x).group(0)) - for destRouterLink, data in sorted( - topo["routers"][curRouter]["links"].items(), - key=lambda x: link_sort(x[0]), + topo["routers"][curRouter]["links"].iteritems() ): currRouter_lo_json = topo["routers"][curRouter]["links"][destRouterLink] # Loopback interfaces if "type" in data and data["type"] == "loopback": + router_count += 1 if ( "ipv4" in currRouter_lo_json and currRouter_lo_json["ipv4"] == "auto" ): currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format( topo["lo_prefix"]["ipv4"], - number_to_row(curRouter), + router_count, number_to_column(curRouter), topo["lo_prefix"]["v4mask"], ) @@ -132,7 +123,7 @@ def build_topo_from_json(tgen, topo): ): currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format( topo["lo_prefix"]["ipv6"], - number_to_row(curRouter), + router_count, number_to_column(curRouter), topo["lo_prefix"]["v6mask"], ) @@ -167,6 +158,14 @@ def build_topo_from_json(tgen, topo): destRouter, curRouter, topo["routers"][destRouter]["nextIfname"] ) + # add link interface + destRouter_link_json["peer-interface"] = "{}-{}-eth{}".format( + curRouter, destRouter, topo["routers"][curRouter]["nextIfname"] + ) + currRouter_link_json["peer-interface"] = "{}-{}-eth{}".format( + destRouter, curRouter, topo["routers"][destRouter]["nextIfname"] + ) + topo["routers"][curRouter]["nextIfname"] += 1 topo["routers"][destRouter]["nextIfname"] += 1 @@ -311,8 +310,11 @@ def build_config_from_json(tgen, topo, save_bkup=True): ("prefix_lists", create_prefix_lists), ("bgp_community_list", create_bgp_community_lists), ("route_maps", create_route_maps), + ("pim", create_pim_config), + ("igmp", create_igmp_config), ("bgp", create_router_bgp), ("ospf", create_router_ospf), + ("ospf6", create_router_ospf6), ] ) diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 20d60ebbef..1e6ef1b2b3 100644 --- 
a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -609,8 +609,10 @@ def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None): ifacename, str_ifaceaction ) else: - cmd = 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format( - ifacename, vrf_name, str_ifaceaction + cmd = ( + 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format( + ifacename, vrf_name, str_ifaceaction + ) ) node.run(cmd) @@ -924,40 +926,44 @@ def checkAddressSanitizerError(output, router, component, logdir=""): ) if addressSanitizerLog: # Find Calling Test. Could be multiple steps back - testframe=sys._current_frames().values()[0] - level=0 + testframe = sys._current_frames().values()[0] + level = 0 while level < 10: - test=os.path.splitext(os.path.basename(testframe.f_globals["__file__"]))[0] + test = os.path.splitext( + os.path.basename(testframe.f_globals["__file__"]) + )[0] if (test != "topotest") and (test != "topogen"): # Found the calling test - callingTest=os.path.basename(testframe.f_globals["__file__"]) + callingTest = os.path.basename(testframe.f_globals["__file__"]) break - level=level+1 - testframe=testframe.f_back - if (level >= 10): + level = level + 1 + testframe = testframe.f_back + if level >= 10: # somehow couldn't find the test script. - callingTest="unknownTest" + callingTest = "unknownTest" # # Now finding Calling Procedure - level=0 + level = 0 while level < 20: - callingProc=sys._getframe(level).f_code.co_name - if ((callingProc != "processAddressSanitizerError") and - (callingProc != "checkAddressSanitizerError") and - (callingProc != "checkRouterCores") and - (callingProc != "stopRouter") and - (callingProc != "__stop_internal") and - (callingProc != "stop") and - (callingProc != "stop_topology") and - (callingProc != "checkRouterRunning") and - (callingProc != "check_router_running") and - (callingProc != "routers_have_failure")): + callingProc = sys._getframe(level).f_code.co_name + if ( + (callingProc != "processAddressSanitizerError") + and (callingProc != "checkAddressSanitizerError") + and (callingProc != "checkRouterCores") + and (callingProc != "stopRouter") + and (callingProc != "__stop_internal") + and (callingProc != "stop") + and (callingProc != "stop_topology") + and (callingProc != "checkRouterRunning") + and (callingProc != "check_router_running") + and (callingProc != "routers_have_failure") + ): # Found the calling test break - level=level+1 - if (level >= 20): + level = level + 1 + if level >= 20: # something wrong - couldn't found the calling test function - callingProc="unknownProc" + callingProc = "unknownProc" with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile: sys.stderr.write( "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" @@ -979,7 +985,6 @@ def checkAddressSanitizerError(output, router, component, logdir=""): addrSanFile.write("\n---------------\n") return - addressSanitizerError = re.search( "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output ) @@ -989,16 +994,20 @@ def checkAddressSanitizerError(output, router, component, logdir=""): # No Address Sanitizer Error in Output. 
Now check for AddressSanitizer daemon file if logdir: - filepattern=logdir+"/"+router+"/"+component+".asan.*" - logger.debug("Log check for %s on %s, pattern %s\n" % (component, router, filepattern)) + filepattern = logdir + "/" + router + "/" + component + ".asan.*" + logger.debug( + "Log check for %s on %s, pattern %s\n" % (component, router, filepattern) + ) for file in glob.glob(filepattern): with open(file, "r") as asanErrorFile: - asanError=asanErrorFile.read() + asanError = asanErrorFile.read() addressSanitizerError = re.search( "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError - ) + ) if addressSanitizerError: - processAddressSanitizerError(addressSanitizerError, asanError, router, component) + processAddressSanitizerError( + addressSanitizerError, asanError, router, component + ) return True return False @@ -1065,7 +1074,7 @@ class Router(Node): if self.logdir is None: cur_test = os.environ["PYTEST_CURRENT_TEST"] self.logdir = "/tmp/topotests/" + cur_test[ - cur_test.find("/")+1 : cur_test.find(".py") + cur_test.find("/") + 1 : cur_test.find(".py") ].replace("/", ".") # If the logdir is not created, then create it and set the @@ -1073,7 +1082,7 @@ class Router(Node): if not os.path.isdir(self.logdir): os.system("mkdir -p " + self.logdir + "/" + name) os.system("chmod -R go+rw /tmp/topotests") - # Erase logs of previous run + # Erase logs of previous run os.system("rm -rf " + self.logdir + "/" + name) self.daemondir = None @@ -1096,6 +1105,8 @@ class Router(Node): "sharpd": 0, "babeld": 0, "pbrd": 0, + "pathd": 0, + "snmpd": 0, } self.daemons_options = {"zebra": ""} self.reportCores = True @@ -1279,6 +1290,8 @@ class Router(Node): % (self.routertype, self.routertype, self.routertype, daemon) ) self.waitOutput() + if (daemon == "snmpd") and (self.routertype == "frr"): + self.cmd('echo "agentXSocket /etc/frr/agentx" > /etc/snmp/frr.conf') if (daemon == "zebra") and (self.daemons["staticd"] == 0): # Add staticd with zebra - if it exists staticd_path = os.path.join(self.daemondir, "staticd") @@ -1435,6 +1448,20 @@ class Router(Node): while "staticd" in daemons_list: daemons_list.remove("staticd") + if "snmpd" in daemons_list: + snmpd_path = "/usr/sbin/snmpd" + snmpd_option = self.daemons_options["snmpd"] + self.cmd( + "{0} {1} -C -c /etc/frr/snmpd.conf -p /var/run/{2}/snmpd.pid -x /etc/frr/agentx > snmpd.out 2> snmpd.err".format( + snmpd_path, snmpd_option, self.routertype + ) + ) + logger.info("{}: {} snmpd started".format(self, self.routertype)) + + # Remove `snmpd` so we don't attempt to start it again. + while "snmpd" in daemons_list: + daemons_list.remove("snmpd") + # Fix Link-Local Addresses # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this self.cmd( @@ -1598,6 +1625,8 @@ class Router(Node): return "%s: vtysh killed by AddressSanitizer" % (self.name) for daemon in self.daemons: + if daemon == "snmpd": + continue if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning): sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon)) if daemon == "staticd": |
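The new SnmpTester helper (tests/topotests/lib/snmptest.py above) is only documented by its docstring, which points at tests/topotest/simple-snmp-test/test_simple_snmp.py for a full example. A minimal sketch of how it might be driven from a topotest follows; the router name, agent address, community, OIDs and expected values are placeholders, not part of the diff:

    from lib.snmptest import SnmpTester

    # Assumes tgen is the running Topogen object and that router "r1" was
    # started with TopoRouter.RD_SNMP so an SNMP agent answers at 1.2.1.1.
    r1 = tgen.gears["r1"]
    snmp = SnmpTester(r1, "1.2.1.1", "public", "2c")

    # Fetch a single object; get() returns None when the agent reports
    # "not found". 1.3.6.1.2.1.1.5.0 is sysName.0, used purely as an example.
    sysname = snmp.get("1.3.6.1.2.1.1.5.0")

    # Walk a subtree and check that the first values returned match, in order.
    assert snmp.test_oid_walk("ifDescr", ["r1-eth0", "r1-eth1"])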

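verify_multicast_traffic() (added to tests/topotests/lib/pim.py above) returns the counters it read when called with return_traffic=True, but the shape of that return value is not spelled out in its docstring. A hedged sketch based on the code above, with router and interface names as placeholders:

    from lib.pim import verify_multicast_traffic

    input_dict = {
        "r1": {
            "traffic_received": ["r1-r0-eth0"],
            "traffic_sent": ["r1-r2-eth0"],
        }
    }

    # On success the helper returns a nested dict rather than True, e.g.
    #   {"traffic_received": {"r1-r0-eth0": {"pktsIn": ..., "bytesIn": ...}},
    #    "traffic_sent":     {"r1-r2-eth0": {"pktsOut": ..., "bytesOut": ...}}}
    # A failed run still returns an error string, so check the type first.
    result = verify_multicast_traffic(tgen, input_dict, return_traffic=True)
    assert not isinstance(result, str), result
    pkts_in = result["traffic_received"]["r1-r0-eth0"]["pktsIn"]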