diff options
26 files changed, 1204 insertions, 104 deletions
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index 6ac6cf56dd..e17cce3ff6 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -654,86 +654,6 @@ static bool ecommunity_regexp_match(struct ecommunity *ecom, regex_t *reg)  	return false;  } -#if 0 -/* Delete community attribute using regular expression match.  Return -   modified communites attribute.  */ -static struct community * -community_regexp_delete (struct community *com, regex_t * reg) -{ -	int i; -	uint32_t comval; -	/* Maximum is "65535:65535" + '\0'. */ -	char c[12]; -	const char *str; - -	if (!com) -		return NULL; - -	i = 0; -	while (i < com->size) -	{ -		memcpy (&comval, com_nthval (com, i), sizeof(uint32_t)); -		comval = ntohl (comval); - -		switch (comval) { -		case COMMUNITY_INTERNET: -			str = "internet"; -			break; -		case COMMUNITY_ACCEPT_OWN: -			str = "accept-own"; -			break; -		case COMMUNITY_ROUTE_FILTER_TRANSLATED_v4: -			str = "route-filter-translated-v4"; -			break; -		case COMMUNITY_ROUTE_FILTER_v4: -			str = "route-filter-v4"; -			break; -		case COMMUNITY_ROUTE_FILTER_TRANSLATED_v6: -			str = "route-filter-translated-v6"; -			break; -		case COMMUNITY_ROUTE_FILTER_v6: -			str = "route-filter-v6"; -			break; -		case COMMUNITY_LLGR_STALE: -			str = "llgr-stale"; -			break; -		case COMMUNITY_NO_LLGR: -			str = "no-llgr"; -			break; -		case COMMUNITY_ACCEPT_OWN_NEXTHOP: -			str = "accept-own-nexthop"; -			break; -		case COMMUNITY_BLACKHOLE: -			str = "blackhole"; -			break; -		case COMMUNITY_NO_EXPORT: -			str = "no-export"; -			break; -		case COMMUNITY_NO_ADVERTISE: -			str = "no-advertise"; -			break; -		case COMMUNITY_LOCAL_AS: -			str = "local-AS"; -			break; -		case COMMUNITY_NO_PEER: -			str = "no-peer"; -			break; -		default: -			sprintf (c, "%d:%d", (comval >> 16) & 0xFFFF, -			 comval & 0xFFFF); -			str = c; -			break; -		} - -		if (regexec (reg, str, 0, NULL, 0) == 0) -			community_del_val (com, com_nthval (com, i)); -		else -			i++; -	} -	return com; -} -#endif -  /* When 
given community attribute matches to the community-list return     1 else return 0.  */  bool community_list_match(struct community *com, struct community_list *list) diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c index f722a8dbc7..43138b82f6 100644 --- a/bgpd/bgp_community.c +++ b/bgpd/bgp_community.c @@ -56,7 +56,7 @@ void community_free(struct community **com)  }  /* Add one community value to the community. */ -static void community_add_val(struct community *com, uint32_t val) +void community_add_val(struct community *com, uint32_t val)  {  	com->size++;  	if (com->val) diff --git a/bgpd/bgp_community.h b/bgpd/bgp_community.h index b99f38ab64..2a1fbf526a 100644 --- a/bgpd/bgp_community.h +++ b/bgpd/bgp_community.h @@ -88,6 +88,7 @@ extern struct community *community_delete(struct community *,  					  struct community *);  extern struct community *community_dup(struct community *);  extern bool community_include(struct community *, uint32_t); +extern void community_add_val(struct community *com, uint32_t val);  extern void community_del_val(struct community *, uint32_t *);  extern unsigned long community_count(void);  extern struct hash *community_hash(void); diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index c4ab223b7f..6735c1a952 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -3505,6 +3505,34 @@ bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi,  	return ret;  } +static void bgp_attr_add_no_advertise_community(struct attr *attr) +{ +	struct community *old; +	struct community *new; +	struct community *merge; +	struct community *noadv; + +	old = attr->community; +	noadv = community_str2com("no-advertise"); + +	if (old) { +		merge = community_merge(community_dup(old), noadv); + +		if (!old->refcnt) +			community_free(&old); + +		new = community_uniq_sort(merge); +		community_free(&merge); +	} else { +		new = community_dup(noadv); +	} + +	community_free(&noadv); + +	attr->community = new; +	attr->flag |= 
ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES); +} +  int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  	       struct attr *attr, afi_t afi, safi_t safi, int type,  	       int sub_type, struct prefix_rd *prd, mpls_label_t *label, @@ -3697,6 +3725,20 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  	if (peer->sort == BGP_PEER_EBGP) { +		/* rfc7999: +		 * A BGP speaker receiving an announcement tagged with the +		 * BLACKHOLE community SHOULD add the NO_ADVERTISE or +		 * NO_EXPORT community as defined in RFC1997, or a +		 * similar community, to prevent propagation of the +		 * prefix outside the local AS. The community to prevent +		 * propagation SHOULD be chosen according to the operator's +		 * routing policy. +		 */ +		if (new_attr.community +		    && community_include(new_attr.community, +					 COMMUNITY_BLACKHOLE)) +			bgp_attr_add_no_advertise_community(&new_attr); +  		/* If we receive the graceful-shutdown community from an eBGP  		 * peer we must lower local-preference */  		if (new_attr.community @@ -11262,8 +11304,13 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,  					   vty,  					   use_json,  					   json_paths); -		if (use_json && display) -			json_object_object_add(json, "paths", json_paths); +		if (use_json) { +			if (display) +				json_object_object_add(json, "paths", +						       json_paths); +			else +				json_object_free(json_paths); +		}  	} else {  		if ((dest = bgp_node_match(rib, &match)) != NULL) {  			const struct prefix *dest_p = bgp_dest_get_prefix(dest); @@ -12742,6 +12789,7 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi,  				"No such neighbor or address family");  			vty_out(vty, "%s\n", json_object_to_json_string(json));  			json_object_free(json); +			json_object_free(json_loop);  		} else  			vty_out(vty, "%% No such neighbor or address family\n"); diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 
32af9ed027..3dc2cfbd5c 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -4828,6 +4828,11 @@ DEFUN (set_community,  			buffer_putstr(b, "no-export");  			continue;  		} +		if (strncmp(argv[i]->arg, "blackhole", strlen(argv[i]->arg)) +		    == 0) { +			buffer_putstr(b, "blackhole"); +			continue; +		} +		if (strncmp(argv[i]->arg, "graceful-shutdown", +			    strlen(argv[i]->arg)) +		    == 0) { diff --git a/doc/developer/topotests-markers.rst b/doc/developer/topotests-markers.rst new file mode 100644 index 0000000000..02ffe3f777 --- /dev/null +++ b/doc/developer/topotests-markers.rst @@ -0,0 +1,115 @@ +.. _topotests-markers: + +Markers +-------- + +To allow for automated selective testing on large scale continuous integration +systems, all tests must be marked with at least one of the following markers: + +* babeld +* bfdd +* bgpd +* eigrpd +* isisd +* ldpd +* nhrpd +* ospf6d +* ospfd +* pathd +* pbrd +* pimd +* ripd +* ripngd +* sharpd +* staticd +* vrrpd + +The markers correspond to the daemon subdirectories in FRR's source code and have +to be added to tests on a module level depending on which daemons are used +during the test. + +The goal is to have continuous integration systems scan code submissions, detect +changes to files in a daemons subdirectory and select only tests using that +daemon to run to shorten developers' waiting times for test results and save test +infrastructure resources. + +Newly written modules and code changes to tests which do not contain any markers +or contain incorrect markers will be rejected by reviewers. + + +Registering markers +^^^^^^^^^^^^^^^^^^^ +The registration of new markers takes place in the file +``tests/topotests/pytest.ini`` and should be discussed with members of the TSC +beforehand. + +.. code:: python3 + +    # tests/topotests/pytest.ini +    [pytest] +    ... +    markers = +        babeld: Tests that run against BABELD +        bfdd: Tests that run against BFDD +        ... 
+        vrrpd: Tests that run against VRRPD + + +Adding markers to tests +^^^^^^^^^^^^^^^^^^^^^^^ +Markers are added to a test by placing a global variable in the test module. + +Adding a single marker: + +.. code:: python3 + +    import pytest +     +    ... +     +    pytestmark = pytest.mark.bfdd +     +    ... +     +    def test_using_bfdd(): + + +Adding multiple markers: + +.. code:: python3 + +    import pytest +     +    ... +     +    pytestmark = [ +        pytest.mark.bgpd, +        pytest.mark.ospfd, +        pytest.mark.ospf6d +    ] +     +    ... +     +    def test_using_bgpd_ospfd_ospf6d(): + + +Selecting marked modules for testing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Selecting by a single marker: + +.. code:: bash + +    pytest -v -m isisd + +Selecting by multiple markers: + +.. code:: bash + +    pytest -v -m "isisd or ldpd or nhrpd" + + +Further Information +^^^^^^^^^^^^^^^^^^^ +The `online pytest documentation <https://docs.pytest.org/en/stable/example/markers.html>`_ +provides further information and usage examples for pytest markers. + diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst index 688f73c991..3e8987f126 100644 --- a/doc/developer/topotests.rst +++ b/doc/developer/topotests.rst @@ -792,6 +792,8 @@ Requirements:    conforms with this, run it without the :option:`-s` parameter.  - Use `black <https://github.com/psf/black>`_ code formatter before creating    a pull request. This ensures we have a unified code style. +- Mark test modules with pytest markers depending on the daemons used during the +  tests (see Markers)  Tips: @@ -950,6 +952,8 @@ does what you need. If nothing is similar, then you may create a new topology,  preferably, using the newest template  (:file:`tests/topotests/example-test/test_template.py`). +.. include:: topotests-markers.rst +  ..
include:: topotests-snippets.rst  License diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index e609761e1c..173daa9b22 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -435,10 +435,12 @@ Require policy on EBGP  .. clicmd:: [no] bgp ebgp-requires-policy     This command requires incoming and outgoing filters to be applied -   for eBGP sessions. Without the incoming filter, no routes will be -   accepted. Without the outgoing filter, no routes will be announced. +   for eBGP sessions as part of RFC-8212 compliance. Without the incoming +   filter, no routes will be accepted. Without the outgoing filter, no +   routes will be announced. -   This is enabled by default. +   This is enabled by default for the traditional configuration and +   turned off by default for datacenter configuration.     When the incoming or outgoing filter is missing you will see     "(Policy)" sign under ``show bgp summary``: @@ -457,6 +459,22 @@ Require policy on EBGP        192.168.0.2     4      65002         8        10        0    0    0 00:03:09            5 (Policy)        fe80:1::2222    4      65002         9        11        0    0    0 00:03:09     (Policy) (Policy) +   Additionally a `show bgp neighbor` command would indicate in the `For address family:` +   block that: + +   .. code-block:: frr + +      exit1# show bgp neighbor +      ... +      For address family: IPv4 Unicast +       Update group 1, subgroup 1 +       Packet Queue length 0 +       Inbound soft reconfiguration allowed +       Community attribute sent to this neighbor(all) +       Inbound updates discarded due to missing policy +       Outbound updates discarded due to missing policy +       0 accepted prefixes +  Reject routes with AS_SET or AS_CONFED_SET types  ------------------------------------------------ @@ -1967,9 +1985,9 @@ is 4 octet long. The following format is used to define the community value.     ``0xFFFF029A`` ``65535:666``. 
:rfc:`7999` documents sending prefixes to     EBGP peers and upstream for the purpose of blackholing traffic.     Prefixes tagged with the this community should normally not be -   re-advertised from neighbors of the originating network. It is -   recommended upon receiving prefixes tagged with this community to -   add ``NO_EXPORT`` and ``NO_ADVERTISE``. +   re-advertised from neighbors of the originating network. Upon receiving +   ``BLACKHOLE`` community from a BGP speaker, ``NO_ADVERTISE`` community +   is added automatically.  ``no-export``     ``no-export`` represents well-known communities value ``NO_EXPORT`` diff --git a/lib/command.h b/lib/command.h index bfe64a7235..b002e79f09 100644 --- a/lib/command.h +++ b/lib/command.h @@ -414,7 +414,8 @@ struct cmd_node {  	"<neighbor|interface|area|lsa|zebra|config|dbex|spf|route|lsdb|redistribute|hook|asbr|prefix|abr>"  #define AREA_TAG_STR "[area tag]\n"  #define COMMUNITY_AANN_STR "Community number where AA and NN are (0-65535)\n" -#define COMMUNITY_VAL_STR  "Community number in AA:NN format (where AA and NN are (0-65535)) or local-AS|no-advertise|no-export|internet or additive\n" +#define COMMUNITY_VAL_STR                                                      \ +	"Community number in AA:NN format (where AA and NN are (0-65535)) or local-AS|no-advertise|no-export|internet|graceful-shutdown|accept-own-nexthop|accept-own|route-filter-translated-v4|route-filter-v4|route-filter-translated-v6|route-filter-v6|llgr-stale|no-llgr|blackhole|no-peer or additive\n"  #define MPLS_TE_STR "MPLS-TE specific commands\n"  #define LINK_PARAMS_STR "Configure interface link parameters\n"  #define OSPF_RI_STR "OSPF Router Information specific commands\n" diff --git a/tests/topotests/bgp_blackhole_community/__init__.py b/tests/topotests/bgp_blackhole_community/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/__init__.py diff --git 
a/tests/topotests/bgp_blackhole_community/r1/bgpd.conf b/tests/topotests/bgp_blackhole_community/r1/bgpd.conf new file mode 100644 index 0000000000..be86bd6b2c --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r1/bgpd.conf @@ -0,0 +1,13 @@ +! +router bgp 65001 +  no bgp ebgp-requires-policy +  neighbor r1-eth0 interface remote-as external +  address-family ipv4 unicast +    redistribute connected +    neighbor r1-eth0 route-map r2 out +  exit-address-family + ! +! +route-map r2 permit 10 +  set community blackhole +! diff --git a/tests/topotests/bgp_blackhole_community/r1/zebra.conf b/tests/topotests/bgp_blackhole_community/r1/zebra.conf new file mode 100644 index 0000000000..70dc5e516d --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r1/zebra.conf @@ -0,0 +1,10 @@ +! +interface lo + ip address 172.16.255.254/32 +! +interface r1-eth0 + ip address 192.168.0.1/24 +! +ip forwarding +! + diff --git a/tests/topotests/bgp_blackhole_community/r2/bgpd.conf b/tests/topotests/bgp_blackhole_community/r2/bgpd.conf new file mode 100644 index 0000000000..a4fb45e1ff --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r2/bgpd.conf @@ -0,0 +1,6 @@ +! +router bgp 65002 +  no bgp ebgp-requires-policy +  neighbor r2-eth0 interface remote-as external +  neighbor r2-eth1 interface remote-as external +! diff --git a/tests/topotests/bgp_blackhole_community/r2/zebra.conf b/tests/topotests/bgp_blackhole_community/r2/zebra.conf new file mode 100644 index 0000000000..307e5187ca --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r2/zebra.conf @@ -0,0 +1,9 @@ +! +interface r2-eth0 + ip address 192.168.0.2/24 +! +interface r2-eth1 + ip address 192.168.1.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_blackhole_community/r3/bgpd.conf b/tests/topotests/bgp_blackhole_community/r3/bgpd.conf new file mode 100644 index 0000000000..f0635cb8c9 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r3/bgpd.conf @@ -0,0 +1,5 @@ +! 
+router bgp 65003 +  no bgp ebgp-requires-policy +  neighbor r3-eth0 interface remote-as external +! diff --git a/tests/topotests/bgp_blackhole_community/r3/zebra.conf b/tests/topotests/bgp_blackhole_community/r3/zebra.conf new file mode 100644 index 0000000000..05ab56d6f1 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r3/zebra.conf @@ -0,0 +1,6 @@ +! +interface r3-eth0 + ip address 192.168.1.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py new file mode 100644 index 0000000000..b61ad354e2 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2021 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if 172.16.255.254/32 tagged with BLACKHOLE community is not +re-advertised downstream. 
+""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + + +class TemplateTopo(Topo): +    def build(self, *_args, **_opts): +        tgen = get_topogen(self) + +        for routern in range(1, 4): +            tgen.add_router("r{}".format(routern)) + +        switch = tgen.add_switch("s1") +        switch.add_link(tgen.gears["r1"]) +        switch.add_link(tgen.gears["r2"]) + +        switch = tgen.add_switch("s2") +        switch.add_link(tgen.gears["r2"]) +        switch.add_link(tgen.gears["r3"]) + + +def setup_module(mod): +    tgen = Topogen(TemplateTopo, mod.__name__) +    tgen.start_topology() + +    router_list = tgen.routers() + +    for i, (rname, router) in enumerate(router_list.items(), 1): +        router.load_config( +            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) +        ) +        router.load_config( +            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) +        ) + +    tgen.start_router() + + +def teardown_module(mod): +    tgen = get_topogen() +    tgen.stop_topology() + + +def test_bgp_blackhole_community(): +    tgen = get_topogen() + +    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    def _bgp_converge(): +        output = json.loads( +            tgen.gears["r2"].vtysh_cmd("show ip bgp 172.16.255.254/32 json") +        ) +        expected = {"paths": [{"community": {"list": ["blackhole", "noAdvertise"]}}]} +        return topotest.json_cmp(output, expected) + +    def _bgp_no_advertise(): +        output = json.loads( +            tgen.gears["r2"].vtysh_cmd( +                "show ip bgp neighbor r2-eth1 advertised-routes json" +            ) +        ) +        
expected = { +            "advertisedRoutes": {}, +            "totalPrefixCounter": 0, +            "filteredPrefixCounter": 0, +        } + +        return topotest.json_cmp(output, expected) + +    test_func = functools.partial(_bgp_converge) +    success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + +    assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r2"]) + +    test_func = functools.partial(_bgp_no_advertise) +    success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + +    assert result is None, 'Advertised blackhole tagged prefix in "{}"'.format( +        tgen.gears["r2"] +    ) + + +if __name__ == "__main__": +    args = ["-s"] + sys.argv[1:] +    sys.exit(pytest.main(args)) diff --git a/tests/topotests/example-test/test_template.py b/tests/topotests/example-test/test_template.py index 4305e0199f..973303b830 100644 --- a/tests/topotests/example-test/test_template.py +++ b/tests/topotests/example-test/test_template.py @@ -44,6 +44,18 @@ from lib.topolog import logger  from mininet.topo import Topo +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ +	pytest.mark.bgpd, +	pytest.mark.ospfd, +	pytest.mark.ospf6d +] # multiple markers +""" + +  class TemplateTopo(Topo):      "Test topology builder" diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py index f24f463b8a..cd48716905 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py +++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py @@ -53,6 +53,19 @@ from lib.topolog import logger  from lib.bgp import verify_bgp_convergence  
from lib.topojson import build_topo_from_json, build_config_from_json + +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ +	pytest.mark.bgpd, +	pytest.mark.ospfd, +	pytest.mark.ospf6d +] # multiple markers +""" + +  # Reading the data from JSON File for topology and configuration creation  jsonFile = "{}/example_topojson_multiple_links.json".format(CWD)  try: diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py index 3ae3c9f4fe..0c72e30044 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py @@ -52,6 +52,19 @@ from lib.topolog import logger  from lib.bgp import verify_bgp_convergence  from lib.topojson import build_topo_from_json, build_config_from_json + +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ +	pytest.mark.bgpd, +	pytest.mark.ospfd, +	pytest.mark.ospf6d +] # multiple markers +""" + +  # Reading the data from JSON File for topology and configuration creation  jsonFile = "{}/example_topojson.json".format(CWD) diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py index 06fa2f4626..d05ad6db21 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py @@ -54,6 +54,19 @@ from lib.topolog import logger  from lib.bgp import verify_bgp_convergence  from 
lib.topojson import build_topo_from_json, build_config_from_json + +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ +	pytest.mark.bgpd, +	pytest.mark.ospfd, +	pytest.mark.ospf6d +] # multiple markers +""" + +  # Reading the data from JSON File for topology and configuration creation  jsonFile = "{}/example_topojson.json".format(CWD) diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py index ba5835dcf7..910e901ade 100644 --- a/tests/topotests/lib/snmptest.py +++ b/tests/topotests/lib/snmptest.py @@ -80,11 +80,14 @@ class SnmpTester(object):      def _parse_multiline(self, snmp_output):          results = snmp_output.strip().split("\r\n") -        out_dict = {} +        out_dict = {} +        out_list = []          for response in results:              out_dict[self._get_snmp_oid(response)] = self._get_snmp_value(response) -        return out_dict +            out_list.append(self._get_snmp_value(response)) + +        return out_dict, out_list      def get(self, oid):          cmd = "snmpget {0} {1}".format(self._snmp_config(), oid) @@ -114,7 +117,7 @@ class SnmpTester(object):          return self.get_next(oid) == value      def test_oid_walk(self, oid, values, oids=None): -        results_dict = self.walk(oid) +        results_dict, results_list = self.walk(oid)          print("res {}".format(results_dict))          if oids is not None:              index = 0 @@ -124,4 +127,4 @@ class SnmpTester(object):                  index += 1              return True -        return results_dict.values() == values +        return results_list == values diff --git a/tests/topotests/ospf_basic_functionality/ospf_chaos.json b/tests/topotests/ospf_basic_functionality/ospf_chaos.json new file mode 100644 index 0000000000..ed199f181b --- /dev/null +++ b/tests/topotests/ospf_basic_functionality/ospf_chaos.json @@ -0,0 +1,166 @@ +{ + +    "ipv4base": 
"10.0.0.0", +    "ipv4mask": 24, +    "link_ip_start": { +        "ipv4": "10.0.0.0", +        "v4mask": 24 +    }, +    "lo_prefix": { +        "ipv4": "1.0.", +        "v4mask": 32 +    }, +    "routers": { +        "r0": { +            "links": { +                "r1": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r2": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r3": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "router_id": "100.1.1.0", +                "neighbors": { +                    "r1": {}, +                    "r2": {}, +                    "r3": {} +                }, +                "redistribute": [{ +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "r1": { +            "links": { +                "r0": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r2": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +             
           "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r3": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "router_id": "100.1.1.1", +                "neighbors": { +                    "r0": {}, +                    "r2": {}, +                    "r3": {} +                } +            } +        }, +        "r2": { +            "links": { +                "r0": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r1": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r3": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "router_id": "100.1.1.2", +                "neighbors": { +                    "r1": {}, +                    "r0": {}, +                    "r3": {} +                } +            } +        }, +        "r3": { +            "links": { +                "r0": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        
"dead_interval": 4 +                    } +                }, +                "r1": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r2": { +                    "ipv4": "auto", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "router_id": "100.1.1.3", +                "neighbors": { +                    "r0": {}, +                    "r1": {}, +                    "r2": {} +                } +            } +        } +    } +} diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py new file mode 100644 index 0000000000..37b7528490 --- /dev/null +++ b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py @@ -0,0 +1,576 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Basic Functionality Automation.""" +import os +import sys +import time +import pytest +from copy import deepcopy +import json + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( +    start_topology, +    write_test_header, +    write_test_footer, +    reset_config_on_routers, +    step, +    shutdown_bringup_interface, +    topo_daemons, +    verify_rib, +    stop_router, start_router, +    create_static_routes, +    start_router_daemons, +    kill_router_daemons +) + +from lib.ospf import ( +    verify_ospf_neighbor, verify_ospf_rib, +    create_router_ospf) + +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json +from ipaddress import IPv4Address + + + +# Global variables +topo = None + +NETWORK = { +    "ipv4": ["11.0.20.1/32", "11.0.20.2/32", "11.0.20.3/32", "11.0.20.4/32", +             "11.0.20.5/32"] +} +""" +Topology: +      Please view in a fixed-width font such as Courier. 
+      +---+  A1       +---+ +      +R1 +------------+R2 | +      +-+-+-           +--++ +        |  --        --  | +        |    -- A0 --    | +      A0|      ----      | +        |      ----      | A2 +        |    --    --    | +        |  --        --  | +      +-+-+-            +-+-+ +      +R0 +-------------+R3 | +      +---+     A3     +---+ + +TESTCASES = +1. Verify ospf functionality after restart ospfd. +2. Verify ospf functionality after restart FRR service. +3. Verify ospf functionality when staticd is restarted. + """ + +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospf_chaos.json".format(CWD) +try: +    with open(jsonFile, "r") as topoJson: +        topo = json.load(topoJson) +except IOError: +    assert False, "Could not read file {}".format(jsonFile) + +class CreateTopo(Topo): +    """ +    Test topology builder. + +    * `Topo`: Topology object +    """ + +    def build(self, *_args, **_opts): +        """Build function.""" +        tgen = get_topogen(self) + +        # Building topology from json file +        build_topo_from_json(tgen, topo) + + +def setup_module(mod): +    """ +    Sets up the pytest environment + +    * `mod`: module name +    """ +    global topo +    testsuite_run_time = time.asctime(time.localtime(time.time())) +    logger.info("Testsuite start time: {}".format(testsuite_run_time)) +    logger.info("=" * 40) + +    logger.info("Running setup_module to create topology") + +    # This function initiates the topology build with Topogen... +    tgen = Topogen(CreateTopo, mod.__name__) +    # ... and here it calls Mininet initialization functions. + +    # get list of daemons needs to be started for this suite. 
+    daemons = topo_daemons(tgen, topo) + +    # Starting topology, create tmp files which are loaded to routers +    #  to start deamons and then start routers +    start_topology(tgen, daemons) + +    # Creating configuration from JSON +    build_config_from_json(tgen, topo) + +    # Don't run this test if we have any failure. +    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( +        ospf_covergence +    ) + +    logger.info("Running setup_module() done") + + +def teardown_module(mod): +    """ +    Teardown the pytest environment. + +    * `mod`: module name +    """ + +    logger.info("Running teardown_module to delete topology") + +    tgen = get_topogen() + +    # Stop toplogy and Remove tmp files +    tgen.stop_topology() + +    logger.info( +        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) +    ) +    logger.info("=" * 40) + + +# ################################## +# Test cases start here. 
+# ################################## +def test_ospf_chaos_tc31_p1(request): +    """Verify ospf functionality after restart ospfd.""" +    tc_name = request.node.name +    write_test_header(tc_name) +    tgen = get_topogen() +    global topo +    step("Bring up the base config as per the topology") +    reset_config_on_routers(tgen) + +    step( +        "Create static routes(10.0.20.1/32) in R1 and redistribute " +        "to OSPF using route map.") + +    # Create Static routes +    input_dict = { +        "r0": { +            "static_routes": [ +                { +                    "network": NETWORK['ipv4'][0], +                    "no_of_ip": 5, +                    "next_hop": 'Null0', +                } +            ] +        } +    } +    result = create_static_routes(tgen, input_dict) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    ospf_red_r0 = { +        "r0": { +            "ospf": { +                "redistribute": [{ +                    "redist_type": "static" +                    }] +            } +        } +    } +    result = create_router_ospf(tgen, topo, ospf_red_r0) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Verify OSPF neighbors after base config is done.") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                      " {}".format(ospf_covergence)) + +    step("Verify that route is advertised to R1.") +    dut = 'r1' +    protocol = 'ospf' +    nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0] +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib( +        tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh) 
+    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Kill OSPFd daemon on R0.") +    kill_router_daemons(tgen, "r0", ["ospfd"]) + +    step("Verify OSPF neighbors are down after killing ospfd in R0") +    dut = 'r0' +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, +        expected=False) +    assert ospf_covergence is not True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step("Verify that route advertised to R1 are deleted from RIB and FIB.") +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, expected=False) +    assert result is not True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +        expected=False) +    assert result is not True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Bring up OSPFd daemon on R0.") +    start_router_daemons(tgen, "r0", ["ospfd"]) + +    step("Verify OSPF neighbors are up after bringing back ospfd in R0") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step( +        "All the neighbours are up and routes are installed before the" +        " restart. 
Verify OSPF route table and ip route table.") +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Kill OSPFd daemon on R1.") +    kill_router_daemons(tgen, "r1", ["ospfd"]) + +    step("Verify OSPF neighbors are down after killing ospfd in R1") +    dut = 'r1' +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, +        expected=False) +    assert ospf_covergence is not True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step("Bring up OSPFd daemon on R1.") +    start_router_daemons(tgen, "r1", ["ospfd"]) + +    step("Verify OSPF neighbors are up after bringing back ospfd in R1") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                      " {}".format(ospf_covergence)) + +    step( +        "All the neighbours are up and routes are installed before the" +        " restart. 
Verify OSPF route table and ip route table.") + +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    write_test_footer(tc_name) + + +def test_ospf_chaos_tc32_p1(request): +    """Verify ospf functionality after restart FRR service. """ +    tc_name = request.node.name +    write_test_header(tc_name) +    tgen = get_topogen() +    global topo +    step("Bring up the base config as per the topology") +    reset_config_on_routers(tgen) + +    step( +        "Create static routes(10.0.20.1/32) in R1 and redistribute " +        "to OSPF using route map.") + +    # Create Static routes +    input_dict = { +        "r0": { +            "static_routes": [ +                { +                    "network": NETWORK['ipv4'][0], +                    "no_of_ip": 5, +                    "next_hop": 'Null0', +                } +            ] +        } +    } +    result = create_static_routes(tgen, input_dict) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    ospf_red_r0 = { +        "r0": { +            "ospf": { +                "redistribute": [{ +                    "redist_type": "static" +                    }] +            } +        } +    } +    result = create_router_ospf(tgen, topo, ospf_red_r0) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Verify OSPF neighbors after base config is done.") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                          " 
{}".format(ospf_covergence)) + +    step("Verify that route is advertised to R1.") +    dut = 'r1' +    protocol = 'ospf' + +    nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0] +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Restart frr on R0") +    stop_router(tgen, 'r0') +    start_router(tgen, 'r0') + +    step("Verify OSPF neighbors are up after restarting R0") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step( +        "All the neighbours are up and routes are installed before the" +        " restart. 
Verify OSPF route table and ip route table.") +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Restart frr on R1") +    stop_router(tgen, 'r1') +    start_router(tgen, 'r1') + +    step("Verify OSPF neighbors are up after restarting R1") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step( +        "All the neighbours are up and routes are installed before the" +        " restart. Verify OSPF route table and ip route table.") +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    write_test_footer(tc_name) + + +def test_ospf_chaos_tc34_p1(request): +    """ +    verify ospf functionality when staticd is restarted. + +    Verify ospf functionalitywhen staticroutes are +    redistributed & Staticd is restarted. 
+    """ +    tc_name = request.node.name +    write_test_header(tc_name) +    tgen = get_topogen() +    global topo +    step("Bring up the base config as per the topology") +    reset_config_on_routers(tgen) + +    step( +        "Create static routes(10.0.20.1/32) in R1 and redistribute " +        "to OSPF using route map.") + +    # Create Static routes +    input_dict = { +        "r0": { +            "static_routes": [ +                { +                    "network": NETWORK['ipv4'][0], +                    "no_of_ip": 5, +                    "next_hop": 'Null0', +                } +            ] +        } +    } +    result = create_static_routes(tgen, input_dict) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    ospf_red_r0 = { +        "r0": { +            "ospf": { +                "redistribute": [{ +                    "redist_type": "static" +                    }] +            } +        } +    } +    result = create_router_ospf(tgen, topo, ospf_red_r0) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Verify OSPF neighbors after base config is done.") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step("Verify that route is advertised to R1.") +    dut = 'r1' +    protocol = 'ospf' +    nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0] +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Kill staticd 
daemon on R0.") +    kill_router_daemons(tgen, "r0", ["staticd"]) + +    step("Verify that route advertised to R1 are deleted from RIB and FIB.") +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, expected=False) +    assert result is not True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +            expected=False) +    assert result is not True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Bring up staticd daemon on R0.") +    start_router_daemons(tgen, "r0", ["staticd"]) + +    step("Verify OSPF neighbors are up after bringing back ospfd in R0") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step( +        "All the neighbours are up and routes are installed before the" +        " restart. 
Verify OSPF route table and ip route table.") +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    step("Kill staticd daemon on R1.") +    kill_router_daemons(tgen, "r1", ["staticd"]) + +    step("Bring up staticd daemon on R1.") +    start_router_daemons(tgen, "r1", ["staticd"]) + +    step("Verify OSPF neighbors are up after bringing back ospfd in R1") +    # Api call verify whether OSPF is converged +    ospf_covergence = verify_ospf_neighbor(tgen, topo) +    assert ospf_covergence is True, ("setup_module :Failed \n Error:" +                                          " {}".format(ospf_covergence)) + +    step( +        "All the neighbours are up and routes are installed before the" +        " restart. 
Verify OSPF route table and ip route table.") + +    dut = 'r1' +    protocol = 'ospf' +    result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, +    next_hop=nh) +    assert result is True, "Testcase {} : Failed \n Error: {}".format( +        tc_name, result) + +    write_test_footer(tc_name) + + +if __name__ == "__main__": +    args = ["-s"] + sys.argv[1:] +    sys.exit(pytest.main(args)) diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini index d1b18a57bb..0c45a09445 100644 --- a/tests/topotests/pytest.ini +++ b/tests/topotests/pytest.ini @@ -1,16 +1,29 @@  # Skip pytests example directory  [pytest]  norecursedirs = .git example-test example-topojson-test lib docker + +# Markers +# +# Please consult the documentation and discuss with TSC members before applying +# any changes to this list.  
markers = -	babel: Tests that run against BABEL -	bfd: Tests that run against BFDD -	eigrp: Tests that run against EIGRPD -	isis: Tests that run against ISISD -	ldp: Tests that run against LDPD -	ospf: Tests that run against OSPF( v2 and v3 ) -	pbr: Tests that run against PBRD -	pim: Tests that run against pim -	rip: Tests that run against RIP, both v4 and v6 +	babeld: Tests that run against BABELD +	bfdd: Tests that run against BFDD +	bgpd: Tests that run against BGPD +	eigrpd: Tests that run against EIGRPD +	isisd: Tests that run against ISISD +	ldpd: Tests that run against LDPD +	nhrpd: Tests that run against NHRPD +	ospf6d: Tests that run against OSPF6D +	ospfd: Tests that run against OSPFD +	pathd: Tests that run against PATHD +	pbrd: Tests that run against PBRD +	pimd: Tests that run against PIMD +	ripd: Tests that run against RIPD +	ripngd: Tests that run against RIPNGD +	sharpd: Tests that run against SHARPD +	staticd: Tests that run against STATICD +	vrrpd: Tests that run against VRRPD  [topogen]  # Default configuration values diff --git a/tests/topotests/simple-snmp-test/test_simple_snmp.py b/tests/topotests/simple-snmp-test/test_simple_snmp.py index 2b609ef14c..1e56252ea3 100755 --- a/tests/topotests/simple-snmp-test/test_simple_snmp.py +++ b/tests/topotests/simple-snmp-test/test_simple_snmp.py @@ -76,6 +76,11 @@ class TemplateTopo(Topo):  def setup_module(mod):      "Sets up the pytest environment" + +    # skip tests is SNMP not installed +    if not os.path.isfile("/usr/sbin/snmpd"): +        error_msg = "SNMP not installed - skipping" +        pytest.skip(error_msg)      # This function initiates the topology build with Topogen...      tgen = Topogen(TemplateTopo, mod.__name__)      # ... and here it calls Mininet initialization functions. 
@@ -120,11 +125,13 @@ def test_r1_bgp_version():      "Wait for protocol convergence"      tgen = get_topogen() -    #tgen.mininet_cli() +    # tgen.mininet_cli()      r1 = tgen.net.get("r1")      r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")      assert r1_snmp.test_oid("bgpVersin", None)      assert r1_snmp.test_oid("bgpVersion", "10") +    assert r1_snmp.test_oid_walk("bgpVersion", ["10"]) +    assert r1_snmp.test_oid_walk("bgpVersion", ["10"], ["0"])  if __name__ == "__main__":  | 
