Diffstat (limited to 'tests')
284 files changed, 13874 insertions, 1729 deletions
diff --git a/tests/.gitignore b/tests/.gitignore index fb2edc939a..498d7dd0b7 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -25,6 +25,8 @@ /lib/test_atomlist /lib/test_buffer /lib/test_checksum +/lib/test_frrscript +/lib/test_frrlua /lib/test_graph /lib/test_heavy /lib/test_heavy_thread @@ -34,6 +36,7 @@ /lib/test_nexthop /lib/test_nexthop_iter /lib/test_ntop +/lib/test_plist /lib/test_prefix2str /lib/test_printfrr /lib/test_privs diff --git a/tests/bgpd/test_aspath.c b/tests/bgpd/test_aspath.c index aaf3fd2aa4..c2d39752ab 100644 --- a/tests/bgpd/test_aspath.c +++ b/tests/bgpd/test_aspath.c @@ -469,7 +469,10 @@ static struct aspath_tests { 0, 0, { - COMMON_ATTRS, BGP_ATTR_FLAG_TRANS, BGP_ATTR_AS_PATH, 10, + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS, + BGP_ATTR_AS_PATH, + 10, }, COMMON_ATTR_SIZE + 3, }, @@ -482,7 +485,10 @@ static struct aspath_tests { -1, 0, { - COMMON_ATTRS, BGP_ATTR_FLAG_TRANS, BGP_ATTR_AS_PATH, 8, + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS, + BGP_ATTR_AS_PATH, + 8, }, COMMON_ATTR_SIZE + 3, }, @@ -495,7 +501,10 @@ static struct aspath_tests { -1, 0, { - COMMON_ATTRS, BGP_ATTR_FLAG_TRANS, BGP_ATTR_AS_PATH, 12, + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS, + BGP_ATTR_AS_PATH, + 12, }, COMMON_ATTR_SIZE + 3, }, @@ -510,7 +519,8 @@ static struct aspath_tests { { COMMON_ATTRS, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, - BGP_ATTR_AS_PATH, 10, + BGP_ATTR_AS_PATH, + 10, }, COMMON_ATTR_SIZE + 3, }, @@ -525,7 +535,8 @@ static struct aspath_tests { { COMMON_ATTRS, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, - BGP_ATTR_AS4_PATH, 10, + BGP_ATTR_AS4_PATH, + 10, }, COMMON_ATTR_SIZE + 3, }, @@ -540,7 +551,8 @@ static struct aspath_tests { { COMMON_ATTRS, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, - BGP_ATTR_AS4_PATH, 10, + BGP_ATTR_AS4_PATH, + 10, }, COMMON_ATTR_SIZE + 3, }, @@ -553,7 +565,10 @@ static struct aspath_tests { 0, PEER_CAP_AS4_RCV | PEER_CAP_AS4_ADV, { - COMMON_ATTRS, BGP_ATTR_FLAG_TRANS, BGP_ATTR_AS_PATH, 18, + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS, + BGP_ATTR_AS_PATH, + 18, }, COMMON_ATTR_SIZE + 3, }, @@ -566,7 +581,10 @@ static struct aspath_tests { -1, PEER_CAP_AS4_RCV | PEER_CAP_AS4_ADV, { - COMMON_ATTRS, BGP_ATTR_FLAG_TRANS, BGP_ATTR_AS_PATH, 16, + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS, + BGP_ATTR_AS_PATH, + 16, }, COMMON_ATTR_SIZE + 3, }, @@ -579,7 +597,10 @@ static struct aspath_tests { -1, PEER_CAP_AS4_RCV | PEER_CAP_AS4_ADV, { - COMMON_ATTRS, BGP_ATTR_FLAG_TRANS, BGP_ATTR_AS_PATH, 20, + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS, + BGP_ATTR_AS_PATH, + 20, }, COMMON_ATTR_SIZE + 3, }, @@ -592,7 +613,10 @@ static struct aspath_tests { -1, PEER_CAP_AS4_RCV | PEER_CAP_AS4_ADV, { - COMMON_ATTRS, BGP_ATTR_FLAG_TRANS, BGP_ATTR_AS_PATH, 22, + COMMON_ATTRS, + BGP_ATTR_FLAG_TRANS, + BGP_ATTR_AS_PATH, + 22, }, COMMON_ATTR_SIZE + 3, }, @@ -607,7 +631,8 @@ static struct aspath_tests { { COMMON_ATTRS, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, - BGP_ATTR_AS_PATH, 18, + BGP_ATTR_AS_PATH, + 18, }, COMMON_ATTR_SIZE + 3, }, @@ -622,7 +647,8 @@ static struct aspath_tests { { COMMON_ATTRS, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, - BGP_ATTR_AS4_PATH, 14, + BGP_ATTR_AS4_PATH, + 14, }, COMMON_ATTR_SIZE + 3, }, @@ -637,7 +663,8 @@ static struct aspath_tests { { COMMON_ATTRS, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, - BGP_ATTR_AS4_PATH, 14, + BGP_ATTR_AS4_PATH, + 14, }, COMMON_ATTR_SIZE + 3, &test_segments[0], @@ -648,12 +675,13 @@ static struct aspath_tests { &test_segments[28], "8466 3 52737 0 4096", AS4_DATA, - -1, + -2, PEER_CAP_AS4_RCV | PEER_CAP_AS4_ADV, { COMMON_ATTRS, 
BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL, - BGP_ATTR_AS4_PATH, 22, + BGP_ATTR_AS4_PATH, + 22, }, COMMON_ATTR_SIZE + 3, }, diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c index 45e9912a31..b168be21c3 100644 --- a/tests/bgpd/test_peer_attr.c +++ b/tests/bgpd/test_peer_attr.c @@ -30,9 +30,6 @@ #include "bgpd/bgp_vty.h" #include "bgpd/bgp_zebra.h" #include "bgpd/bgp_network.h" -#include "lib/routing_nb.h" -#include "lib/northbound_cli.h" -#include "bgpd/bgp_nb.h" #ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/rfapi_backend.h" @@ -777,10 +774,6 @@ static void test_execute(struct test *test, const char *fmt, ...) /* Execute command (non-strict). */ ret = cmd_execute_command(vline, test->vty, NULL, 0); - if (ret == CMD_SUCCESS) { - /* Commit any pending changes, irnore error */ - ret = nb_cli_pending_commit_check(test->vty); - } if (ret != CMD_SUCCESS) { test->state = TEST_COMMAND_ERROR; test->error = str_printf( @@ -939,7 +932,7 @@ static struct test *test_new(const char *desc, bool use_ibgp, test->vty = vty_new(); test->vty->type = VTY_TERM; - vty_config_enter(test->vty, true, false); + test->vty->node = CONFIG_NODE; test_initialize(test); @@ -1385,15 +1378,6 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa) test_process(test, pa, p, g->conf, true, false); } -static const struct frr_yang_module_info *const bgpd_yang_modules[] = { - &frr_bgp_info, - &frr_filter_info, - &frr_interface_info, - &frr_route_map_info, - &frr_routing_info, - &frr_vrf_info, -}; - static void bgp_startup(void) { cmd_init(1); @@ -1402,7 +1386,7 @@ static void bgp_startup(void) zprivs_init(&bgpd_privs); master = thread_master_create(NULL); - nb_init(master, bgpd_yang_modules, array_size(bgpd_yang_modules), false); + nb_init(master, NULL, 0, false); bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); bgp_option_set(BGP_OPT_NO_LISTEN); vrf_init(NULL, NULL, NULL, NULL, NULL); diff --git a/tests/lib/cli/common_cli.c b/tests/lib/cli/common_cli.c index 49bc0f4fb2..8be81cc4cb 100644 --- a/tests/lib/cli/common_cli.c +++ b/tests/lib/cli/common_cli.c @@ -59,10 +59,13 @@ static void vty_do_exit(int isexit) exit(0); } +const struct frr_yang_module_info *const *test_yang_modules = NULL; + /* main routine. 
*/ int main(int argc, char **argv) { struct thread thread; + size_t yangcount; /* Set umask before anything for security */ umask(0027); @@ -79,7 +82,11 @@ int main(int argc, char **argv) vty_init(master, false); lib_cmd_init(); - nb_init(master, NULL, 0, false); + + for (yangcount = 0; test_yang_modules && test_yang_modules[yangcount]; + yangcount++) + ; + nb_init(master, test_yang_modules, yangcount, false); test_init(argc, argv); diff --git a/tests/lib/cli/common_cli.h b/tests/lib/cli/common_cli.h index 15abe3b855..3042ff5b12 100644 --- a/tests/lib/cli/common_cli.h +++ b/tests/lib/cli/common_cli.h @@ -25,6 +25,9 @@ #include "zebra.h" #include "vty.h" #include "command.h" +#include "northbound.h" + +extern const struct frr_yang_module_info *const *test_yang_modules; /* function to be implemented by test */ extern void test_init(int argc, char **argv); diff --git a/tests/lib/script1.lua b/tests/lib/script1.lua new file mode 100644 index 0000000000..6361c960a7 --- /dev/null +++ b/tests/lib/script1.lua @@ -0,0 +1,54 @@ + +-- Positive testing + +function foo(a, b) + a = a + 1 + b = b + 1 + return { + a = a, + b = b, + } +end + +function bar(a, b) + a = a + 1 + b = b + 1 + c = 303 + return { + b = b, + c = c, + } +end + +function fact(n) + -- outer function must return a table + -- inner functions can be used to recurse or as helpers + function helper(m) + if m == 0 then + return 1 + else + return m * helper(m - 1) + end + end + return { + ans = helper(n) + } +end + +-- Negative testing + +function bad_return1() +end + +function bad_return2() + return 123 +end + +function bad_return3() + return {} +end + +function bad_return4() + error("Something bad!") +end + diff --git a/tests/lib/test_frrlua.c b/tests/lib/test_frrlua.c new file mode 100644 index 0000000000..a81446f9ca --- /dev/null +++ b/tests/lib/test_frrlua.c @@ -0,0 +1,111 @@ +/* + * frrlua unit tests + * Copyright (C) 2021 Donald Lee + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include "string.h" +#include "stdio.h" +#include "lib/frrlua.h" + +static void test_encode_decode(void) +{ + lua_State *L = luaL_newstate(); + + long long a = 123; + long long b = a; + + lua_pushintegerp(L, &a); + lua_decode_integerp(L, -1, &a); + assert(a == b); + assert(lua_gettop(L) == 0); + + time_t time_a = 100; + time_t time_b = time_a; + + lua_pushtimet(L, &time_a); + lua_decode_timet(L, -1, &time_a); + assert(time_a == time_b); + assert(lua_gettop(L) == 0); + + char str_b[] = "Hello", str_a[6]; + + strlcpy(str_a, str_b, sizeof(str_b)); + lua_pushstring_wrapper(L, str_a); + lua_decode_stringp(L, -1, str_a); + assert(strncmp(str_a, str_b, sizeof(str_b)) == 0); + assert(lua_gettop(L) == 0); + + char p_b_str[] = "10.0.0.0/24", p_a_str[12]; + struct prefix p_a; + + strlcpy(p_a_str, p_b_str, sizeof(p_b_str)); + str2prefix(p_a_str, &p_a); + lua_pushprefix(L, &p_a); + lua_decode_prefix(L, -1, &p_a); + prefix2str(&p_a, p_a_str, sizeof(p_b_str)); + assert(strncmp(p_a_str, p_b_str, sizeof(p_b_str)) == 0); + assert(lua_gettop(L) == 0); + + struct interface ifp_a; + struct interface ifp_b = ifp_a; + + lua_pushinterface(L, &ifp_a); + lua_decode_interface(L, -1, &ifp_a); + assert(strncmp(ifp_a.name, ifp_b.name, sizeof(ifp_b.name)) == 0); + assert(ifp_a.ifindex == ifp_b.ifindex); + assert(ifp_a.status == ifp_b.status); + assert(ifp_a.flags == ifp_b.flags); + assert(ifp_a.metric == ifp_b.metric); + assert(ifp_a.speed == ifp_b.speed); + assert(ifp_a.mtu == ifp_b.mtu); + assert(ifp_a.mtu6 == ifp_b.mtu6); + assert(ifp_a.bandwidth == ifp_b.bandwidth); + assert(ifp_a.link_ifindex == ifp_b.link_ifindex); + assert(ifp_a.ll_type == ifp_b.ll_type); + assert(lua_gettop(L) == 0); + + struct in_addr addr_a; + struct in_addr addr_b = addr_a; + + lua_pushinaddr(L, &addr_a); + lua_decode_inaddr(L, -1, &addr_a); + assert(addr_a.s_addr == addr_b.s_addr); + assert(lua_gettop(L) == 0); + + struct in6_addr in6addr_a; + struct in6_addr in6addr_b = in6addr_a; + + lua_pushin6addr(L, &in6addr_a); + lua_decode_in6addr(L, -1, &in6addr_a); + assert(in6addr_cmp(&in6addr_a, &in6addr_b) == 0); + assert(lua_gettop(L) == 0); + + union sockunion su_a, su_b; + + memset(&su_a, 0, sizeof(union sockunion)); + memset(&su_b, 0, sizeof(union sockunion)); + lua_pushsockunion(L, &su_a); + lua_decode_sockunion(L, -1, &su_a); + assert(sockunion_cmp(&su_a, &su_b) == 0); + assert(lua_gettop(L) == 0); +} + +int main(int argc, char **argv) +{ + test_encode_decode(); +} diff --git a/tests/lib/test_frrlua.py b/tests/lib/test_frrlua.py new file mode 100644 index 0000000000..2f6ddc1c07 --- /dev/null +++ b/tests/lib/test_frrlua.py @@ -0,0 +1,14 @@ +import frrtest +import pytest + +if 'S["SCRIPTING_TRUE"]=""\n' not in open("../config.status").readlines(): + class TestFrrlua: + @pytest.mark.skipif(True, reason="Test unsupported") + def test_exit_cleanly(self): + pass +else: + + class TestFrrlua(frrtest.TestMultiOut): + program = "./test_frrlua" + + TestFrrlua.exit_cleanly() diff --git a/tests/lib/test_frrscript.c b/tests/lib/test_frrscript.c new file mode 100644 index 0000000000..7b23045978 --- /dev/null +++ b/tests/lib/test_frrscript.c @@ -0,0 +1,104 @@ +/* + * frrscript unit tests + * Copyright (C) 2021 Donald Lee + * + * This program is free software; you can redistribute it and/or modify 
it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "lib/frrscript.h" +#include "lib/frrlua.h" + +int main(int argc, char **argv) +{ + frrscript_init("./lib"); + struct frrscript *fs = frrscript_new("script1"); + int result; + + /* Positive testing */ + + long long a = 100, b = 200; + + result = frrscript_load(fs, "foo", NULL); + assert(result == 0); + result = frrscript_call(fs, "foo", ("a", &a), ("b", &b)); + assert(result == 0); + assert(a == 101); + assert(b == 201); + + a = 100, b = 200; + + result = frrscript_load(fs, "bar", NULL); + assert(result == 0); + result = frrscript_call(fs, "bar", ("a", &a), ("b", &b)); + assert(result == 0); + long long *cptr = frrscript_get_result(fs, "bar", "c", lua_tointegerp); + + /* a should not occur in the returned table in script */ + assert(a == 100); + assert(b == 201); + assert(*cptr == 303); + XFREE(MTYPE_SCRIPT_RES, cptr); + + long long n = 5; + + result = frrscript_load(fs, "fact", NULL); + assert(result == 0); + result = frrscript_call(fs, "fact", ("n", &n)); + assert(result == 0); + long long *ansptr = + frrscript_get_result(fs, "fact", "ans", lua_tointegerp); + assert(*ansptr == 120); + XFREE(MTYPE_SCRIPT_RES, ansptr); + + /* Negative testing */ + + /* Function does not exist in script file*/ + result = frrscript_load(fs, "does_not_exist", NULL); + assert(result == 1); + + /* Function was not (successfully) loaded */ + result = frrscript_call(fs, "does_not_exist", ("a", &a), ("b", &b)); + assert(result == 1); + + /* Get result from a function that was not loaded */ + long long *llptr = + frrscript_get_result(fs, "does_not_exist", "c", lua_tointegerp); + assert(llptr == NULL); + + /* Function returns void */ + result = frrscript_call(fs, "bad_return1"); + assert(result == 1); + + /* Function returns number */ + result = frrscript_call(fs, "bad_return2"); + assert(result == 1); + + /* Get non-existent result from a function */ + result = frrscript_call(fs, "bad_return3"); + assert(result == 1); + long long *cllptr = + frrscript_get_result(fs, "bad_return3", "c", lua_tointegerp); + assert(cllptr == NULL); + + /* Function throws exception */ + result = frrscript_call(fs, "bad_return4"); + assert(result == 1); + + frrscript_delete(fs); + + return 0; +} diff --git a/tests/lib/test_frrscript.py b/tests/lib/test_frrscript.py new file mode 100644 index 0000000000..046d97b014 --- /dev/null +++ b/tests/lib/test_frrscript.py @@ -0,0 +1,14 @@ +import frrtest +import pytest + +if 'S["SCRIPTING_TRUE"]=""\n' not in open("../config.status").readlines(): + class TestFrrscript: + @pytest.mark.skipif(True, reason="Test unsupported") + def test_exit_cleanly(self): + pass +else: + + class TestFrrscript(frrtest.TestMultiOut): + program = "./test_frrscript" + + TestFrrscript.exit_cleanly() diff --git a/tests/lib/test_plist.c b/tests/lib/test_plist.c new file mode 100644 index 0000000000..ee7a9ebf30 --- /dev/null +++ 
b/tests/lib/test_plist.c @@ -0,0 +1,48 @@ +/* + * Simple prefix list querying tool + * + * Copyright (C) 2021 by David Lamparter, + * for Open Source Routing / NetDEF, Inc. + * + * Quagga is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * Quagga is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "lib/plist.h" +#include "lib/filter.h" +#include "tests/lib/cli/common_cli.h" + +static const struct frr_yang_module_info *const my_yang_modules[] = { + &frr_filter_info, + NULL, +}; + +__attribute__((_CONSTRUCTOR(2000))) +static void test_yang_modules_set(void) +{ + test_yang_modules = my_yang_modules; +} + +void test_init(int argc, char **argv) +{ + prefix_list_init(); + filter_cli_init(); + + /* nothing else to do here, giving stand-alone access to the prefix + * list code's "debug prefix-list ..." command is the only purpose of + * this "test". + */ +} diff --git a/tests/subdir.am b/tests/subdir.am index ca477851e3..45236287cf 100644 --- a/tests/subdir.am +++ b/tests/subdir.am @@ -59,6 +59,15 @@ TESTS_ZEBRA = IGNORE_ZEBRA = --ignore=zebra/ endif +if SCRIPTING +TESTS_SCRIPTING = \ + tests/lib/test_frrlua \ + tests/lib/test_frrscript \ + #end +else +TESTS_SCRIPTING = +endif + clippy_scan += \ tests/lib/cli/test_cli.c \ tests/ospf6d/test_lsdb.c \ @@ -78,6 +87,7 @@ check_PROGRAMS = \ tests/lib/test_nexthop_iter \ tests/lib/test_nexthop \ tests/lib/test_ntop \ + tests/lib/test_plist \ tests/lib/test_prefix2str \ tests/lib/test_printfrr \ tests/lib/test_privs \ @@ -104,6 +114,7 @@ check_PROGRAMS = \ $(TESTS_OSPFD) \ $(TESTS_OSPF6D) \ $(TESTS_ZEBRA) \ + $(TESTS_SCRIPTING) \ # end if GRPC @@ -211,19 +222,6 @@ tests_bgpd_test_peer_attr_CFLAGS = $(TESTS_CFLAGS) tests_bgpd_test_peer_attr_CPPFLAGS = $(TESTS_CPPFLAGS) tests_bgpd_test_peer_attr_LDADD = $(BGP_TEST_LDADD) tests_bgpd_test_peer_attr_SOURCES = tests/bgpd/test_peer_attr.c -nodist_tests_bgpd_test_peer_attr_SOURCES = \ - yang/frr-bgp-types.yang.c \ - yang/frr-bgp.yang.c \ - yang/frr-bgp-common-structure.yang.c \ - yang/frr-bgp-common.yang.c \ - yang/frr-bgp-common-multiprotocol.yang.c \ - yang/frr-bgp-neighbor.yang.c \ - yang/frr-bgp-peer-group.yang.c \ - yang/frr-bgp-bmp.yang.c \ - yang/frr-bgp-rpki.yang.c \ - yang/frr-deviations-bgp-datacenter.yang.c \ - # end - tests_isisd_test_fuzz_isis_tlv_CFLAGS = $(TESTS_CFLAGS) -I$(top_builddir)/tests/isisd tests_isisd_test_fuzz_isis_tlv_CPPFLAGS = $(TESTS_CPPFLAGS) -I$(top_builddir)/tests/isisd @@ -289,6 +287,16 @@ tests_lib_test_checksum_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_checksum_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_checksum_LDADD = $(ALL_TESTS_LDADD) tests_lib_test_checksum_SOURCES = tests/lib/test_checksum.c +if SCRIPTING +tests_lib_test_frrlua_CFLAGS = $(TESTS_CFLAGS) +tests_lib_test_frrlua_CPPFLAGS = $(TESTS_CPPFLAGS) +tests_lib_test_frrlua_LDADD = $(ALL_TESTS_LDADD) +tests_lib_test_frrlua_SOURCES = tests/lib/test_frrlua.c +tests_lib_test_frrscript_CFLAGS = 
$(TESTS_CFLAGS) +tests_lib_test_frrscript_CPPFLAGS = $(TESTS_CPPFLAGS) +tests_lib_test_frrscript_LDADD = $(ALL_TESTS_LDADD) +tests_lib_test_frrscript_SOURCES = tests/lib/test_frrscript.c +endif tests_lib_test_graph_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_graph_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_graph_LDADD = $(ALL_TESTS_LDADD) @@ -324,6 +332,10 @@ tests_lib_test_ntop_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_ntop_CPPFLAGS = $(CPPFLAGS_BASE) # no assert override tests_lib_test_ntop_LDADD = # none tests_lib_test_ntop_SOURCES = tests/lib/test_ntop.c tests/helpers/c/prng.c +tests_lib_test_plist_CFLAGS = $(TESTS_CFLAGS) +tests_lib_test_plist_CPPFLAGS = $(TESTS_CPPFLAGS) +tests_lib_test_plist_LDADD = $(ALL_TESTS_LDADD) +tests_lib_test_plist_SOURCES = tests/lib/test_plist.c tests/lib/cli/common_cli.c tests_lib_test_prefix2str_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_prefix2str_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_prefix2str_LDADD = $(ALL_TESTS_LDADD) @@ -464,6 +476,14 @@ EXTRA_DIST += \ tests/zebra/test_lm_plugin.refout \ # end + +if SCRIPTING +EXTRA_DIST += \ + tests/lib/test_frrscript.py \ + tests/lib/test_frrlua.py \ + #end +endif + .PHONY: tests/tests.xml tests/tests.xml: $(check_PROGRAMS) ( cd tests; $(PYTHON) ../$(srcdir)/tests/runtests.py --junitxml=tests.xml -v ../$(srcdir)/tests $(IGNORE_BGPD) $(IGNORE_ISISD) $(IGNORE_OSPF6D); ) diff --git a/tests/topotests/bfd_topo2/r2/ospf6d.conf b/tests/topotests/bfd_topo2/r2/ospf6d.conf index 48a729ce19..524e2c9430 100644 --- a/tests/topotests/bfd_topo2/r2/ospf6d.conf +++ b/tests/topotests/bfd_topo2/r2/ospf6d.conf @@ -1,4 +1,5 @@ interface r2-eth2 + ipv6 ospf6 area 0.0.0.1 ipv6 ospf6 bfd ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 @@ -7,5 +8,4 @@ router ospf6 ospf6 router-id 10.254.254.2 redistribute connected redistribute bgp - interface r2-eth2 area 0.0.0.1 ! diff --git a/tests/topotests/bfd_topo2/r4/ospf6d.conf b/tests/topotests/bfd_topo2/r4/ospf6d.conf index 57f7f6c079..2f38c51c35 100644 --- a/tests/topotests/bfd_topo2/r4/ospf6d.conf +++ b/tests/topotests/bfd_topo2/r4/ospf6d.conf @@ -1,4 +1,5 @@ interface r4-eth0 + ipv6 ospf6 area 0.0.0.1 ipv6 ospf6 bfd ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 @@ -6,5 +7,4 @@ interface r4-eth0 router ospf6 ospf6 router-id 10.254.254.4 redistribute connected - interface r4-eth0 area 0.0.0.1 ! 
diff --git a/tests/topotests/bfd_topo2/test_bfd_topo2.py b/tests/topotests/bfd_topo2/test_bfd_topo2.py index e85b2644dd..2cc12bc7b0 100644 --- a/tests/topotests/bfd_topo2/test_bfd_topo2.py +++ b/tests/topotests/bfd_topo2/test_bfd_topo2.py @@ -80,33 +80,28 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.items(): - router.load_config( - TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) - ) - router.load_config( - TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) - ) - router.load_config( - TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) - ) - router.load_config( - TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) - ) - router.load_config( - TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname)) - ) + daemon_file = "{}/{}/zebra.conf".format(CWD, rname) + router.load_config(TopoRouter.RD_ZEBRA, daemon_file) + + daemon_file = "{}/{}/bfdd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_BFD, daemon_file) + + daemon_file = "{}/{}/bgpd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_BGP, daemon_file) + + daemon_file = "{}/{}/ospfd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_OSPF, daemon_file) + + daemon_file = "{}/{}/ospf6d.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_OSPF6, daemon_file) # Initialize all routers. tgen.start_router() - # Verify that we are using the proper version and that the BFD - # daemon exists. - for router in router_list.values(): - # Check for Version - if router.has_version("<", "5.1"): - tgen.set_error("Unsupported FRR version") - break - def teardown_module(_mod): "Teardown the pytest environment" @@ -135,7 +130,7 @@ def test_protocols_convergence(): test_func = partial( topotest.router_json_cmp, router, "show ip route json", expected ) - _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg @@ -151,7 +146,7 @@ def test_protocols_convergence(): test_func = partial( topotest.router_json_cmp, router, "show ipv6 route json", expected ) - _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) assertmsg = '"{}" JSON output mismatches'.format(router.name) assert result is None, assertmsg diff --git a/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py b/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py index fbce2809e0..a411f13d2e 100755 --- a/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py +++ b/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py @@ -77,6 +77,9 @@ from lib.common_config import ( # Required to instantiate the topology builder class. 
from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + + #Global variables PES = ['PE1', 'PE2'] HOSTS = ['host1', 'host2'] @@ -194,7 +197,6 @@ def setup_module(mod): tgen.start_router() logger.info("Running setup_module() done") - topotest.sleep(200) def teardown_module(mod): diff --git a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py index 089b1acb1c..9f26978259 100644 --- a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py +++ b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py @@ -42,6 +42,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class BgpAggregateAddressTopo1(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_aspath_zero/__init__.py b/tests/topotests/bgp_aspath_zero/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_aspath_zero/__init__.py diff --git a/tests/topotests/bgp_features/exabgp.env b/tests/topotests/bgp_aspath_zero/exabgp.env index 6c554f5fa8..28e642360a 100644 --- a/tests/topotests/bgp_features/exabgp.env +++ b/tests/topotests/bgp_aspath_zero/exabgp.env @@ -1,4 +1,3 @@ - [exabgp.api] encoder = text highres = false @@ -16,6 +15,7 @@ nexthops = true daemonize = true pid = '/var/run/exabgp/exabgp.pid' user = 'exabgp' +##daemonize = false [exabgp.log] all = false diff --git a/tests/topotests/bgp_aspath_zero/peer1/exabgp.cfg b/tests/topotests/bgp_aspath_zero/peer1/exabgp.cfg new file mode 100644 index 0000000000..fe9ea01eca --- /dev/null +++ b/tests/topotests/bgp_aspath_zero/peer1/exabgp.cfg @@ -0,0 +1,17 @@ +neighbor 10.0.0.1 { + router-id 10.0.0.2; + local-address 10.0.0.2; + local-as 65001; + peer-as 65534; + + static { + route 192.168.100.101/32 { + next-hop 10.0.0.2; + } + + route 192.168.100.102/32 { + as-path [65000 0 65001]; + next-hop 10.0.0.2; + } + } +} diff --git a/tests/topotests/bgp_aspath_zero/r1/bgpd.conf b/tests/topotests/bgp_aspath_zero/r1/bgpd.conf new file mode 100644 index 0000000000..002a5c78c0 --- /dev/null +++ b/tests/topotests/bgp_aspath_zero/r1/bgpd.conf @@ -0,0 +1,6 @@ +! +router bgp 65534 + no bgp ebgp-requires-policy + neighbor 10.0.0.2 remote-as external + neighbor 10.0.0.2 timers 3 10 +! diff --git a/tests/topotests/bgp_aspath_zero/r1/zebra.conf b/tests/topotests/bgp_aspath_zero/r1/zebra.conf new file mode 100644 index 0000000000..22a26ac610 --- /dev/null +++ b/tests/topotests/bgp_aspath_zero/r1/zebra.conf @@ -0,0 +1,6 @@ +! +interface r1-eth0 + ip address 10.0.0.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py b/tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py new file mode 100644 index 0000000000..903ab12a13 --- /dev/null +++ b/tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2021 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if BGP UPDATE with AS-PATH attribute with value zero (0) +is threated as withdrawal. +""" + +import os +import sys +import json +import time +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + +pytestmark = [pytest.mark.bgpd] + + +class BgpAggregatorAsnZero(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + + r1 = tgen.add_router("r1") + peer1 = tgen.add_exabgp_peer( + "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1" + ) + + switch = tgen.add_switch("s1") + switch.add_link(r1) + switch.add_link(peer1) + + +def setup_module(mod): + tgen = Topogen(BgpAggregatorAsnZero, mod.__name__) + tgen.start_topology() + + router = tgen.gears["r1"] + router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r1/zebra.conf")) + router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r1/bgpd.conf")) + router.start() + + peer = tgen.gears["peer1"] + peer.start(os.path.join(CWD, "peer1"), os.path.join(CWD, "exabgp.env")) + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_aggregator_zero(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_converge(): + output = json.loads( + tgen.gears["r1"].vtysh_cmd("show ip bgp neighbor 10.0.0.2 json") + ) + expected = { + "10.0.0.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 1}}, + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "More than one prefix seen at r1, SHOULD be only one." + + def _bgp_has_correct_routes_without_asn_0(): + output = json.loads(tgen.gears["r1"].vtysh_cmd("show ip bgp json")) + expected = {"routes": {"192.168.100.101/32": [{"valid": True}]}} + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_has_correct_routes_without_asn_0) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed listing 192.168.100.101/32, SHOULD be accepted." + + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py index 374cce21f6..4753c49397 100644 --- a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py +++ b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py @@ -774,9 +774,9 @@ def test_BGP_attributes_with_vrf_default_keyword_p0(request): } result = verify_bgp_rib(tgen, addr_type, dut, input_dict) - assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, addr_type, dut, input_dict) - assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: dut = "r4" @@ -793,9 +793,9 @@ def test_BGP_attributes_with_vrf_default_keyword_p0(request): } result = verify_bgp_rib(tgen, addr_type, dut, input_dict) - assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, addr_type, dut, input_dict) - assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) input_dict_4 = {"largeCommunity": "500:500:500", "community": "500:500"} @@ -1134,12 +1134,7 @@ def test_bgp_with_loopback_with_same_subnet_p1(request): dut = "r1" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib(tgen, addr_type, dut, input_dict_r1, protocol=protocol) - assert result is True, "Testcase {} :Failed \n Error: {}".format( - tc_name, result - ) - - result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1, expected=False) + result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1) assert result is not True, "Testcase {} : Failed \n" "Expected behavior: routes should not present in fib \n" "Error: {}".format(tc_name, result) @@ -1156,14 +1151,7 @@ def test_bgp_with_loopback_with_same_subnet_p1(request): dut = "r3" protocol = "bgp" for addr_type in ADDR_TYPES: - result = verify_rib( - tgen, addr_type, dut, input_dict_r3, protocol=protocol, fib=None - ) - assert result is True, "Testcase {} :Failed \n Error: {}".format( - tc_name, result - ) - - result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1, expected=False) + result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1) assert result is not True, "Testcase {} : Failed \n" "Expected behavior: routes should not present in fib \n" "Error: {}".format(tc_name, result) diff --git a/tests/topotests/bgp_community_alias/r1/bgpd.conf b/tests/topotests/bgp_community_alias/r1/bgpd.conf index 06113bdd2a..a6366204e8 100644 --- a/tests/topotests/bgp_community_alias/r1/bgpd.conf +++ b/tests/topotests/bgp_community_alias/r1/bgpd.conf @@ -6,4 +6,17 @@ bgp community alias 65001:1:1 large-community-r2-1 router bgp 65001 no bgp ebgp-requires-policy neighbor 192.168.1.2 remote-as external + address-family ipv4 unicast + redistribute connected + neighbor 192.168.1.2 route-map r2 in + exit-address-family +! 
+route-map r2 permit 10 + match alias community-r2-1 + set tag 10 +route-map r2 permit 20 + match alias community-r2-2 + set tag 20 +route-map r2 permit 30 + set tag 100 ! diff --git a/tests/topotests/bgp_community_alias/r2/bgpd.conf b/tests/topotests/bgp_community_alias/r2/bgpd.conf index fc67ff2ad2..9276fe592d 100644 --- a/tests/topotests/bgp_community_alias/r2/bgpd.conf +++ b/tests/topotests/bgp_community_alias/r2/bgpd.conf @@ -8,6 +8,7 @@ router bgp 65002 ! ip prefix-list p1 permit 172.16.16.1/32 ip prefix-list p2 permit 172.16.16.2/32 +ip prefix-list p3 permit 172.16.16.3/32 ! route-map r1 permit 10 match ip address prefix-list p1 @@ -16,4 +17,6 @@ route-map r1 permit 10 route-map r1 permit 20 match ip address prefix-list p2 set community 65002:1 65002:2 +route-map r1 permit 30 + match ip address prefix-list p3 ! diff --git a/tests/topotests/bgp_community_alias/r2/zebra.conf b/tests/topotests/bgp_community_alias/r2/zebra.conf index a806628a8e..b8cb9baf3c 100644 --- a/tests/topotests/bgp_community_alias/r2/zebra.conf +++ b/tests/topotests/bgp_community_alias/r2/zebra.conf @@ -2,6 +2,7 @@ int lo ip address 172.16.16.1/32 ip address 172.16.16.2/32 + ip address 172.16.16.3/32 ! int r2-eth0 ip address 192.168.1.2/24 diff --git a/tests/topotests/bgp_community_alias/test_bgp-community-alias.py b/tests/topotests/bgp_community_alias/test_bgp-community-alias.py index 90eeaaa731..6aadff1cfa 100644 --- a/tests/topotests/bgp_community_alias/test_bgp-community-alias.py +++ b/tests/topotests/bgp_community_alias/test_bgp-community-alias.py @@ -40,6 +40,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): @@ -84,39 +86,57 @@ def test_bgp_community_alias(): router = tgen.gears["r1"] def _bgp_converge(router): - output = json.loads( - router.vtysh_cmd("show bgp ipv4 unicast 172.16.16.1/32 json") - ) + output = json.loads(router.vtysh_cmd("show ip route json")) expected = { - "paths": [ + "172.16.16.1/32": [ + { + "tag": 10, + "communities": "community-r2-1 65001:2", + "largeCommunities": "large-community-r2-1 65001:1:2", + } + ], + "172.16.16.2/32": [ + { + "tag": 20, + "communities": "65002:1 community-r2-2", + "largeCommunities": "", + } + ], + "172.16.16.3/32": [ { - "community": {"string": "community-r2-1 65001:2"}, - "largeCommunity": {"string": "large-community-r2-1 65001:1:2"}, + "tag": 100, + "communities": "", + "largeCommunities": "", } - ] + ], } return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_converge, router) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, 'Cannot see BGP community aliases "{}"'.format(router) + assert result is None, "Cannot see BGP community aliases at r1" def _bgp_show_prefixes_by_alias(router): output = json.loads( - router.vtysh_cmd("show bgp ipv4 unicast alias community-r2-2 json detail") + router.vtysh_cmd( + "show bgp ipv4 unicast alias large-community-r2-1 json detail" + ) ) expected = { "routes": { - "172.16.16.2/32": [{"community": {"string": "65002:1 community-r2-2"}}] + "172.16.16.1/32": [ + { + "community": {"string": "community-r2-1 65001:2"}, + "largeCommunity": {"string": "large-community-r2-1 65001:1:2"}, + } + ] } } return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_show_prefixes_by_alias, router) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - 
assert result is None, 'Cannot see BGP prefixes by community alias "{}"'.format( - router - ) + assert result is None, "Cannot see BGP prefixes by community alias at r1" if __name__ == "__main__": diff --git a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py index 95e63c617e..9f449d7979 100644 --- a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py +++ b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py @@ -59,7 +59,7 @@ from mininet.topo import Topo from lib.common_config import step from time import sleep -pytestmark = [pytest.mark.bgpd, pytest.mark.bgpd] +pytestmark = [pytest.mark.bgpd] class TemplateTopo(Topo): diff --git a/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf b/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf index 633d1832fd..293b38c7e8 100644 --- a/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf +++ b/tests/topotests/bgp_conditional_advertisement/r1/bgpd.conf @@ -17,6 +17,7 @@ route-map DEF permit 10 ! router bgp 1 bgp log-neighbor-changes + bgp conditional-advertisement timer 5 no bgp ebgp-requires-policy neighbor 10.10.10.2 remote-as 2 ! diff --git a/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf b/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf index c6147fe658..82525fac64 100644 --- a/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf +++ b/tests/topotests/bgp_conditional_advertisement/r2/bgpd.conf @@ -32,6 +32,7 @@ route-map RMAP-2 deny 10 ! router bgp 2 bgp log-neighbor-changes + bgp conditional-advertisement timer 5 no bgp ebgp-requires-policy neighbor 10.10.10.1 remote-as 1 neighbor 10.10.20.3 remote-as 3 diff --git a/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf b/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf index 2f4f5068d8..f389f309a6 100644 --- a/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf +++ b/tests/topotests/bgp_conditional_advertisement/r3/bgpd.conf @@ -1,6 +1,7 @@ ! router bgp 3 bgp log-neighbor-changes + bgp conditional-advertisement timer 5 no bgp ebgp-requires-policy neighbor 10.10.20.2 remote-as 2 ! 
diff --git a/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py b/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py index 0e31ab1995..44f54c7b51 100644 --- a/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py +++ b/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py @@ -139,6 +139,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class BgpConditionalAdvertisementTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py b/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py index 28117b7fe4..6ed7023044 100644 --- a/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py +++ b/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py @@ -47,6 +47,8 @@ from lib.topolog import logger from mininet.topo import Topo from lib.common_config import step +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_default_route/r2/bgpd.conf b/tests/topotests/bgp_default_route/r2/bgpd.conf index 00c96cc58b..6d1080c119 100644 --- a/tests/topotests/bgp_default_route/r2/bgpd.conf +++ b/tests/topotests/bgp_default_route/r2/bgpd.conf @@ -2,7 +2,4 @@ router bgp 65001 no bgp ebgp-requires-policy neighbor 192.168.255.1 remote-as 65000 neighbor 192.168.255.1 timers 3 10 - address-family ipv4 unicast - redistribute connected - exit-address-family ! diff --git a/tests/topotests/bgp_default_route/test_bgp_default-originate.py b/tests/topotests/bgp_default_route/test_bgp_default-originate.py index d8de0f0ac6..6fbdfbe78a 100644 --- a/tests/topotests/bgp_default_route/test_bgp_default-originate.py +++ b/tests/topotests/bgp_default_route/test_bgp_default-originate.py @@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): @@ -79,10 +81,10 @@ def test_bgp_default_originate_route_map(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - router = tgen.gears["r2"] - - def _bgp_converge(router): - output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")) + def _bgp_check_if_received(): + output = json.loads( + tgen.gears["r2"].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json") + ) expected = { "192.168.255.1": { "bgpState": "Established", @@ -91,22 +93,27 @@ def test_bgp_default_originate_route_map(): } return topotest.json_cmp(output, expected) + def _bgp_check_if_originated(): + output = json.loads(tgen.gears["r1"].vtysh_cmd("show ip bgp summary json")) + expected = {"ipv4Unicast": {"peers": {"192.168.255.2": {"pfxSnt": 1}}}} + return topotest.json_cmp(output, expected) + def _bgp_default_route_is_valid(router): output = json.loads(router.vtysh_cmd("show ip bgp 0.0.0.0/0 json")) expected = {"paths": [{"valid": True}]} return topotest.json_cmp(output, expected) - test_func = functools.partial(_bgp_converge, router) + test_func = functools.partial(_bgp_check_if_received) success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "No 0.0.0.0/0 at r2 from r1" - assert result is None, 'Failed to see bgp convergence in "{}"'.format(router) - - test_func = 
functools.partial(_bgp_default_route_is_valid, router) + test_func = functools.partial(_bgp_check_if_originated) success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "No 0.0.0.0/0 from r1 to r2" - assert ( - result is None - ), 'Failed to see applied metric for default route in "{}"'.format(router) + test_func = functools.partial(_bgp_default_route_is_valid, tgen.gears["r2"]) + success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "Failed to see 0.0.0.0/0 in r2" if __name__ == "__main__": diff --git a/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py b/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py index 089c9a964e..e7e3512b17 100644 --- a/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py +++ b/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py @@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py b/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py index 42a6b6edf6..5852ac268b 100644 --- a/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py +++ b/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py @@ -41,6 +41,8 @@ from lib.topolog import logger from mininet.topo import Topo from lib.common_config import step +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py b/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py index 12d1d01bfb..e2fa89fccb 100644 --- a/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py +++ b/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py @@ -40,6 +40,9 @@ from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + + class TemplateTopo(Topo): def build(self, *_args, **_opts): tgen = get_topogen(self) diff --git a/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py b/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py index 2622c33f5b..be87dc61cf 100644 --- a/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py +++ b/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py @@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py index f338d52e70..bf26714087 100644 --- a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py +++ 
b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py @@ -49,6 +49,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_dont_capability_negogiate/__init__.py b/tests/topotests/bgp_dont_capability_negogiate/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_dont_capability_negogiate/__init__.py diff --git a/tests/topotests/bgp_dont_capability_negogiate/r1/bgpd.conf b/tests/topotests/bgp_dont_capability_negogiate/r1/bgpd.conf new file mode 100644 index 0000000000..b429efe076 --- /dev/null +++ b/tests/topotests/bgp_dont_capability_negogiate/r1/bgpd.conf @@ -0,0 +1,6 @@ +! +router bgp 65001 + no bgp ebgp-requires-policy + neighbor 192.168.1.2 remote-as external + neighbor 192.168.1.2 dont-capability-negotiate +! diff --git a/tests/topotests/bgp_dont_capability_negogiate/r1/zebra.conf b/tests/topotests/bgp_dont_capability_negogiate/r1/zebra.conf new file mode 100644 index 0000000000..b29940f46a --- /dev/null +++ b/tests/topotests/bgp_dont_capability_negogiate/r1/zebra.conf @@ -0,0 +1,4 @@ +! +int r1-eth0 + ip address 192.168.1.1/24 +! diff --git a/tests/topotests/bgp_dont_capability_negogiate/r2/bgpd.conf b/tests/topotests/bgp_dont_capability_negogiate/r2/bgpd.conf new file mode 100644 index 0000000000..4af2cd6a80 --- /dev/null +++ b/tests/topotests/bgp_dont_capability_negogiate/r2/bgpd.conf @@ -0,0 +1,7 @@ +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as external + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp_dont_capability_negogiate/r2/zebra.conf b/tests/topotests/bgp_dont_capability_negogiate/r2/zebra.conf new file mode 100644 index 0000000000..dc15cf756a --- /dev/null +++ b/tests/topotests/bgp_dont_capability_negogiate/r2/zebra.conf @@ -0,0 +1,7 @@ +! +int lo + ip address 172.16.16.1/32 +! +int r2-eth0 + ip address 192.168.1.2/24 +! diff --git a/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py b/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py new file mode 100644 index 0000000000..398fa57ba9 --- /dev/null +++ b/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +# Copyright (c) 2021 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if BGP connection is established if at least one peer +sets `dont-capability-negotiate`. 
+""" + +import os +import sys +import json +import time +import pytest +import functools + +pytestmark = pytest.mark.bgpd + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + +pytestmark = [pytest.mark.bgpd] + + +class TemplateTopo(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + + for routern in range(1, 3): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + +def setup_module(mod): + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_dont_capability_negotiate(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router = tgen.gears["r1"] + + def _bgp_converge(router): + output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast summary json")) + expected = { + "peers": { + "192.168.1.2": { + "pfxRcd": 2, + "pfxSnt": 2, + "state": "Established", + "peerState": "OK", + } + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge, router) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Can't converge with dont-capability-negotiate" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py b/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py index 3b99065fe0..6db2697e75 100644 --- a/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py +++ b/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py @@ -51,6 +51,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py index fa155dd5fe..2731d37fb0 100644 --- a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py +++ b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py @@ -57,6 +57,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_evpn_mh/test_evpn_mh.py b/tests/topotests/bgp_evpn_mh/test_evpn_mh.py index f389632b1e..2dcf70f14a 100644 --- a/tests/topotests/bgp_evpn_mh/test_evpn_mh.py +++ b/tests/topotests/bgp_evpn_mh/test_evpn_mh.py @@ -50,6 +50,9 @@ from lib.topolog import logger # Required 
to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd, pytest.mark.pimd] + + ##################################################### ## ## Network Topology Definition @@ -594,14 +597,23 @@ def test_evpn_ead_update(): def ping_anycast_gw(tgen): - local_host = tgen.gears["hostd11"] - remote_host = tgen.gears["hostd21"] - # ping the anycast gw from the local and remote hosts to populate # the mac address on the PEs - cmd_str = "arping -I torbond -c 1 45.0.0.1" - local_host.run(cmd_str) - remote_host.run(cmd_str) + script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py")) + intf = "torbond" + ipaddr = "45.0.0.1" + ping_cmd = [ + script_path, + "--imports=Ether,ARP", + "--interface=" + intf, + "'Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=\"{}\")'".format(ipaddr) + ] + for name in ("hostd11", "hostd21"): + host = tgen.net[name] + stdout = host.cmd(ping_cmd) + stdout = stdout.strip() + if stdout: + host.logger.debug("%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout) def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None): diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py index 36605d44f0..59024f7b71 100644 --- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py +++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py @@ -46,6 +46,7 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] class BGPEVPNTopo(Topo): "Test topology builder" diff --git a/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py b/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py index 086bad6481..fd5bb38b98 100755 --- a/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py +++ b/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py @@ -365,6 +365,10 @@ def test_ip_pe1_learn(): "run the IP learn test for PE1" tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + host1 = tgen.gears["host1"] pe1 = tgen.gears["PE1"] pe2 = tgen.gears["PE2"] @@ -380,6 +384,10 @@ def test_ip_pe2_learn(): "run the IP learn test for PE2" tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + host2 = tgen.gears["host2"] pe1 = tgen.gears["PE1"] pe2 = tgen.gears["PE2"] diff --git a/tests/topotests/bgp_features/peer1/exa_readpipe.py b/tests/topotests/bgp_features/peer1/exa_readpipe.py deleted file mode 100644 index 9e689a27e3..0000000000 --- a/tests/topotests/bgp_features/peer1/exa_readpipe.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -"Helper script to read api commands from a pipe and feed them to ExaBGP" - -import sys - -if len(sys.argv) != 2: - sys.exit(1) -fifo = sys.argv[1] - -while True: - pipe = open(fifo, "r") - with pipe: - line = pipe.readline().strip() - if line != "": - sys.stdout.write("{}\n".format(line)) - sys.stdout.flush() - pipe.close() - -sys.exit(0) diff --git a/tests/topotests/bgp_features/peer1/exabgp.cfg b/tests/topotests/bgp_features/peer1/exabgp.cfg deleted file mode 100644 index 2e95252cf6..0000000000 --- a/tests/topotests/bgp_features/peer1/exabgp.cfg +++ /dev/null @@ -1,12 +0,0 @@ -group exabgp { - process announce-routes { - run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer1.in"; - encoder text; - } - neighbor 192.168.101.1 { - router-id 192.168.101.3; - local-address 192.168.101.3; - local-as 65403; - peer-as 65000; - } -} diff --git a/tests/topotests/bgp_features/peer2/exa_readpipe.py b/tests/topotests/bgp_features/peer2/exa_readpipe.py deleted file mode 100644 index 9e689a27e3..0000000000 --- a/tests/topotests/bgp_features/peer2/exa_readpipe.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -"Helper script to read api commands from a pipe and feed them to ExaBGP" - -import sys - -if len(sys.argv) != 2: - sys.exit(1) -fifo = sys.argv[1] - -while True: - pipe = open(fifo, "r") - with pipe: - line = pipe.readline().strip() - if line != "": - sys.stdout.write("{}\n".format(line)) - sys.stdout.flush() - pipe.close() - -sys.exit(0) diff --git a/tests/topotests/bgp_features/peer2/exabgp.cfg b/tests/topotests/bgp_features/peer2/exabgp.cfg deleted file mode 100644 index 1f65547bc5..0000000000 --- a/tests/topotests/bgp_features/peer2/exabgp.cfg +++ /dev/null @@ -1,12 +0,0 @@ -group exabgp { - process announce-routes { - run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer2.in"; - encoder text; - } - neighbor 192.168.101.1 { - router-id 192.168.101.4; - local-address 192.168.101.4; - local-as 65404; - peer-as 65000; - } -} diff --git a/tests/topotests/bgp_features/peer3/exa_readpipe.py b/tests/topotests/bgp_features/peer3/exa_readpipe.py deleted file mode 100644 index 9e689a27e3..0000000000 --- a/tests/topotests/bgp_features/peer3/exa_readpipe.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -"Helper script to read api commands from a pipe and feed them to ExaBGP" - -import sys - -if len(sys.argv) != 2: - sys.exit(1) -fifo = sys.argv[1] - -while True: - pipe = open(fifo, "r") - with pipe: - line = pipe.readline().strip() - if line != "": - sys.stdout.write("{}\n".format(line)) - sys.stdout.flush() - pipe.close() - -sys.exit(0) diff --git a/tests/topotests/bgp_features/peer3/exabgp.cfg b/tests/topotests/bgp_features/peer3/exabgp.cfg deleted file mode 100644 index 8632cc86c5..0000000000 --- a/tests/topotests/bgp_features/peer3/exabgp.cfg +++ /dev/null @@ -1,12 +0,0 @@ -group exabgp { - process announce-routes { - run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer3.in"; - encoder text; - } - neighbor 192.168.101.1 { - router-id 192.168.101.5; - local-address 192.168.101.5; - local-as 65405; - peer-as 65000; - } -} diff --git 
a/tests/topotests/bgp_features/peer4/exa_readpipe.py b/tests/topotests/bgp_features/peer4/exa_readpipe.py deleted file mode 100644 index 9e689a27e3..0000000000 --- a/tests/topotests/bgp_features/peer4/exa_readpipe.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -"Helper script to read api commands from a pipe and feed them to ExaBGP" - -import sys - -if len(sys.argv) != 2: - sys.exit(1) -fifo = sys.argv[1] - -while True: - pipe = open(fifo, "r") - with pipe: - line = pipe.readline().strip() - if line != "": - sys.stdout.write("{}\n".format(line)) - sys.stdout.flush() - pipe.close() - -sys.exit(0) diff --git a/tests/topotests/bgp_features/peer4/exabgp.cfg b/tests/topotests/bgp_features/peer4/exabgp.cfg deleted file mode 100644 index 06bc0d6e64..0000000000 --- a/tests/topotests/bgp_features/peer4/exabgp.cfg +++ /dev/null @@ -1,12 +0,0 @@ -group exabgp { - process announce-routes { - run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer4.in"; - encoder text; - } - neighbor 192.168.101.1 { - router-id 192.168.101.6; - local-address 192.168.101.6; - local-as 65406; - peer-as 65000; - } -} diff --git a/tests/topotests/bgp_features/r1/bgp_damp_announced.json b/tests/topotests/bgp_features/r1/bgp_damp_announced.json deleted file mode 100644 index cb4a2c9b2f..0000000000 --- a/tests/topotests/bgp_features/r1/bgp_damp_announced.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "localAS":65000, - "routes":{ - "192.168.31.0/24": [ { "valid":true, "network":"192.168.31.0\/24", "peerId":"192.168.101.3" } ], - "192.168.32.0/24": [ { "valid":true, "network":"192.168.32.0\/24", "peerId":"192.168.101.3" } ], - "192.168.33.0/24": [ { "valid":true, "network":"192.168.33.0\/24", "peerId":"192.168.101.3" } ], - "192.168.34.0/24": [ { "valid":true, "network":"192.168.34.0\/24", "peerId":"192.168.101.3" } ], - "192.168.41.0/24": [ { "valid":true, "network":"192.168.41.0\/24", "peerId":"192.168.101.4" } ], - "192.168.42.0/24": [ { "valid":true, "network":"192.168.42.0\/24", "peerId":"192.168.101.4" } ], - "192.168.43.0/24": [ { "valid":true, "network":"192.168.43.0\/24", "peerId":"192.168.101.4" } ], - "192.168.44.0/24": [ { "valid":true, "network":"192.168.44.0\/24", "peerId":"192.168.101.4" } ], - "192.168.51.0/24": [ { "valid":true, "network":"192.168.51.0\/24", "peerId":"192.168.101.5" } ], - "192.168.52.0/24": [ { "valid":true, "network":"192.168.52.0\/24", "peerId":"192.168.101.5" } ], - "192.168.53.0/24": [ { "valid":true, "network":"192.168.53.0\/24", "peerId":"192.168.101.5" } ], - "192.168.54.0/24": [ { "valid":true, "network":"192.168.54.0\/24", "peerId":"192.168.101.5" } ], - "192.168.61.0/24": [ { "valid":true, "network":"192.168.61.0\/24", "peerId":"192.168.101.6" } ], - "192.168.62.0/24": [ { "valid":true, "network":"192.168.62.0\/24", "peerId":"192.168.101.6" } ], - "192.168.63.0/24": [ { "valid":true, "network":"192.168.63.0\/24", "peerId":"192.168.101.6" } ], - "192.168.64.0/24": [ { "valid":true, "network":"192.168.64.0\/24", "peerId":"192.168.101.6" } ] - } -} diff --git a/tests/topotests/bgp_features/r1/bgp_damp_setup.json b/tests/topotests/bgp_features/r1/bgp_damp_setup.json deleted file mode 100644 index f9f89db894..0000000000 --- a/tests/topotests/bgp_features/r1/bgp_damp_setup.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "ipv4Unicast":{ - "peers":{ - "192.168.101.3":{"remoteAs":65403, "state":"Established"}, - "192.168.101.4":{"remoteAs":65404, "state":"Established"}, - "192.168.101.5":{"remoteAs":65405, "state":"Established"}, - "192.168.101.6":{"remoteAs":65406, "state":"Established"} - } - } 
-} diff --git a/tests/topotests/bgp_features/r2/bgp_damp_announced.json b/tests/topotests/bgp_features/r2/bgp_damp_announced.json deleted file mode 100644 index 9394358f82..0000000000 --- a/tests/topotests/bgp_features/r2/bgp_damp_announced.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "localAS":65000, - "routes":{ - "192.168.31.0/24": [ { "network":"192.168.31.0\/24", "peerId":"192.168.0.1" } ], - "192.168.32.0/24": [ { "network":"192.168.32.0\/24", "peerId":"192.168.0.1" } ], - "192.168.33.0/24": [ { "network":"192.168.33.0\/24", "peerId":"192.168.0.1" } ], - "192.168.34.0/24": [ { "network":"192.168.34.0\/24", "peerId":"192.168.0.1" } ], - "192.168.41.0/24": [ { "network":"192.168.41.0\/24", "peerId":"192.168.0.1" } ], - "192.168.42.0/24": [ { "network":"192.168.42.0\/24", "peerId":"192.168.0.1" } ], - "192.168.43.0/24": [ { "network":"192.168.43.0\/24", "peerId":"192.168.0.1" } ], - "192.168.44.0/24": [ { "network":"192.168.44.0\/24", "peerId":"192.168.0.1" } ], - "192.168.51.0/24": [ { "network":"192.168.51.0\/24", "peerId":"192.168.0.1" } ], - "192.168.52.0/24": [ { "network":"192.168.52.0\/24", "peerId":"192.168.0.1" } ], - "192.168.53.0/24": [ { "network":"192.168.53.0\/24", "peerId":"192.168.0.1" } ], - "192.168.54.0/24": [ { "network":"192.168.54.0\/24", "peerId":"192.168.0.1" } ], - "192.168.61.0/24": [ { "network":"192.168.61.0\/24", "peerId":"192.168.0.1" } ], - "192.168.62.0/24": [ { "network":"192.168.62.0\/24", "peerId":"192.168.0.1" } ], - "192.168.63.0/24": [ { "network":"192.168.63.0\/24", "peerId":"192.168.0.1" } ], - "192.168.64.0/24": [ { "network":"192.168.64.0\/24", "peerId":"192.168.0.1" } ] - } -} diff --git a/tests/topotests/bgp_features/r2/bgp_damp_withdrawn.json b/tests/topotests/bgp_features/r2/bgp_damp_withdrawn.json deleted file mode 100644 index f3c54a70a1..0000000000 --- a/tests/topotests/bgp_features/r2/bgp_damp_withdrawn.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "192.168.31.0/24": null, - "192.168.32.0/24": null, - "192.168.33.0/24": null, - "192.168.34.0/24": null, - "192.168.41.0/24": null, - "192.168.42.0/24": null, - "192.168.43.0/24": null, - "192.168.44.0/24": null, - "192.168.51.0/24": null, - "192.168.52.0/24": null, - "192.168.53.0/24": null, - "192.168.54.0/24": null, - "192.168.61.0/24": null, - "192.168.62.0/24": null, - "192.168.63.0/24": null, - "192.168.64.0/24": null -} diff --git a/tests/topotests/bgp_features/test_bgp_features.py b/tests/topotests/bgp_features/test_bgp_features.py index a68508c4ae..d19b7722d0 100644 --- a/tests/topotests/bgp_features/test_bgp_features.py +++ b/tests/topotests/bgp_features/test_bgp_features.py @@ -33,7 +33,6 @@ import sys import pytest import re import time -from time import sleep # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) @@ -67,14 +66,6 @@ class BGPFeaturesTopo1(Topo): for rtrNum in range(1, 6): tgen.add_router("r{}".format(rtrNum)) - # create ExaBGP peers - for peer_num in range(1, 5): - tgen.add_exabgp_peer( - "peer{}".format(peer_num), - ip="192.168.101.{}".format(peer_num + 2), - defaultRoute="via 192.168.101.1", - ) - # Setup Switches and connections for swNum in range(1, 11): tgen.add_switch("sw{}".format(swNum)) @@ -100,12 +91,6 @@ class BGPFeaturesTopo1(Topo): tgen.gears["r2"].add_link(tgen.gears["sw5"]) tgen.gears["r5"].add_link(tgen.gears["sw5"]) - # Add ExaBGP peers to sw4 - tgen.gears["peer1"].add_link(tgen.gears["sw4"]) - tgen.gears["peer2"].add_link(tgen.gears["sw4"]) - tgen.gears["peer3"].add_link(tgen.gears["sw4"]) - tgen.gears["peer4"].add_link(tgen.gears["sw4"]) - ##################################################### # @@ -1110,662 +1095,6 @@ def test_bgp_delayopen_dual(): # end test_bgp_delayopen_dual -def test_bgp_dampening_setup(): - "BGP route-flap dampening test setup" - - # This test starts four ExaBGP peers, adds them as neighbors to the - # configuration of router r1 and checks if connections get established. - - tgen = get_topogen() - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info("Starting BGP route-flap dampening test setup") - - # Start ExaBGP peers connected to r1 via switch 4 - logger.info("Starting ExaBGP peers") - for peer_num in range(1, 5): - logger.info("Creating named pipe for ExaBGP peer peer{}".format(peer_num)) - fifo_in = "/var/run/exabgp_peer{}.in".format(peer_num) - if os.path.exists(fifo_in): - os.remove(fifo_in) - os.mkfifo(fifo_in, 0o777) - logger.info("Starting ExaBGP on peer peer{}".format(peer_num)) - peer = tgen.gears["peer{}".format(peer_num)] - peer_dir = os.path.join(CWD, "peer{}".format(peer_num)) - env_file = os.path.join(CWD, "exabgp.env") - peer.start(peer_dir, env_file) - - # Add ExaBGP peers to configuration of router r2 - logger.info("Adding ExaBGP peers as neighbors to configuration of router r2") - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.3 remote-as 65403"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.3 route-map testmap-in"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.3 route-map testmap-out"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.4 remote-as 65404"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.4 route-map testmap-in"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.4 route-map testmap-out"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.5 remote-as 65405"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.5 route-map testmap-in"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.5 route-map testmap-out"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.6 remote-as 65406"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c 
"address-family ipv4 unicast" -c "neighbor 192.168.101.6 route-map testmap-in"' - ) - tgen.net["r1"].cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.6 route-map testmap-out"' - ) - - # Check if exabgp peers are up and running - logger.info("Checking for established connections to ExaBGP peers on router r1") - router = tgen.gears["r1"] - reffile = os.path.join(CWD, "r1/bgp_damp_setup.json") - expected = json.loads(open(reffile).read()) - test_func = functools.partial( - topotest.router_json_cmp, router, "show ip bgp summary json", expected - ) - _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) - assertmsg = ( - "BGP session on r1 did not establish connections with one ore more ExaBGP peers" - ) - assert res is None, assertmsg - - # end test_bgp_dampening_setup - - -def test_bgp_dampening_route_announce(): - "Test of BGP route-flap dampening route announcement" - - # This test checks if the four ExaBGP peers can announce routes to router - # r1 and if these routes get forwarded to router r2. - - tgen = get_topogen() - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info("Starting test of BGP route-flap dampening route announcement") - - # Announce routes on exabgp peers to r2 - logger.info("Announcing routes on ExaBGP peers to r1") - for prefix_iter in range(1, 5): - for peer_num in range(1, 5): - pipe = open("/run/exabgp_peer{}.in".format(peer_num), "w") - with pipe: - pipe.write( - "announce route 192.168.{}{}.0/24 next-hop 192.168.101.{}\n".format( - (peer_num + 2), prefix_iter, (peer_num + 2) - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - - # Check if routes announced by ExaBGP peers are present in RIB of router r1 - logger.info( - "Checking if routes announced by ExaBGP peers are present in RIB of router r1" - ) - router = tgen.gears["r1"] - reffile = os.path.join(CWD, "r1/bgp_damp_announced.json") - expected = json.loads(open(reffile).read()) - test_func = functools.partial( - topotest.router_json_cmp, router, "show ip bgp json", expected - ) - _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) - assertmsg = ( - "BGP session on router r1 did not receive routes announced by ExaBGP peers" - ) - assert res is None, assertmsg - - # Check if routes announced by ExaBGP peers to router r1 have been forwarded - # and are now present in RIB of router r2 - logger.info( - "Checking if forwarded routes announced by ExaBGP peers are present in RIB of router r2" - ) - router = tgen.gears["r2"] - reffile = os.path.join(CWD, "r2/bgp_damp_announced.json") - expected = json.loads(open(reffile).read()) - test_func = functools.partial( - topotest.router_json_cmp, router, "show ip bgp json", expected - ) - _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) - assertmsg = "BGP session on router r2 did not receive routes announced by ExaBGP peers forwarded by router r1" - assert res is None, assertmsg - - # end test_bgp_dampening_route_announce - - -def test_bgp_dampening_disabled(): - "Test of BGP route-flapping with dampening disabled" - - # This test verifies that flapped routes do not get withdrawn from the RIB - # of router r1 if dampening is disabled. 
- - tgen = get_topogen() - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info("Starting test of BGP route-flapping with dampening disabled") - - # Flapping routes on ExaBGP peer peer1 - logger.info( - "Flapping routes on ExaBGP peer peer1 with route-flap dampening disabled" - ) - for _ in range(1, 5): - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer1.in", "w") - with pipe: - pipe.write( - "withdraw route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - sleep(1) # Give the BGP session on router r1 time to process routes - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer1.in", "w") - with pipe: - pipe.write( - "announce route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - - # Verify flapped routes are still present in RIB of router r1 - logger.info( - "Verifying that the flapped routes are still present in RIB of router r1" - ) - router = tgen.gears["r1"] - reffile = os.path.join(CWD, "r1/bgp_damp_announced.json") - expected = json.loads(open(reffile).read()) - test_func = functools.partial( - topotest.router_json_cmp, router, "show ip bgp json", expected - ) - _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) - assertmsg = "BGP session on router r1 removed flapped routes despite route-flap dampening being disabled" - assert res is None, assertmsg - - # end test_bgp_dampening_disabled - - -def test_bgp_dampening_config(): - "Test of BGP route-flap dampening configuration" - - # This test adds peer-group group1 with peers peer1 and peer2 to the - # configuration of router r1, sets up dampening configurations with - # different profiles and verifies the configured dampening parameters. 
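The configuration test whose body follows issues many near-identical vtysh one-liners through tgen.net["r1"].cmd(). A small wrapper in the same style condenses the pattern; the helper name and default ASN argument are illustrative, the command structure is the one used in the deleted code.

    def vtysh_bgp_af_cmd(router_net, line, asn=65000):
        # Wrap one ipv4-unicast statement in the usual "conf t" / "router bgp <asn>"
        # preamble, e.g. line = "bgp dampening 30 300 900 90".
        return router_net.cmd(
            'vtysh -c "conf t" -c "router bgp {}" '
            '-c "address-family ipv4 unicast" -c "{}"'.format(asn, line)
        )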
- - tgen = get_topogen() - r_1 = tgen.net["r1"] - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info("Starting test of BGP route-flap dampening configuration") - - # Add peer-group group1 with peers peer1 and peer2 - logger.info( - "Creating peer-group group1 and adding ExaBGP peers peer1 and peer2 to it" - ) - r_1.cmd('vtysh -c "conf t" -c "router bgp 65000" -c "neighbor group1 peer-group"') - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.3 peer-group group1"' - ) # peer1 - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.4 peer-group group1"' - ) # peer2 - - # Enable different dampening profiles for peer1, peer3, group1 and global - # configuration - logger.info( - "Enabling different dampening profiles for peer1, peer3, group1 and global configuration" - ) - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "bgp dampening 30 300 900 90"' - ) - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor group1 dampening 20 200 600 60"' - ) - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.3 dampening 10 100 300 30"' - ) # peer1 - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.5 dampening 10 100 300 30"' - ) # peer3 - - # Verify route-flap dampening configuration - logger.info("Verifying route-flap dampening configuration on router r1") - vtyout = r_1.cmd('vtysh -c "show running-config"') - assertmsg = "BGP Session on r1 does not show enabled global route-flap dampening in running configuration" - assert re.search("bgp dampening 30 300 900 90", vtyout), assertmsg - assertmsg = "BGP Session on r1 does not show route-flap dampening enabled for peer-group group1 in running configuration" - assert re.search("neighbor group1 dampening 20 200 600 60", vtyout), assertmsg - assertmsg = "BGP Session on r1 does not show route-flap dampening enabled for peer peer1 in running configuration" - assert re.search( - "neighbor 192.168.101.3 dampening 10 100 300 30", vtyout - ), assertmsg - assertmsg = "BGP Session on r1 does not show route-flap dampening enabled for peer peer3 in running configuration" - assert re.search( - "neighbor 192.168.101.5 dampening 10 100 300 30", vtyout - ), assertmsg - - # end test_bgp_dampening_config - - -def test_bgp_dampening_profile_peer_over_group(): - "Test of BGP route-flap dampening profile preferences: peer over group" - - # This test verifies that the dampening profile of a peer takes precedence - # over the dampening profile of its peer-group by flapping the peers routes - # until dampened and comparing the reuse times to the one specified in the - # dampening configuration. 
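The profile checks that follow all parse the reuse timer out of "show ip bgp dampening dampened-paths". A condensed sketch of that parsing, under the same column and hours:minutes format assumptions as the deleted assertions; the function name and return convention are illustrative.

    import re

    def dampened_reuse_minutes(vtyout, prefix_pattern=r"\*d 192\.168\.3\d\.0/24.*"):
        # The reuse timer is the fourth whitespace-separated field; its first two
        # colon-separated components are hours and minutes.
        minutes = []
        for route in re.findall(prefix_pattern, vtyout):
            hours, mins = route.split()[3].split(":")[:2]
            minutes.append(int(hours) * 60 + int(mins))
        return minutes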
- - tgen = get_topogen() - r_1 = tgen.net["r1"] - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info( - "Starting test of BGP route-flap dampening profile preferences: peer over group" - ) - - # Flapping routes on ExaBGP peer peer1 - logger.info( - "Flapping routes on ExaBGP peer peer1 with route-flap dampening enabled" - ) - for _ in range(1, 5): - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer1.in", "w") - with pipe: - pipe.write( - "withdraw route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - sleep(1) # Give the BGP session on router r1 time to process routes - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer1.in", "w") - with pipe: - pipe.write( - "announce route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - - # Check damped paths on r1 for routes of peer1 witn peer profile - logger.info( - "Checking if router r1 used the correct dampening profile on routes flapped by ExaBGP peer peer1" - ) - sleep(5) # Wait 5 seconds for paths to show up in dampened-paths list - vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') - routes = re.findall(r"\*d 192\.168\.3\d\.0\/24.*", vtyout) - assertmsg = ( - "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer1" - ) - assert len(routes) == 4, assertmsg - assertmsg = "BGP session on router r1 used wrong dampening profile for a route flapped by ExaBGP peer peer1" - for route in routes: - assert (int(route.split()[3].split(":")[0]) == 0) and ( # hours of reuse time - 35 > int(route.split()[3].split(":")[1]) > 25 - ), assertmsg # minutes of reuse time - - # end test_bgp_dampening_profile_peer_over_group - - -def test_bgp_dampening_profile_group_over_global(): - "Test of BGP route-flap dampening profile preferences: group over global" - - # This test verifies that the dampening profile of a peer-group takes - # precedence over the global dampening profile by flapping the routes of a - # peer-group member until dampened and comparing the reuse times to the one - # specified in the dampening configuration. 
- - tgen = get_topogen() - r_1 = tgen.net["r1"] - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info( - "Starting test of BGP route-flap dampening profile preferences: group over global" - ) - - # Flapping routes on ExaBGP peer peer2 - logger.info( - "Flapping routes on ExaBGP peer peer2 with route-flap dampening enabled" - ) - for _ in range(1, 5): - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer2.in", "w") - with pipe: - pipe.write( - "withdraw route 192.168.4{}.0/24 next-hop 192.168.101.4\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - sleep(1) # Give the BGP session on router r1 time to process routes - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer2.in", "w") - with pipe: - pipe.write( - "announce route 192.168.4{}.0/24 next-hop 192.168.101.4\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - - # Check damped paths on r1 for routes of peer2 witn group profile - logger.info( - "Checking if router r1 used the correct dampening profile on routes flapped by ExaBGP peer peer2" - ) - sleep(5) # wait 5 seconds for paths to shop up in damp list - vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') - routes = re.findall(r"\*d 192\.168\.4\d\.0\/24.*", vtyout) - assertmsg = ( - "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer2" - ) - assert len(routes) == 4, assertmsg - assertmsg = "BGP session on router r1 used wrong dampening profile for a route flapped by ExaBGP peer peer2" - for route in routes: - assert (int(route.split()[3].split(":")[0]) == 0) and ( # hours of reuse time - 65 > int(route.split()[3].split(":")[1]) > 55 - ), assertmsg # minutes of reuse time - - # end test_bgp_dampening_profile_group_over_global - - -def test_bgp_dampening_profile_peer_over_global(): - "Test of BGP route-flap dampening profile preferences: peer over global" - - # This test verifies that the dampening profile of a peer takes precedence - # over the global dampening profile by flapping the routes of the peer until - # dampened and comparing the reuse times to the one specified in the - # dampening configuration. 
- - tgen = get_topogen() - r_1 = tgen.net["r1"] - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info( - "Starting test of BGP route-flap dampening profile preferences: peer over global" - ) - - # Flapping routes on ExaBGP peer peer3 - logger.info( - "Flapping routes on ExaBGP peer peer3 with route-flap dampening enabled" - ) - for _ in range(1, 5): - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer3.in", "w") - with pipe: - pipe.write( - "withdraw route 192.168.5{}.0/24 next-hop 192.168.101.5\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - sleep(1) # Give the BGP session on router r1 time to process routes - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer3.in", "w") - with pipe: - pipe.write( - "announce route 192.168.5{}.0/24 next-hop 192.168.101.5\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - - # Check damped paths on r1 for routes of peer3 witn peer profile - logger.info( - "Checking if router r1 used the correct dampening profile on routes flapped by ExaBGP peer peer3" - ) - sleep(5) # wait 5 seconds for paths to shop up in damp list - vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') - routes = re.findall(r"\*d 192\.168\.5\d\.0\/24.*", vtyout) - assertmsg = ( - "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer3" - ) - assert len(routes) == 4, assertmsg - assertmsg = "BGP session on router r1 used wrong dampening profile for a route flapped by ExaBGP peer peer3" - for route in routes: - assert (int(route.split()[3].split(":")[0]) == 0) and ( # hours of reuse time - 35 > int(route.split()[3].split(":")[1]) > 25 - ), assertmsg # minutes of reuse time - - # end test_bgp_dampening_profile_peer_over_global - - -def test_bgp_dampening_profile_global(): - "Test of BGP route-flap dampening global profile" - - # This test verifies the application of the global dampening profile by - # flapping the routes of a peer until dampened and comparing the reuse times - # to the one specified in the dampening configuration. 
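For reference, the reuse windows asserted across the three profile tests in this block (the peer and group cases above, the global case below) bracket the max-suppress-time of the matching "bgp dampening <half-life> <reuse> <suppress> <max-suppress>" profile configured earlier. The dictionary below only restates those deleted assertions in one place; the key labels are illustrative.

    EXPECTED_REUSE_MINUTES = {
        "peer profile (10 100 300 30)":   (25, 35),  # peers 192.168.101.3 and .5
        "group profile (20 200 600 60)":  (55, 65),  # peer-group group1 member .4
        "global profile (30 300 900 90)": (85, 95),  # peer .6 falls back to global
    }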
- - tgen = get_topogen() - r_1 = tgen.net["r1"] - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info("Starting test of BGP route-flap dampening global profile") - - # Flapping routes on ExaBGP peer peer4 - logger.info( - "Flapping routes on ExaBGP peer peer4 with route-flap dampening enabled" - ) - for _ in range(1, 5): - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer4.in", "w") - with pipe: - pipe.write( - "withdraw route 192.168.6{}.0/24 next-hop 192.168.101.6\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - sleep(1) # Give the BGP session on router r1 time to process routes - for prefix_iter in range(1, 5): - pipe = open("/run/exabgp_peer4.in", "w") - with pipe: - pipe.write( - "announce route 192.168.6{}.0/24 next-hop 192.168.101.6\n".format( - prefix_iter - ) - ) - pipe.close() - sleep(0.1) # ExaBGP API command processing delay - - # Check damped paths on r1 for routes of peer4 witn global profile - logger.info( - "Checking if router r1 used the global dampening profile on routes flapped by ExaBGP peer peer4" - ) - sleep(5) # wait 5 seconds for paths to shop up in damp list - vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') - routes = re.findall(r"\*d 192\.168\.6\d\.0\/24.*", vtyout) - assertmsg = ( - "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer4" - ) - assert len(routes) == 4, assertmsg - assertmsg = "BGP session on router r1 did not use the global dampening profile for a route flapped by ExaBGP peer peer4" - for route in routes: - assert (int(route.split()[3].split(":")[0]) == 1) and ( # hours of reuse time - 35 > int(route.split()[3].split(":")[1]) > 25 - ), assertmsg # minutes of reuse time - - # end test_bgp_dampening_profile_global - - -def test_bgp_dampening_withdaw(): - "Test BGP route-flap dampening route withdraw" - - # This test verifies that the withrawl of dampened routes from the RIB of - # router r1 was propagated to router r2. - - tgen = get_topogen() - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info("Starting test of BGP route-flap dampening route withdraw") - - # Check if routes dampened on router r1 have been withdrawn from the RIB on - # router r2 - logger.info( - "Checking if routes dampened on router r1 have been withdrawn of RIB on router r2" - ) - reffile = os.path.join(CWD, "r2/bgp_damp_withdrawn.json") - expected = json.loads(open(reffile).read()) - test_func = functools.partial( - topotest.router_json_cmp, tgen.gears["r2"], "show ip bgp json", expected - ) - _, res = topotest.run_and_expect(test_func, None, count=5, wait=1) - assertmsg = "BGP session on router r2 did not receive withdraw of routes dampened on router r1" - assert res is None, assertmsg - - # end test_bgp_dampening_withdaw - - -def test_bgp_dampening_cleanup(): - "BGP route-flap dampening test cleanup" - - # This test cleans up after other tests associated with route-flap dampening - # by disabling all dampening configurations, removing added peers and - # peer-groups from the configuration on router r1, and shutting down ExaBGP - # peers peer1, peer2 and peer3. 
- - tgen = get_topogen() - r_1 = tgen.net["r1"] - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - logger.info("Starting BGP route-flap dampening test cleanup") - - # Disable all dampening configurations - logger.info("Disabling all dampening configurations") - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no bgp dampening"' - ) - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no neighbor group1 dampening"' - ) - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no neighbor 192.168.101.3 dampening"' - ) - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no neighbor 192.168.101.5 dampening"' - ) - - # Remove ExaBGP peers from configuration of router r1 - logger.info("Removing ExaBGP peers from configuration of router r1") - for router_num in range(3, 7): - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.{}"'.format( - router_num - ) - ) - - # Remove peer-group group1 from configuration of router r1 - logger.info("Removing peer-group group1 peers from configuration of router r1") - r_1.cmd( - 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor group1 peer-group"' - ) - - # Stop ExaBGP peers and remove associated named pipes - logger.info("Stopping ExaBGP peers and removing associated named pipes") - for peer_num in range(1, 5): - logger.info("Terminating ExaBGP on peer peer{}".format(peer_num)) - peer = tgen.gears["peer{}".format(peer_num)] - logger.info("Removing named pipe of ExaBGP peer peer{}".format(peer_num)) - fifo_in = "/var/run/exabgp_peer{}.in".format(peer_num) - peer.stop() - if os.path.exists(fifo_in): - os.remove(fifo_in) - - # end test_bgp_dampening_cleanup - - -def test_bgp_dampening_aftermath(): - "BGP route-flap dampening aftermath test" - - # This test verifies routers r1 and r2 not being affected by the route-flap - # dampening test series. 
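This aftermath check, like the earlier ones, relies on the polling idiom used throughout these topotests: build a partial-JSON comparison with functools.partial() and let run_and_expect() retry it. A condensed sketch with the command string and library calls taken from the deleted code; the wrapper name and default retry counts are illustrative.

    import functools
    import json

    from lib import topotest

    def bgp_rib_matches(router, reffile, count=10, wait=2):
        # Poll "show ip bgp json" until the output is a superset of the reference
        # file, or give up after roughly count * wait seconds.
        expected = json.loads(open(reffile).read())
        test_func = functools.partial(
            topotest.router_json_cmp, router, "show ip bgp json", expected
        )
        _, result = topotest.run_and_expect(test_func, None, count=count, wait=wait)
        return result is None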
- - tgen = get_topogen() - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - # Check BGP Summary on routers r1 and r2 - for rtr_num in [1, 2]: - logger.info( - "Checking if BGP router on r{} remains unaffected by route-flap dampening tests".format( - rtr_num - ) - ) - router = tgen.gears["r{}".format(rtr_num)] - reffile = os.path.join(CWD, "r{}/show_bgp.json".format(rtr_num)) - expected = json.loads(open(reffile).read()) - test_func = functools.partial( - topotest.router_json_cmp, router, "show ip bgp json", expected - ) - _, res = topotest.run_and_expect(test_func, None, count=10, wait=2) - assertmsg = "BGP routes on router r{} are wrong after route-flap dampening tests".format( - rtr_num - ) - assert res is None, assertmsg - - # end test_bgp_dampening_aftermath - - if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py index ee57b9c479..330ae5e437 100644 --- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py +++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py @@ -139,6 +139,9 @@ from lib.common_config import ( required_linux_kernel_version, ) +pytestmark = [pytest.mark.bgpd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/bgp_gr_topojson_topo1.json".format(CWD) try: diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py index b6f8bf4cd9..e7ce216042 100644 --- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py +++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py @@ -138,6 +138,9 @@ from lib.common_config import ( required_linux_kernel_version, ) +pytestmark = [pytest.mark.bgpd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/bgp_gr_topojson_topo2.json".format(CWD) try: diff --git a/tests/topotests/bgp_gshut/test_bgp_gshut.py b/tests/topotests/bgp_gshut/test_bgp_gshut.py index fe945a4565..77f86a0bb8 100644 --- a/tests/topotests/bgp_gshut/test_bgp_gshut.py +++ b/tests/topotests/bgp_gshut/test_bgp_gshut.py @@ -75,6 +75,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py index 868aec9f3e..fcfeaab613 100644 --- a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py +++ b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py @@ -75,6 +75,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/ebgp_gshut_topo1.json".format(CWD) try: diff --git a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py index 69f4916374..d83e9e25a1 100644 --- a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py +++ b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py @@ -75,6 
+75,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/ibgp_gshut_topo1.json".format(CWD) try: diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py index 31fbdcd4b5..69eba23e0f 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py @@ -73,6 +73,9 @@ from lib.topolog import logger from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd] + + # Save the Current Working Directory to find configuration files. CWD = os_path.dirname(os_path.realpath(__file__)) sys.path.append(os_path.join(CWD, "../")) diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py index 84d9c48f35..b033c7e5cd 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py @@ -97,6 +97,9 @@ from lib.topolog import logger from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/bgp_large_community_topo_2.json".format(CWD) diff --git a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py index f09ff20651..3fcc3bec9a 100644 --- a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py +++ b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py @@ -46,6 +46,9 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + + """ This topology is for validating one of the primary use cases for weighted ECMP (a.k.a. 
Unequal cost multipath) using BGP link-bandwidth: diff --git a/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py index a3ca1408e2..a7959fe61b 100755 --- a/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py +++ b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py @@ -50,11 +50,14 @@ sys.path.append(os.path.join(CWD, "../")) from lib.topogen import Topogen, get_topogen from lib.topojson import build_topo_from_json, build_config_from_json +from lib.topojson import linux_intf_config_from_json from lib.common_config import start_topology from lib.topotest import router_json_cmp, run_and_expect from mininet.topo import Topo from functools import partial +pytestmark = [pytest.mark.bgpd] + LISTEN_ADDRESSES = { "r1": ["10.0.0.1"], @@ -94,6 +97,9 @@ def setup_module(mod): ) start_topology(tgen) + + linux_intf_config_from_json(tgen, topo) + build_config_from_json(tgen, topo) diff --git a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py index 32e7a4df61..7c5ed87dd0 100644 --- a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py +++ b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py @@ -43,6 +43,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py index 8494653dfe..0fde32a68b 100644 --- a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py +++ b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py @@ -47,6 +47,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py index b99664e700..5c93910788 100644 --- a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py +++ b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py @@ -43,6 +43,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py index 7ea5a24fd7..c9a93bd75f 100644 --- a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py +++ b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py @@ -145,6 +145,7 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] # Reading the data from JSON File for topology creation jsonFile = "{}/bgp_multi_vrf_topo1.json".format(CWD) diff --git 
a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py index d8815a0d39..01e90fb4b8 100644 --- a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py +++ b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py @@ -81,6 +81,10 @@ from functools import partial sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from lib import topotest + +pytestmark = [pytest.mark.bgpd] + + fatal_error = "" diff --git a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py index a9541a55c5..a591c2f3f4 100644 --- a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py +++ b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py @@ -94,6 +94,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/bgp_path_attributes.json".format(CWD) diff --git a/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py b/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py index 39a0beeb11..743fcf7b3a 100755 --- a/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py +++ b/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py @@ -73,6 +73,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + class PeerTypeRelaxTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py b/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py index 22952f645c..10dee0f77b 100644 --- a/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py +++ b/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py @@ -73,6 +73,9 @@ from lib.topolog import logger from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/prefix_lists.json".format(CWD) diff --git a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py index ceac84709b..fffe135b77 100644 --- a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py +++ b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py @@ -41,6 +41,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, **_opts): diff --git a/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py index 25362530d4..703dcd7e2d 100755 --- a/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py +++ b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py @@ -41,6 +41,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, **_opts): diff --git a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py 
b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py index d514dccd4a..c644d2104f 100644 --- a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py +++ b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py @@ -50,6 +50,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py index 0467bf1bfb..ecf1ed521c 100644 --- a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py +++ b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py @@ -70,6 +70,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/bgp_aggregation.json".format(CWD) try: diff --git a/tests/topotests/bgp_route_map/test_route_map_topo1.py b/tests/topotests/bgp_route_map/test_route_map_topo1.py index 74172501db..7de56849c8 100644 --- a/tests/topotests/bgp_route_map/test_route_map_topo1.py +++ b/tests/topotests/bgp_route_map/test_route_map_topo1.py @@ -67,6 +67,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + ################################# # TOPOLOGY ################################# diff --git a/tests/topotests/bgp_route_map/test_route_map_topo2.py b/tests/topotests/bgp_route_map/test_route_map_topo2.py index 958eceba62..230a89ace1 100644 --- a/tests/topotests/bgp_route_map/test_route_map_topo2.py +++ b/tests/topotests/bgp_route_map/test_route_map_topo2.py @@ -149,6 +149,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/bgp_route_map_topo2.json".format(CWD) diff --git a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py index 6a604765ca..664c9dc91a 100644 --- a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py +++ b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py @@ -49,6 +49,9 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
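The comment above refers to the Mininet Topo builder boilerplate that practically every module in this series shares. A condensed, illustrative sketch of that pattern; the router and switch names are placeholders, and the calls mirror the ones visible elsewhere in this diff.

    from lib.topogen import get_topogen
    from mininet.topo import Topo

    class TemplateTopo(Topo):
        "Minimal two-router topology builder"

        def build(self, *_args, **_opts):
            tgen = get_topogen(self)
            for rtr in ("r1", "r2"):
                tgen.add_router(rtr)
            tgen.add_switch("s1")
            tgen.gears["r1"].add_link(tgen.gears["s1"])
            tgen.gears["r2"].add_link(tgen.gears["s1"])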
from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + + ##################################################### ## ## Network Topology Definition diff --git a/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py b/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py index af64648951..b4af911d91 100644 --- a/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py +++ b/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py @@ -44,6 +44,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py index 2d80c66b0b..3251484514 100755 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py @@ -39,6 +39,8 @@ from lib.topolog import logger from lib.common_config import required_linux_kernel_version from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class Topology(Topo): """ diff --git a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py index c75055c26f..476f6b556b 100644 --- a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py +++ b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py @@ -40,6 +40,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py b/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py index 7500c3b3ad..cb1d28cc06 100644 --- a/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py +++ b/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py @@ -51,6 +51,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py index 71bd58bf73..2972a25f38 100644 --- a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py +++ b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py @@ -73,6 +73,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py index 83682fb36d..d6f1058a98 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py @@ -81,6 +81,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for 
topology creation jsonFile = "{}/bgp_vrf_dynamic_route_leak_topo1.json".format(CWD) try: diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py index 6e7495d929..f701529b52 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py @@ -78,6 +78,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/bgp_vrf_dynamic_route_leak_topo2.json".format(CWD) try: diff --git a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py index a17819f747..57ba87e887 100644 --- a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py +++ b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py @@ -47,6 +47,8 @@ from lib.common_config import required_linux_kernel_version # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class BGPIPV6RTADVVRFTopo(Topo): "Test topology builder" diff --git a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py index 30bb9595b7..9889e1cdd5 100644 --- a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py +++ b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py @@ -44,6 +44,9 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + + total_ebgp_peers = 1 CustomizeVrfWithNetns = True diff --git a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py index 71f64e9b70..fcec0c23af 100644 --- a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py +++ b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py @@ -41,6 +41,8 @@ from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class BGPVRFTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index e57db7471c..d119b0931b 100755 --- a/tests/topotests/conftest.py +++ b/tests/topotests/conftest.py @@ -26,6 +26,12 @@ def pytest_addoption(parser): only run the setup_module() to setup the topology without running any tests. 
""" parser.addoption( + "--asan-abort", + action="store_true", + help="Configure address sanitizer to abort process on error", + ) + + parser.addoption( "--gdb-breakpoints", metavar="SYMBOL[,SYMBOL...]", help="Comma-separated list of functions to set gdb breakpoints on", @@ -68,6 +74,12 @@ def pytest_addoption(parser): ) parser.addoption( + "--strace-daemons", + metavar="DAEMON[,DAEMON...]", + help="Comma-separated list of daemons to strace, or 'all'", + ) + + parser.addoption( "--topology-only", action="store_true", default=False, @@ -167,6 +179,9 @@ def pytest_configure(config): if not diagnose_env(): pytest.exit("environment has errors, please read the logs") + asan_abort = config.getoption("--asan-abort") + topotest_extra_config["asan_abort"] = asan_abort + gdb_routers = config.getoption("--gdb-routers") gdb_routers = gdb_routers.split(",") if gdb_routers else [] topotest_extra_config["gdb_routers"] = gdb_routers @@ -185,6 +200,9 @@ def pytest_configure(config): shell = config.getoption("--shell") topotest_extra_config["shell"] = shell.split(",") if shell else [] + strace = config.getoption("--strace-daemons") + topotest_extra_config["strace_daemons"] = strace.split(",") if strace else [] + pause_after = config.getoption("--pause-after") shell_on_error = config.getoption("--shell-on-error") @@ -244,6 +262,11 @@ def pytest_runtest_makereport(item, call): ) ) + # We want to pause, if requested, on any error not just test cases + # (e.g., call.when == "setup") + if not pause: + pause = topotest_extra_config["pause_after"] + # (topogen) Set topology error to avoid advancing in the test. tgen = get_topogen() if tgen is not None: diff --git a/tests/topotests/docker/frr-topotests.sh b/tests/topotests/docker/frr-topotests.sh index 9ef59b3bbc..1eaaea2971 100755 --- a/tests/topotests/docker/frr-topotests.sh +++ b/tests/topotests/docker/frr-topotests.sh @@ -145,7 +145,15 @@ if [ "${TOPOTEST_PULL:-1}" = "1" ]; then docker pull frrouting/topotests:latest fi +if [[ -n "$TMUX" ]]; then + TMUX_OPTIONS="-v $(dirname $TMUX):$(dirname $TMUX) -e TMUX=$TMUX -e TMUX_PANE=$TMUX_PANE" +fi + +if [[ -n "$STY" ]]; then + SCREEN_OPTIONS="-v /run/screen:/run/screen -e STY=$STY" +fi set -- --rm -i \ + -v "$HOME:$HOME:ro" \ -v "$TOPOTEST_LOGS:/tmp" \ -v "$TOPOTEST_FRR:/root/host-frr:ro" \ -v "$TOPOTEST_BUILDCACHE:/root/persist" \ @@ -154,6 +162,8 @@ set -- --rm -i \ -e "TOPOTEST_DOC=$TOPOTEST_DOC" \ -e "TOPOTEST_SANITIZER=$TOPOTEST_SANITIZER" \ --privileged \ + $SCREEN_OPTINS \ + $TMUX_OPTIONS \ $TOPOTEST_OPTIONS \ frrouting/topotests:latest "$@" diff --git a/tests/topotests/evpn_pim_1/leaf1/pimd.conf b/tests/topotests/evpn_pim_1/leaf1/pimd.conf index 293e252086..d85f33d1fc 100644 --- a/tests/topotests/evpn_pim_1/leaf1/pimd.conf +++ b/tests/topotests/evpn_pim_1/leaf1/pimd.conf @@ -2,6 +2,7 @@ debug pim events debug pim nht debug pim zebra ip pim rp 192.168.100.1 +ip pim join-prune-interval 5 ! int lo ip pim diff --git a/tests/topotests/evpn_pim_1/leaf2/pimd.conf b/tests/topotests/evpn_pim_1/leaf2/pimd.conf index 08d5a19a2a..d775b800b3 100644 --- a/tests/topotests/evpn_pim_1/leaf2/pimd.conf +++ b/tests/topotests/evpn_pim_1/leaf2/pimd.conf @@ -1,4 +1,5 @@ ip pim rp 192.168.100.1 +ip pim join-prune-interval 5 ! 
int lo ip pim diff --git a/tests/topotests/evpn_pim_1/spine/pimd.conf b/tests/topotests/evpn_pim_1/spine/pimd.conf index 56adda5cc4..12c6d6f85c 100644 --- a/tests/topotests/evpn_pim_1/spine/pimd.conf +++ b/tests/topotests/evpn_pim_1/spine/pimd.conf @@ -1,4 +1,5 @@ ip pim rp 192.168.100.1 +ip pim join-prune-interval 5 ! int lo ip pim diff --git a/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py b/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py index 260a197aca..b1f5daef1e 100644 --- a/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py +++ b/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py @@ -49,6 +49,9 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd, pytest.mark.bgpd] + + ##################################################### ## ## Network Topology Definition diff --git a/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json b/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json index 14842da326..dd412708bb 100644 --- a/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json +++ b/tests/topotests/evpn_type5_test_topo1/evpn_type5_topo1.json @@ -41,7 +41,10 @@ "neighbor": { "e1": { "dest_link": { - "r1": {} + "r1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -55,7 +58,10 @@ "neighbor": { "e1": { "dest_link": { - "r1": {} + "r1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -106,7 +112,10 @@ "neighbor": { "e1": { "dest_link": { - "r2-link1": {} + "r2-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -120,7 +129,10 @@ "neighbor": { "e1": { "dest_link": { - "r2-link1": {} + "r2-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -140,7 +152,10 @@ "neighbor": { "e1": { "dest_link": { - "r2-link2": {} + "r2-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -154,7 +169,10 @@ "neighbor": { "e1": { "dest_link": { - "r2-link2": {} + "r2-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -222,7 +240,10 @@ "neighbor": { "r1": { "dest_link": { - "e1": {} + "e1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -233,7 +254,10 @@ "neighbor": { "r1": { "dest_link": { - "e1": {} + "e1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -262,7 +286,10 @@ "neighbor": { "r2": { "dest_link": { - "e1-link1": {} + "e1-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -273,7 +300,10 @@ "neighbor": { "r2": { "dest_link": { - "e1-link1": {} + "e1-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -302,7 +332,10 @@ "neighbor": { "r2": { "dest_link": { - "e1-link2": {} + "e1-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -313,7 +346,10 @@ "neighbor": { "r2": { "dest_link": { - "e1-link2": {} + "e1-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -342,6 +378,8 @@ "d1": { "dest_link": { "e1-link1": { + "keepalivetimer": 1, + "holddowntimer": 3, "deactivate": "ipv4" } } @@ -349,6 +387,8 @@ "d2": { "dest_link": { "e1-link1": { + "keepalivetimer": 1, + "holddowntimer": 3, "deactivate": "ipv4" } } @@ -412,6 +452,8 @@ "e1": { "dest_link": { "d1-link1": { + "keepalivetimer": 1, + "holddowntimer": 3, "deactivate": "ipv4" } } @@ -442,7 +484,10 @@ "neighbor": { "r3": { "dest_link": { - "d1": {} + "d1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -453,7 +498,10 @@ "neighbor": { "r3": { "dest_link": { - "d1": {} + "d1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -482,7 +530,10 @@ "neighbor": { "r4": { 
"dest_link": { - "d1-link1": {} + "d1-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -493,7 +544,10 @@ "neighbor": { "r4": { "dest_link": { - "d1-link1": {} + "d1-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -522,7 +576,10 @@ "neighbor": { "r4": { "dest_link": { - "d1-link2": {} + "d1-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -533,7 +590,10 @@ "neighbor": { "r4": { "dest_link": { - "d1-link2": {} + "d1-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -590,7 +650,9 @@ "e1": { "dest_link": { "d2-link1": { - "deactivate": "ipv4" + "deactivate": "ipv4", + "keepalivetimer": 1, + "holddowntimer": 3 } } } @@ -620,7 +682,10 @@ "neighbor": { "r3": { "dest_link": { - "d2": {} + "d2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -631,7 +696,10 @@ "neighbor": { "r3": { "dest_link": { - "d2": {} + "d2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -660,7 +728,10 @@ "neighbor": { "r4": { "dest_link": { - "d2-link1": {} + "d2-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -671,7 +742,10 @@ "neighbor": { "r4": { "dest_link": { - "d2-link1": {} + "d2-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -700,7 +774,10 @@ "neighbor": { "r4": { "dest_link": { - "d2-link2": {} + "d2-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -711,7 +788,10 @@ "neighbor": { "r4": { "dest_link": { - "d2-link2": {} + "d2-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -755,12 +835,18 @@ "neighbor": { "d1": { "dest_link": { - "r3": {} + "r3": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } }, "d2": { "dest_link": { - "r3": {} + "r3": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -771,12 +857,18 @@ "neighbor": { "d1": { "dest_link": { - "r3": {} + "r3": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } }, "d2": { "dest_link": { - "r3": {} + "r3": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -814,12 +906,18 @@ "neighbor": { "d1": { "dest_link": { - "r4-link1": {} + "r4-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } }, "d2": { "dest_link": { - "r4-link1": {} + "r4-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -830,12 +928,18 @@ "neighbor": { "d1": { "dest_link": { - "r4-link1": {} + "r4-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } }, "d2": { "dest_link": { - "r4-link1": {} + "r4-link1": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -852,12 +956,18 @@ "neighbor": { "d1": { "dest_link": { - "r4-link2": {} + "r4-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } }, "d2": { "dest_link": { - "r4-link2": {} + "r4-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -868,12 +978,18 @@ "neighbor": { "d1": { "dest_link": { - "r4-link2": {} + "r4-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } }, "d2": { "dest_link": { - "r4-link2": {} + "r4-link2": { + "keepalivetimer": 1, + "holddowntimer": 3 + } } } } @@ -885,3 +1001,4 @@ } } } + diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py index 46e21857c8..09d66baa79 100644 --- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py +++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py @@ -85,6 +85,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + 
+ # Reading the data from JSON File for topology creation jsonFile = "{}/evpn_type5_chaos_topo1.json".format(CWD) try: diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py index 1a399ab32e..521f2335b4 100644 --- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py +++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py @@ -91,6 +91,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/evpn_type5_topo1.json".format(CWD) try: diff --git a/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py b/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py index 27dc1073c6..70dcff035f 100755 --- a/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py +++ b/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py @@ -84,6 +84,9 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.isisd] + + # Global multi-dimensional dictionary containing all expected outputs outputs = {} diff --git a/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py b/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py index 9ad41c5934..ded1a4cc22 100755 --- a/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py +++ b/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py @@ -82,7 +82,7 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo -pytestmark = [pytest.mark.isisd] +pytestmark = [pytest.mark.isisd, pytest.mark.ldpd] # Global multi-dimensional dictionary containing all expected outputs outputs = {} diff --git a/tests/topotests/isis_snmp/test_isis_snmp.py b/tests/topotests/isis_snmp/test_isis_snmp.py index 04e043847d..2cd07299b0 100755 --- a/tests/topotests/isis_snmp/test_isis_snmp.py +++ b/tests/topotests/isis_snmp/test_isis_snmp.py @@ -82,6 +82,8 @@ from lib.snmptest import SnmpTester # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.isisd, pytest.mark.ldpd, pytest.mark.snmp] + class TemplateTopo(Topo): "Test topology builder" diff --git a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py index f47d906157..8052316d73 100644 --- a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py +++ b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py @@ -81,6 +81,7 @@ from lib.snmptest import SnmpTester # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.ldpd, pytest.mark.isisd, pytest.mark.snmp] class TemplateTopo(Topo): "Test topology builder" diff --git a/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py b/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py index 331e6fafd4..44b34c485f 100644 --- a/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py +++ b/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py @@ -80,6 +80,8 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
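The pytestmark additions above tag each topotest module with the daemons it exercises, so a subset of the suite can be selected with pytest's -m option. A minimal sketch of the pattern (the test body is hypothetical; the mark names match those used in this change):

    import pytest

    # Running "pytest -m bgpd" (or "-m staticd") selects modules carrying the
    # corresponding mark.
    pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]

    def test_placeholder():
        # Hypothetical test body, only here to make the sketch runnable.
        assert True

In practice the mark names also need to be registered (for example in pytest.ini) to avoid unknown-mark warnings.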
from mininet.topo import Topo +pytestmark = [pytest.mark.isisd, pytest.mark.ldpd] + class TemplateTopo(Topo): "Test topology builder" diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index a236a916b5..2f1f67439f 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -44,6 +44,7 @@ from lib.common_config import ( FRRCFG_FILE, retry, get_ipv6_linklocal_address, + get_frr_ipv6_linklocal ) LOGDIR = "/tmp/topotests/" @@ -265,6 +266,11 @@ def __create_bgp_global(tgen, input_dict, router, build=False): config_data.append("bgp router-id {}".format(router_id)) config_data.append("no bgp network import-check") + bgp_peer_grp_data = bgp_data.setdefault("peer-group", {}) + + if "peer-group" in bgp_data and bgp_peer_grp_data: + peer_grp_data = __create_bgp_peer_group(tgen, bgp_peer_grp_data, router) + config_data.extend(peer_grp_data) bst_path = bgp_data.setdefault("bestpath", None) if bst_path: @@ -380,6 +386,7 @@ def __create_bgp_unicast_neighbor( addr_data = addr_dict["unicast"] if addr_data: config_data.append("address-family {} unicast".format(addr_type)) + advertise_network = addr_data.setdefault("advertise_networks", []) for advertise_network_dict in advertise_network: network = advertise_network_dict["network"] @@ -404,14 +411,29 @@ def __create_bgp_unicast_neighbor( config_data.append(cmd) + import_cmd = addr_data.setdefault("import", {}) + if import_cmd: + try: + if import_cmd["delete"]: + config_data.append("no import vrf {}".format(import_cmd["vrf"])) + except KeyError: + config_data.append("import vrf {}".format(import_cmd["vrf"])) + max_paths = addr_data.setdefault("maximum_paths", {}) if max_paths: ibgp = max_paths.setdefault("ibgp", None) ebgp = max_paths.setdefault("ebgp", None) + del_cmd = max_paths.setdefault("delete", False) if ibgp: - config_data.append("maximum-paths ibgp {}".format(ibgp)) + if del_cmd: + config_data.append("no maximum-paths ibgp {}".format(ibgp)) + else: + config_data.append("maximum-paths ibgp {}".format(ibgp)) if ebgp: - config_data.append("maximum-paths {}".format(ebgp)) + if del_cmd: + config_data.append("no maximum-paths {}".format(ebgp)) + else: + config_data.append("maximum-paths {}".format(ebgp)) aggregate_addresses = addr_data.setdefault("aggregate_address", []) for aggregate_address in aggregate_addresses: @@ -649,6 +671,38 @@ def __create_l2vpn_evpn_address_family( return config_data +def __create_bgp_peer_group(topo, input_dict, router): + """ + Helper API to create neighbor specific configuration + + Parameters + ---------- + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `router` : router id to be configured + """ + config_data = [] + logger.debug("Entering lib API: __create_bgp_peer_group()") + + for grp, grp_dict in input_dict.items(): + config_data.append("neighbor {} peer-group".format(grp)) + neigh_cxt = "neighbor {} ".format(grp) + update_source = grp_dict.setdefault("update-source", None) + remote_as = grp_dict.setdefault("remote-as", None) + capability = grp_dict.setdefault("capability", None) + if update_source: + config_data.append("{} update-source {}".format(neigh_cxt, update_source)) + + if remote_as: + config_data.append("{} remote-as {}".format(neigh_cxt, remote_as)) + + if capability: + config_data.append("{} capability {}".format(neigh_cxt, capability)) + + logger.debug("Exiting lib API: __create_bgp_peer_group()") + return config_data + + def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): """ Helper API 
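__create_bgp_peer_group() above walks a new optional "peer-group" section of the BGP input dict and emits the group definition plus its update-source, remote-as and capability knobs. A simplified, standalone mirror of that logic (group name and values below are invented, and this is not the library code itself):

    def build_peer_group_cmds(peer_groups):
        # Simplified mirror of __create_bgp_peer_group() from this change.
        cmds = []
        for grp, grp_dict in peer_groups.items():
            cmds.append("neighbor {} peer-group".format(grp))
            for key in ("update-source", "remote-as", "capability"):
                if grp_dict.get(key):
                    cmds.append("neighbor {} {} {}".format(grp, key, grp_dict[key]))
        return cmds

    print(build_peer_group_cmds(
        {"PG1": {"remote-as": "65001", "update-source": "lo"}}))
    # ['neighbor PG1 peer-group', 'neighbor PG1 update-source lo',
    #  'neighbor PG1 remote-as 65001']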
to create neighbor specific configuration @@ -660,10 +714,9 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): * `input_dict` : Input dict data, required when configuring from testcase * `router` : router id to be configured """ - config_data = [] logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) - + tgen = get_topogen() bgp_data = input_dict["address_family"] neigh_data = bgp_data[addr_type]["unicast"]["neighbor"] @@ -672,35 +725,91 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): nh_details = topo[name] if "vrfs" in topo[router] or type(nh_details["bgp"]) is list: - remote_as = nh_details["bgp"][0]["local_as"] + for vrf_data in nh_details["bgp"]: + if "vrf" in nh_details["links"][dest_link] and "vrf" in vrf_data: + if nh_details["links"][dest_link]["vrf"] == vrf_data["vrf"]: + remote_as = vrf_data["local_as"] + break + else: + if "vrf" not in vrf_data: + remote_as = vrf_data["local_as"] + break + else: remote_as = nh_details["bgp"]["local_as"] update_source = None - if dest_link in nh_details["links"].keys(): - ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0] - # Loopback interface - if "source_link" in peer and peer["source_link"] == "lo": - update_source = topo[router]["links"]["lo"][addr_type].split("/")[0] + if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered": + ip_addr = nh_details["links"][dest_link]["peer-interface"] + elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local": + intf = topo[name]["links"][dest_link]["interface"] + ip_addr = get_frr_ipv6_linklocal(tgen, name, intf) + elif dest_link in nh_details["links"].keys(): + try: + ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0] + except KeyError: + intf = topo[name]["links"][dest_link]["interface"] + ip_addr = get_frr_ipv6_linklocal(tgen, name, intf) + if "delete" in peer and peer["delete"]: + neigh_cxt = "no neighbor {}".format(ip_addr) + config_data.append("{}".format(neigh_cxt)) + return config_data + else: + neigh_cxt = "neighbor {}".format(ip_addr) - neigh_cxt = "neighbor {}".format(ip_addr) + if "peer-group" in peer: + config_data.append( + "neighbor {} interface peer-group {}".format( + ip_addr, peer["peer-group"] + ) + ) + + # Loopback interface + if "source_link" in peer: + if peer["source_link"] == "lo": + update_source = topo[router]["links"]["lo"][addr_type].split("/")[0] + else: + update_source = topo[router]["links"][peer["source_link"]][ + "interface" + ] + if "peer-group" not in peer: + if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered": + config_data.append( + "{} interface remote-as {}".format(neigh_cxt, remote_as) + ) + elif add_neigh: + config_data.append("{} remote-as {}".format(neigh_cxt, remote_as)) - if add_neigh: - config_data.append("{} remote-as {}".format(neigh_cxt, remote_as)) if addr_type == "ipv6": config_data.append("address-family ipv6 unicast") config_data.append("{} activate".format(neigh_cxt)) + if "neighbor_type" in peer and peer["neighbor_type"] == "link-local": + config_data.append( + "{} update-source {}".format( + neigh_cxt, nh_details["links"][dest_link]["peer-interface"] + ) + ) + config_data.append( + "{} interface {}".format( + neigh_cxt, nh_details["links"][dest_link]["peer-interface"] + ) + ) + disable_connected = peer.setdefault("disable_connected_check", False) keep_alive = peer.setdefault("keepalivetimer", 3) hold_down = peer.setdefault("holddowntimer", 10) password = peer.setdefault("password", None) 
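__create_bgp_neighbor() now understands "neighbor_type" values of "unnumbered" and "link-local": unnumbered peers are keyed by the peer interface and configured with "interface remote-as", while link-local peers resolve the address via get_frr_ipv6_linklocal(). A small illustration of the unnumbered branch (interface name and ASN are made up):

    # Hypothetical unnumbered peer; "peer-interface" would normally come from
    # topo[<peer>]["links"][<dest_link>]["peer-interface"].
    peer = {"neighbor_type": "unnumbered"}
    peer_interface = "r2-r1-eth0"
    remote_as = 65002

    if peer.get("neighbor_type") == "unnumbered":
        neigh_cxt = "neighbor {}".format(peer_interface)
        cmd = "{} interface remote-as {}".format(neigh_cxt, remote_as)

    print(cmd)   # neighbor r2-r1-eth0 interface remote-as 65002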
no_password = peer.setdefault("no_password", None) + capability = peer.setdefault("capability", None) max_hop_limit = peer.setdefault("ebgp_multihop", 1) + graceful_restart = peer.setdefault("graceful-restart", None) graceful_restart_helper = peer.setdefault("graceful-restart-helper", None) graceful_restart_disable = peer.setdefault("graceful-restart-disable", None) + if capability: + config_data.append("{} capability {}".format(neigh_cxt, capability)) if update_source: config_data.append( @@ -718,7 +827,6 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): config_data.append( "{} timers {} {}".format(neigh_cxt, keep_alive, hold_down) ) - if graceful_restart: config_data.append("{} graceful-restart".format(neigh_cxt)) elif graceful_restart == False: @@ -768,7 +876,7 @@ def __create_bgp_unicast_address_family( config_data = [] logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) - + tgen = get_topogen() bgp_data = input_dict["address_family"] neigh_data = bgp_data[addr_type]["unicast"]["neighbor"] @@ -784,16 +892,34 @@ def __create_bgp_unicast_address_family( for destRouterLink, data in sorted(nh_details["links"].items()): if "type" in data and data["type"] == "loopback": if dest_link == destRouterLink: - ip_addr = nh_details["links"][destRouterLink][ - addr_type - ].split("/")[0] + ip_addr = ( + nh_details["links"][destRouterLink][addr_type] + .split("/")[0] + .lower() + ) # Physical interface else: - if dest_link in nh_details["links"].keys(): - - ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0] - if addr_type == "ipv4" and bgp_data["ipv6"]: + # check the neighbor type if un numbered nbr, use interface. + if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered": + ip_addr = nh_details["links"][dest_link]["peer-interface"] + elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local": + intf = topo[peer_name]["links"][dest_link]["interface"] + ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf) + elif dest_link in nh_details["links"].keys(): + try: + ip_addr = nh_details["links"][dest_link][addr_type].split("/")[ + 0 + ] + except KeyError: + intf = topo[peer_name]["links"][dest_link]["interface"] + ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf) + if ( + addr_type == "ipv4" + and bgp_data["ipv6"] + and check_address_types("ipv6") + and "ipv6" in nh_details["links"][dest_link] + ): deactivate = nh_details["links"][dest_link]["ipv6"].split("/")[ 0 ] @@ -822,6 +948,7 @@ def __create_bgp_unicast_address_family( prefix_lists = peer.setdefault("prefix_lists", {}) route_maps = peer.setdefault("route_maps", {}) no_send_community = peer.setdefault("no_send_community", None) + capability = peer.setdefault("capability", None) allowas_in = peer.setdefault("allowas-in", None) # next-hop-self @@ -841,6 +968,11 @@ def __create_bgp_unicast_address_family( "no {} send-community {}".format(neigh_cxt, no_send_community) ) + # capability_ext_nh + if capability and addr_type == "ipv6": + config_data.append("address-family ipv4 unicast") + config_data.append("{} activate".format(neigh_cxt)) + if "allowas_in" in peer: allow_as_in = peer["allowas_in"] config_data.append("{} allowas-in {}".format(neigh_cxt, allow_as_in)) @@ -1067,33 +1199,37 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True): API will verify if BGP is converged with in the given time frame. 
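Both neighbor helpers gain a "capability" knob. When it is set on an IPv6 peer, __create_bgp_unicast_address_family() additionally activates the neighbor under address-family ipv4 unicast, which is the usual arrangement for carrying IPv4 prefixes over an IPv6 (often link-local) session with the extended-nexthop capability. A sketch with invented values:

    neigh_cxt = "neighbor fe80::1"            # hypothetical link-local peer
    capability = "extended-nexthop"           # illustrative capability value
    addr_type = "ipv6"

    config_data = ["{} capability {}".format(neigh_cxt, capability)]
    if capability and addr_type == "ipv6":
        config_data.append("address-family ipv4 unicast")
        config_data.append("{} activate".format(neigh_cxt))

    print("\n".join(config_data))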
Running "show bgp summary json" command and verify bgp neighbor state is established, + Parameters ---------- * `tgen`: topogen object * `topo`: input json file data * `dut`: device under test - * `expected` : expected results from API, by-default True Usage ----- # To veriry is BGP is converged for all the routers used in topology results = verify_bgp_convergence(tgen, topo, dut="r1") + Returns ------- errormsg(str) or True """ - logger.debug("Entering lib API: verify_bgp_convergence()") + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + tgen = get_topogen() for router, rnode in tgen.routers().items(): - if dut is not None and dut != router: + if 'bgp' not in topo['routers'][router]: continue - if "bgp" not in topo["routers"][router]: + if dut is not None and dut != router: continue logger.info("Verifying BGP Convergence on router %s:", router) - show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", isjson=True) + show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", + isjson=True) # Verifying output dictionary show_bgp_json is empty or not if not bool(show_bgp_json): errormsg = "BGP is not running" @@ -1115,100 +1251,6 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True): # To find neighbor ip type bgp_addr_type = bgp_data["address_family"] - if "ipv4" in bgp_addr_type or "ipv6" in bgp_addr_type: - for addr_type in bgp_addr_type.keys(): - if not check_address_types(addr_type): - continue - total_peer = 0 - - bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"] - - for bgp_neighbor in bgp_neighbors: - total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"]) - - for addr_type in bgp_addr_type.keys(): - if not check_address_types(addr_type): - continue - bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"] - - no_of_peer = 0 - for bgp_neighbor, peer_data in bgp_neighbors.items(): - for dest_link in peer_data["dest_link"].keys(): - data = topo["routers"][bgp_neighbor]["links"] - if dest_link in data: - peer_details = peer_data["dest_link"][dest_link] - # for link local neighbors - if ( - "neighbor_type" in peer_details - and peer_details["neighbor_type"] == "link-local" - ): - neighbor_ip = get_ipv6_linklocal_address( - topo["routers"], bgp_neighbor, dest_link - ) - elif "source_link" in peer_details: - neighbor_ip = topo["routers"][bgp_neighbor][ - "links" - ][peer_details["source_link"]][addr_type].split( - "/" - )[ - 0 - ] - elif ( - "neighbor_type" in peer_details - and peer_details["neighbor_type"] == "unnumbered" - ): - neighbor_ip = data[dest_link]["peer-interface"] - else: - neighbor_ip = data[dest_link][addr_type].split("/")[ - 0 - ] - nh_state = None - - if addr_type == "ipv4": - if "ipv4Unicast" in show_bgp_json[vrf]: - ipv4_data = show_bgp_json[vrf]["ipv4Unicast"][ - "peers" - ] - nh_state = ipv4_data[neighbor_ip]["state"] - else: - if "ipv6Unicast" in show_bgp_json[vrf]: - ipv6_data = show_bgp_json[vrf]["ipv6Unicast"][ - "peers" - ] - nh_state = ipv6_data[neighbor_ip]["state"] - if nh_state == "Established": - no_of_peer += 1 - - if "l2vpn" in bgp_addr_type: - if "neighbor" not in bgp_addr_type["l2vpn"]["evpn"]: - if no_of_peer == total_peer: - logger.info( - "[DUT: %s] VRF: %s, BGP is Converged for %s address-family", - router, - vrf, - addr_type, - ) - else: - errormsg = ( - "[DUT: %s] VRF: %s, BGP is not converged for %s address-family" - % (router, vrf, addr_type) - ) - return errormsg - else: - if no_of_peer == total_peer: - logger.info( - "[DUT: %s] VRF: %s, BGP 
is Converged for %s address-family", - router, - vrf, - addr_type, - ) - else: - errormsg = ( - "[DUT: %s] VRF: %s, BGP is not converged for %s address-family" - % (router, vrf, addr_type) - ) - return errormsg - if "l2vpn" in bgp_addr_type: total_evpn_peer = 0 @@ -1224,46 +1266,120 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True): data = topo["routers"][bgp_neighbor]["links"] for dest_link in dest_link_dict.keys(): if dest_link in data: - peer_details = peer_data[_addr_type][dest_link] + peer_details = \ + peer_data[_addr_type][dest_link] - neighbor_ip = data[dest_link][_addr_type].split("/")[0] + neighbor_ip = \ + data[dest_link][_addr_type].split( + "/")[0] nh_state = None - if ( - "ipv4Unicast" in show_bgp_json[vrf] - or "ipv6Unicast" in show_bgp_json[vrf] - ): - errormsg = ( - "[DUT: %s] VRF: %s, " - "ipv4Unicast/ipv6Unicast" - " address-family present" - " under l2vpn" % (router, vrf) - ) + if "ipv4Unicast" in show_bgp_json[vrf] or \ + "ipv6Unicast" in show_bgp_json[vrf]: + errormsg = ("[DUT: %s] VRF: %s, " + "ipv4Unicast/ipv6Unicast" + " address-family present" + " under l2vpn" % (router, + vrf)) return errormsg - l2VpnEvpn_data = show_bgp_json[vrf]["l2VpnEvpn"][ - "peers" - ] - nh_state = l2VpnEvpn_data[neighbor_ip]["state"] + l2VpnEvpn_data = \ + show_bgp_json[vrf]["l2VpnEvpn"][ + "peers"] + nh_state = \ + l2VpnEvpn_data[neighbor_ip]["state"] if nh_state == "Established": no_of_evpn_peer += 1 if no_of_evpn_peer == total_evpn_peer: - logger.info( - "[DUT: %s] VRF: %s, BGP is Converged for " "epvn peers", - router, - vrf, - ) + logger.info("[DUT: %s] VRF: %s, BGP is Converged for " + "epvn peers", router, vrf) + result = True else: - errormsg = ( - "[DUT: %s] VRF: %s, BGP is not converged " - "for evpn peers" % (router, vrf) - ) + errormsg = ("[DUT: %s] VRF: %s, BGP is not converged " + "for evpn peers" % (router, vrf)) return errormsg + else: + total_peer = 0 + for addr_type in bgp_addr_type.keys(): + if not check_address_types(addr_type): + continue - logger.debug("Exiting API: verify_bgp_convergence()") - return True + bgp_neighbors = \ + bgp_addr_type[addr_type]["unicast"]["neighbor"] + + for bgp_neighbor in bgp_neighbors: + total_peer += \ + len(bgp_neighbors[bgp_neighbor]["dest_link"]) + + no_of_peer = 0 + for addr_type in bgp_addr_type.keys(): + if not check_address_types(addr_type): + continue + bgp_neighbors = \ + bgp_addr_type[addr_type]["unicast"]["neighbor"] + + for bgp_neighbor, peer_data in bgp_neighbors.items(): + for dest_link in peer_data["dest_link"].\ + keys(): + data = \ + topo["routers"][bgp_neighbor]["links"] + if dest_link in data: + peer_details = \ + peer_data['dest_link'][dest_link] + # for link local neighbors + if "neighbor_type" in peer_details and \ + peer_details["neighbor_type"] == \ + 'link-local': + intf = topo["routers"][bgp_neighbor][ + "links"][dest_link]["interface"] + neighbor_ip = get_frr_ipv6_linklocal( + tgen, bgp_neighbor, intf) + elif "source_link" in peer_details: + neighbor_ip = \ + topo["routers"][bgp_neighbor][ + "links"][peer_details[ + 'source_link']][ + addr_type].\ + split("/")[0] + elif "neighbor_type" in peer_details and \ + peer_details["neighbor_type"] == \ + 'unnumbered': + neighbor_ip = \ + data[dest_link]["peer-interface"] + else: + neighbor_ip = \ + data[dest_link][addr_type].split( + "/")[0] + nh_state = None + neighbor_ip = neighbor_ip.lower() + if addr_type == "ipv4": + ipv4_data = show_bgp_json[vrf][ + "ipv4Unicast"]["peers"] + nh_state = \ + ipv4_data[neighbor_ip]["state"] + else: + ipv6_data = 
show_bgp_json[vrf][ + "ipv6Unicast"]["peers"] + if neighbor_ip in ipv6_data: + nh_state = \ + ipv6_data[neighbor_ip]["state"] + + if nh_state == "Established": + no_of_peer += 1 + + if no_of_peer == total_peer and no_of_peer > 0: + logger.info("[DUT: %s] VRF: %s, BGP is Converged", + router, vrf) + result = True + else: + errormsg = ("[DUT: %s] VRF: %s, BGP is not converged" + % (router, vrf)) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result @retry(retry_timeout=16) diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index 9e38608631..22a678862a 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -22,18 +22,16 @@ from collections import OrderedDict from datetime import datetime, timedelta from time import sleep from copy import deepcopy -from subprocess import call -from subprocess import STDOUT as SUB_STDOUT -from subprocess import PIPE as SUB_PIPE -from subprocess import Popen from functools import wraps from re import search as re_search from tempfile import mkdtemp +import json import os import sys import traceback import socket +import subprocess import ipaddress import platform @@ -235,14 +233,12 @@ def run_frr_cmd(rnode, cmd, isjson=False): if True: if isjson: - logger.debug(ret_data) - print_data = rnode.vtysh_cmd(cmd.rstrip("json"), isjson=False) + print_data = json.dumps(ret_data) else: print_data = ret_data - logger.info( - "Output for command [ %s] on router %s:\n%s", - cmd.rstrip("json"), + "Output for command [%s] on router %s:\n%s", + cmd, rnode.name, print_data, ) @@ -365,7 +361,7 @@ def create_common_configuration( return True -def kill_router_daemons(tgen, router, daemons): +def kill_router_daemons(tgen, router, daemons, save_config=True): """ Router's current config would be saved to /etc/frr/ for each daemon and daemon would be killed forcefully using SIGKILL. @@ -379,9 +375,10 @@ def kill_router_daemons(tgen, router, daemons): try: router_list = tgen.routers() - # Saving router config to /etc/frr, which will be loaded to router - # when it starts - router_list[router].vtysh_cmd("write memory") + if save_config: + # Saving router config to /etc/frr, which will be loaded to router + # when it starts + router_list[router].vtysh_cmd("write memory") # Kill Daemons result = router_list[router].killDaemons(daemons) @@ -469,111 +466,114 @@ def reset_config_on_routers(tgen, routerName=None): logger.debug("Entering API: reset_config_on_routers") + # Trim the router list if needed router_list = tgen.routers() - for rname in ROUTER_LIST: - if routerName and routerName != rname: - continue - - router = router_list[rname] - logger.info("Configuring router %s to initial test configuration", rname) - - cfg = router.run("vtysh -c 'show running'") - fname = "{}/{}/frr.sav".format(TMPDIR, rname) - dname = "{}/{}/delta.conf".format(TMPDIR, rname) - f = open(fname, "w") - for line in cfg.split("\n"): - line = line.strip() - - if ( - line == "Building configuration..." 
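The reworked verify_bgp_convergence() returns True on success and an error string otherwise, so callers typically assert on the result to surface the message in the pytest report. A usage sketch (the router name is illustrative):

    from lib.bgp import verify_bgp_convergence

    def check_bgp_convergence(tgen, topo):
        # Verify every BGP router in the topology, or pass dut="r1" to limit
        # the check to a single router.
        result = verify_bgp_convergence(tgen, topo, dut="r1")
        assert result is True, "BGP is not converged: {}".format(result)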
- or line == "Current configuration:" - or not line - ): - continue - f.write(line) - f.write("\n") - - f.close() - run_cfg_file = "{}/{}/frr.sav".format(TMPDIR, rname) - init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname) - command = "/usr/lib/frr/frr-reload.py --input {} --test {} > {}".format( - run_cfg_file, init_cfg_file, dname + if routerName: + if ((routerName not in ROUTER_LIST) or (routerName not in router_list)): + logger.debug("Exiting API: reset_config_on_routers: no routers") + return True + router_list = { routerName: router_list[routerName] } + + delta_fmt = TMPDIR + "/{}/delta.conf" + init_cfg_fmt = TMPDIR + "/{}/frr_json_initial.conf" + run_cfg_fmt = TMPDIR + "/{}/frr.sav" + + # + # Get all running configs in parallel + # + procs = {} + for rname in router_list: + logger.info("Fetching running config for router %s", rname) + procs[rname] = router_list[rname].popen( + ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"], + stdin=None, + stdout=open(run_cfg_fmt.format(rname), "w"), + stderr=subprocess.PIPE, ) - result = call(command, shell=True, stderr=SUB_STDOUT, stdout=SUB_PIPE) - - # Assert if command fail - if result > 0: - logger.error("Delta file creation failed. Command executed %s", command) - with open(run_cfg_file, "r") as fd: - logger.info( - "Running configuration saved in %s is:\n%s", run_cfg_file, fd.read() - ) - with open(init_cfg_file, "r") as fd: - logger.info( - "Test configuration saved in %s is:\n%s", init_cfg_file, fd.read() + for rname, p in procs.items(): + _, error = p.communicate() + if p.returncode: + logger.error("Get running config for %s failed %d: %s", rname, p.returncode, error) + raise InvalidCLIError("vtysh show running error on {}: {}".format(rname, error)) + + # + # Get all delta's in parallel + # + procs = {} + for rname in router_list: + logger.info("Generating delta for router %s to new configuration", rname) + procs[rname] = subprocess.Popen( + [ "/usr/lib/frr/frr-reload.py", + "--test-reset", + "--input", + run_cfg_fmt.format(rname), + "--test", + init_cfg_fmt.format(rname) ], + stdin=None, + stdout=open(delta_fmt.format(rname), "w"), + stderr=subprocess.PIPE, + ) + for rname, p in procs.items(): + _, error = p.communicate() + if p.returncode: + logger.error("Delta file creation for %s failed %d: %s", rname, p.returncode, error) + raise InvalidCLIError("frr-reload error for {}: {}".format(rname, error)) + + # + # Apply all the deltas in parallel + # + procs = {} + for rname in router_list: + logger.info("Applying delta config on router %s", rname) + + procs[rname] = router_list[rname].popen( + ["/usr/bin/env", "vtysh", "-f", delta_fmt.format(rname)], + stdin=None, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + for rname, p in procs.items(): + output, _ = p.communicate() + vtysh_command = "vtysh -f {}".format(delta_fmt.format(rname)) + if not p.returncode: + router_list[rname].logger.info( + '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output) + ) + else: + router_list[rname].logger.error( + '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output) + ) + logger.error("Delta file apply for %s failed %d: %s", rname, p.returncode, output) + + # We really need to enable this failure; however, currently frr-reload.py + # producing invalid "no" commands as it just preprends "no", but some of the + # command forms lack matching values (e.g., final values). 
Until frr-reload + # is fixed to handle this (or all the CLI no forms are adjusted) we can't + # fail tests. + # raise InvalidCLIError("frr-reload error for {}: {}".format(rname, output)) + + # + # Optionally log all new running config if "show_router_config" is defined in + # "pytest.ini" + # + if show_router_config: + procs = {} + for rname in router_list: + logger.info("Fetching running config for router %s", rname) + procs[rname] = router_list[rname].popen( + ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"], + stdin=None, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + for rname, p in procs.items(): + output, _ = p.communicate() + if p.returncode: + logger.warning( + "Get running config for %s failed %d: %s", rname, p.returncode, output ) - - err_cmd = ["/usr/bin/vtysh", "-m", "-f", run_cfg_file] - result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE) - output = result.communicate() - for out_data in output: - temp_data = out_data.decode("utf-8").lower() - for out_err in ERROR_LIST: - if out_err.lower() in temp_data: - logger.error( - "Found errors while validating data in" " %s", run_cfg_file - ) - raise InvalidCLIError(out_data) - raise InvalidCLIError("Unknown error in %s", output) - - f = open(dname, "r") - delta = StringIO() - delta.write("configure terminal\n") - t_delta = f.read() - - # Don't disable debugs - check_debug = True - - for line in t_delta.split("\n"): - line = line.strip() - if line == "Lines To Delete" or line == "===============" or not line: - continue - - if line == "Lines To Add": - check_debug = False - continue - - if line == "============" or not line: - continue - - # Leave debugs and log output alone - if check_debug: - if "debug" in line or "log file" in line: - continue - - delta.write(line) - delta.write("\n") - - f.close() - - delta.write("end\n") - - output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False) - - delta.close() - delta = StringIO() - cfg = router.run("vtysh -c 'show running'") - for line in cfg.split("\n"): - line = line.strip() - delta.write(line) - delta.write("\n") - - # Router current configuration to log file or console if - # "show_router_config" is defined in "pytest.ini" - if show_router_config: - logger.info("Configuration on router {} after reset:".format(rname)) - logger.info(delta.getvalue()) - delta.close() + else: + logger.info("Configuration on router {} after reset:\n{}".format(rname, output)) logger.debug("Exiting API: reset_config_on_routers") return True @@ -636,6 +636,7 @@ def load_config_to_router(tgen, routerName, save_bkup=False): return True + def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None): """ API to get the link local ipv6 address of a particular interface using @@ -668,38 +669,48 @@ def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None): else: cmd = "show interface" - ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd)) - - # Fix newlines (make them all the same) - ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines() - - interface = None - ll_per_if_count = 0 - for line in ifaces: - # Interface name - m = re_search("Interface ([a-zA-Z0-9-]+) is", line) - if m: - interface = m.group(1).split(" ")[0] - ll_per_if_count = 0 - - # Interface ip - m1 = re_search("inet6 (fe80[:a-fA-F0-9]+[/0-9]+)", line) - if m1: - local = m1.group(1) - ll_per_if_count += 1 - if ll_per_if_count > 1: - linklocal += [["%s-%s" % (interface, ll_per_if_count), local]] - else: - linklocal += [[interface, local]] - - if linklocal: - if intf: - return 
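reset_config_on_routers() now fetches running configs, generates frr-reload deltas, and applies them with one subprocess per router, reaping each batch with communicate() before moving on. The same start-then-collect pattern in isolation (the command is a stand-in, not the real vtysh or frr-reload invocation):

    import subprocess

    routers = ["r1", "r2", "r3"]

    # Start one process per router...
    procs = {}
    for rname in routers:
        procs[rname] = subprocess.Popen(
            ["echo", "pretend-work-for-{}".format(rname)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

    # ...then collect them all, checking return codes once everything has run.
    for rname, p in procs.items():
        output, error = p.communicate()
        if p.returncode:
            raise RuntimeError("command failed on {}: {}".format(rname, error))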
[_linklocal[1] for _linklocal in linklocal if _linklocal[0] == intf][ - 0 - ].split("/")[0] - return linklocal - else: - errormsg = "Link local ip missing on router {}" + linklocal = [] + if vrf: + cmd = "show interface vrf {}".format(vrf) + else: + cmd = "show interface" + for chk_ll in range(0, 60): + sleep(1/4) + ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd)) + # Fix newlines (make them all the same) + ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines() + + interface = None + ll_per_if_count = 0 + for line in ifaces: + # Interface name + m = re_search('Interface ([a-zA-Z0-9-]+) is', line) + if m: + interface = m.group(1).split(" ")[0] + ll_per_if_count = 0 + + # Interface ip + m1 = re_search('inet6 (fe80[:a-fA-F0-9]+[\/0-9]+)', + line) + if m1: + local = m1.group(1) + ll_per_if_count += 1 + if ll_per_if_count > 1: + linklocal += [["%s-%s" % + (interface, ll_per_if_count), local]] + else: + linklocal += [[interface, local]] + + try: + if linklocal: + if intf: + return [_linklocal[1] for _linklocal in linklocal if _linklocal[0]==intf][0].\ + split("/")[0] + return linklocal + except IndexError: + continue + + errormsg = "Link local ip missing on router {}".format(router) return errormsg @@ -712,20 +723,36 @@ def generate_support_bundle(): tgen = get_topogen() router_list = tgen.routers() - test_name = sys._getframe(2).f_code.co_name + test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0] + TMPDIR = os.path.join(LOGDIR, tgen.modname) + bundle_procs = {} for rname, rnode in router_list.items(): - logger.info("Generating support bundle for {}".format(rname)) + logger.info("Spawn collection of support bundle for %s", rname) rnode.run("mkdir -p /var/log/frr") + bundle_procs[rname] = tgen.net[rname].popen( + "/usr/lib/frr/generate_support_bundle.py", + stdin=None, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) - # Support only python3 going forward - bundle_log = rnode.run("env python3 /usr/lib/frr/generate_support_bundle.py") - - logger.info(bundle_log) - + for rname, rnode in router_list.items(): dst_bundle = "{}/{}/support_bundles/{}".format(TMPDIR, rname, test_name) src_bundle = "/var/log/frr" + + output, error = bundle_procs[rname].communicate() + + logger.info("Saving support bundle for %s", rname) + if output: + logger.info( + "Output from collecting support bundle for %s:\n%s", rname, output + ) + if error: + logger.warning( + "Error from collecting support bundle for %s:\n%s", rname, error + ) rnode.run("rm -rf {}".format(dst_bundle)) rnode.run("mkdir -p {}".format(dst_bundle)) rnode.run("mv -f {}/* {}".format(src_bundle, dst_bundle)) @@ -1829,6 +1856,14 @@ def create_interfaces_cfg(tgen, topo, build=False): else: interface_data.append("ipv6 address {}".format(intf_addr)) + # Wait for vrf interfaces to get link local address once they are up + if not destRouterLink == 'lo' and 'vrf' in topo[c_router][ + 'links'][destRouterLink]: + vrf = topo[c_router]['links'][destRouterLink]['vrf'] + intf = topo[c_router]['links'][destRouterLink]['interface'] + ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf, + vrf = vrf) + if "ipv6-link-local" in data: intf_addr = c_data["links"][destRouterLink]["ipv6-link-local"] @@ -1851,7 +1886,7 @@ def create_interfaces_cfg(tgen, topo, build=False): ) if "ospf6" in data: interface_data += _create_interfaces_ospf_cfg( - "ospf6", c_data, data, ospf_keywords + "ospf6", c_data, data, ospf_keywords + ["area"] ) result = create_common_configuration( diff --git a/tests/topotests/lib/ospf.py 
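get_frr_ipv6_linklocal() now polls "show interface" output, retrying up to 60 times with a short sleep, because an interface that has just come up, especially one bound to a VRF, may not have its fe80:: address yet; the new create_interfaces_cfg() code relies on this to wait for VRF interfaces. A reduced sketch of the poll-and-parse loop (the interface text and the fetch callable are fabricated):

    import re
    import time

    def poll_linklocal(fetch_output, attempts=60, delay=0.25):
        # Simplified version of the retry loop; fetch_output() stands in for
        # running "vtysh -c 'show interface ...'" on the router.
        for _ in range(attempts):
            for line in fetch_output().splitlines():
                m = re.search(r"inet6 (fe80[:a-fA-F0-9]+/[0-9]+)", line)
                if m:
                    return m.group(1).split("/")[0]
            time.sleep(delay)
        return None

    # Fabricated output for illustration:
    sample = "Interface r1-r2-eth0 is up\n  inet6 fe80::a8bb:ccff:fedd:1/64\n"
    print(poll_linklocal(lambda: sample))   # fe80::a8bb:ccff:fedd:1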
b/tests/topotests/lib/ospf.py index dc9fe0fcca..40da7c8fbe 100644 --- a/tests/topotests/lib/ospf.py +++ b/tests/topotests/lib/ospf.py @@ -28,6 +28,7 @@ from time import sleep from lib.topolog import logger from lib.topotest import frr_unicode from ipaddress import IPv6Address + # Import common_config to use commomnly used APIs from lib.common_config import ( create_common_configuration, @@ -89,8 +90,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru logger.debug("Router %s: 'ospf' not present in input_dict", router) continue - result = __create_ospf_global( - tgen, input_dict, router, build, load_config) + result = __create_ospf_global(tgen, input_dict, router, build, load_config) if result is True: ospf_data = input_dict[router]["ospf"] @@ -100,7 +100,8 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru continue result = __create_ospf_global( - tgen, input_dict, router, build, load_config, ospf='ospf6') + tgen, input_dict, router, build, load_config, ospf="ospf6" + ) if result is True: ospf_data = input_dict[router]["ospf6"] @@ -172,7 +173,6 @@ def __create_ospf_global( config_data.append(cmd) - # router id router_id = ospf_data.setdefault("router_id", None) del_router_id = ospf_data.setdefault("del_router_id", False) @@ -187,8 +187,7 @@ def __create_ospf_global( if del_log_adj_changes: config_data.append("no log-adjacency-changes detail") if log_adj_changes: - config_data.append("log-adjacency-changes {}".format( - log_adj_changes)) + config_data.append("log-adjacency-changes {}".format(log_adj_changes)) # aggregation timer aggr_timer = ospf_data.setdefault("aggr_timer", None) @@ -196,8 +195,7 @@ def __create_ospf_global( if del_aggr_timer: config_data.append("no aggregation timer") if aggr_timer: - config_data.append("aggregation timer {}".format( - aggr_timer)) + config_data.append("aggregation timer {}".format(aggr_timer)) # maximum path information ecmp_data = ospf_data.setdefault("maximum-paths", {}) @@ -245,12 +243,13 @@ def __create_ospf_global( cmd = "no {}".format(cmd) config_data.append(cmd) - #def route information + # def route information def_rte_data = ospf_data.setdefault("default-information", {}) if def_rte_data: if "originate" not in def_rte_data: - logger.debug("Router %s: 'originate key' not present in " - "input_dict", router) + logger.debug( + "Router %s: 'originate key' not present in " "input_dict", router + ) else: cmd = "default-information originate" @@ -261,12 +260,10 @@ def __create_ospf_global( cmd = cmd + " metric {}".format(def_rte_data["metric"]) if "metric-type" in def_rte_data: - cmd = cmd + " metric-type {}".format(def_rte_data[ - "metric-type"]) + cmd = cmd + " metric-type {}".format(def_rte_data["metric-type"]) if "route-map" in def_rte_data: - cmd = cmd + " route-map {}".format(def_rte_data[ - "route-map"]) + cmd = cmd + " route-map {}".format(def_rte_data["route-map"]) del_action = def_rte_data.setdefault("delete", False) if del_action: @@ -288,19 +285,19 @@ def __create_ospf_global( config_data.append(cmd) try: - if "area" in input_dict[router]['links'][neighbor][ - 'ospf6']: + if "area" in input_dict[router]["links"][neighbor]["ospf6"]: iface = input_dict[router]["links"][neighbor]["interface"] cmd = "interface {} area {}".format( - iface, input_dict[router]['links'][neighbor][ - 'ospf6']['area']) - if input_dict[router]['links'][neighbor].setdefault( - "delete", False): + iface, + input_dict[router]["links"][neighbor]["ospf6"]["area"], + ) + if 
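verify_ospf6_neighbor() moves from @retry(retry_timeout=20) to 50, giving slower environments more time before a verification attempt is declared failed. The decorator comes from lib.common_config; the stand-in below only shows the idea and is not the real implementation:

    import time
    from functools import wraps

    def retry(retry_timeout=20, interval=2):
        # Re-invoke the wrapped verification until it returns True or the
        # timeout (in seconds) expires; simplified stand-in for the topotest helper.
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                deadline = time.time() + retry_timeout
                while True:
                    result = func(*args, **kwargs)
                    if result is True or time.time() >= deadline:
                        return result
                    time.sleep(interval)
            return wrapper
        return decorator

    @retry(retry_timeout=50)
    def verify_something():
        return True

    print(verify_something())   # True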
input_dict[router]["links"][neighbor].setdefault( + "delete", False + ): cmd = "no {}".format(cmd) config_data.append(cmd) except KeyError: - pass - + pass # summary information summary_data = ospf_data.setdefault("summary-address", {}) @@ -420,6 +417,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config= True or False """ logger.debug("Enter lib config_ospf_interface") + result = False if not input_dict: input_dict = deepcopy(topo) else: @@ -502,7 +500,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config= # interface ospf mtu if data_ospf_mtu: cmd = "ip ospf mtu-ignore" - if 'del_action' in ospf_data: + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) @@ -543,8 +541,7 @@ def clear_ospf(tgen, router, ospf=None): version = "ip" cmd = "clear {} ospf interface".format(version) - logger.info( - "Clearing ospf process on router %s.. using command '%s'", router, cmd) + logger.info("Clearing ospf process on router %s.. using command '%s'", router, cmd) run_frr_cmd(rnode, cmd) logger.debug("Exiting lib API: clear_ospf()") @@ -774,7 +771,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expec ################################ # Verification procs ################################ -@retry(retry_timeout=20) +@retry(retry_timeout=50) def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): """ This API is to verify ospf neighborship by running @@ -825,105 +822,133 @@ def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): if input_dict: for router, rnode in tgen.routers().items(): - if 'ospf6' not in topo['routers'][router]: + if "ospf6" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF neighborship on router %s:", router) - show_ospf_json = run_frr_cmd(rnode, - "show ipv6 ospf neighbor json", isjson=True) + show_ospf_json = run_frr_cmd( + rnode, "show ipv6 ospf neighbor json", isjson=True + ) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF6 is not running" return errormsg ospf_data_list = input_dict[router]["ospf6"] - ospf_nbr_list = ospf_data_list['neighbors'] + ospf_nbr_list = ospf_data_list["neighbors"] for ospf_nbr, nbr_data in ospf_nbr_list.items(): - data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id'] + + try: + data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"] + except KeyError: + data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][ + "router_id" + ] + if ospf_nbr in data_ip: nbr_details = nbr_data[ospf_nbr] elif lan: - for switch in topo['switches']: - if 'ospf6' in topo['switches'][switch]['links'][router]: + for switch in topo["switches"]: + if "ospf6" in topo["switches"][switch]["links"][router]: neighbor_ip = data_ip else: continue else: - neighbor_ip = data_ip[router]['ipv6'].split("/")[0] + neighbor_ip = data_ip[router]["ipv6"].split("/")[0] nh_state = None neighbor_ip = neighbor_ip.lower() nbr_rid = data_rid - get_index_val = dict((d['neighborId'], dict( \ - d, index=index)) for (index, d) in enumerate( \ - show_ospf_json['neighbors'])) + get_index_val = dict( + (d["neighborId"], dict(d, index=index)) + for (index, d) in enumerate(show_ospf_json["neighbors"]) + ) try: - nh_state = get_index_val.get(neighbor_ip)['state'] - intf_state = get_index_val.get(neighbor_ip)['ifState'] + nh_state = get_index_val.get(neighbor_ip)["state"] + intf_state = 
get_index_val.get(neighbor_ip)["ifState"] except TypeError: - errormsg = "[DUT: {}] OSPF peer {} missing,from "\ - "{} ".format(router, - nbr_rid, ospf_nbr) + errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format( + router, nbr_rid, ospf_nbr + ) return errormsg - nbr_state = nbr_data.setdefault("state",None) - nbr_role = nbr_data.setdefault("role",None) + nbr_state = nbr_data.setdefault("state", None) + nbr_role = nbr_data.setdefault("role", None) if nbr_state: if nbr_state == nh_state: - logger.info("[DUT: {}] OSPF6 Nbr is {}:{} State {}".format - (router, ospf_nbr, nbr_rid, nh_state)) + logger.info( + "[DUT: {}] OSPF6 Nbr is {}:{} State {}".format( + router, ospf_nbr, nbr_rid, nh_state + ) + ) result = True else: - errormsg = ("[DUT: {}] OSPF6 is not Converged, neighbor" - " state is {} , Expected state is {}".format(router, - nh_state, nbr_state)) + errormsg = ( + "[DUT: {}] OSPF6 is not Converged, neighbor" + " state is {} , Expected state is {}".format( + router, nh_state, nbr_state + ) + ) return errormsg if nbr_role: if nbr_role == intf_state: - logger.info("[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format( - router, ospf_nbr, nbr_rid, nbr_role)) + logger.info( + "[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format( + router, ospf_nbr, nbr_rid, nbr_role + ) + ) else: - errormsg = ("[DUT: {}] OSPF6 is not Converged with rid" - "{}, role is {}, Expected role is {}".format(router, - nbr_rid, intf_state, nbr_role)) + errormsg = ( + "[DUT: {}] OSPF6 is not Converged with rid" + "{}, role is {}, Expected role is {}".format( + router, nbr_rid, intf_state, nbr_role + ) + ) return errormsg continue else: for router, rnode in tgen.routers().items(): - if 'ospf6' not in topo['routers'][router]: + if "ospf6" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF6 neighborship on router %s:", router) - show_ospf_json = run_frr_cmd(rnode, - "show ipv6 ospf neighbor json", isjson=True) + show_ospf_json = run_frr_cmd( + rnode, "show ipv6 ospf neighbor json", isjson=True + ) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF6 is not running" return errormsg ospf_data_list = topo["routers"][router]["ospf6"] - ospf_neighbors = ospf_data_list['neighbors'] + ospf_neighbors = ospf_data_list["neighbors"] total_peer = 0 total_peer = len(ospf_neighbors.keys()) no_of_ospf_nbr = 0 - ospf_nbr_list = ospf_data_list['neighbors'] + ospf_nbr_list = ospf_data_list["neighbors"] no_of_peer = 0 for ospf_nbr, nbr_data in ospf_nbr_list.items(): - data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id'] + try: + data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"] + except KeyError: + data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][ + "router_id" + ] + if ospf_nbr in data_ip: nbr_details = nbr_data[ospf_nbr] elif lan: - for switch in topo['switches']: - if 'ospf6' in topo['switches'][switch]['links'][router]: + for switch in topo["switches"]: + if "ospf6" in topo["switches"][switch]["links"][router]: neighbor_ip = data_ip else: continue @@ -933,26 +958,27 @@ def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): nh_state = None neighbor_ip = neighbor_ip.lower() nbr_rid = data_rid - get_index_val = dict((d['neighborId'], dict( \ - d, index=index)) for (index, d) in enumerate( \ - show_ospf_json['neighbors'])) + get_index_val = dict( + (d["neighborId"], dict(d, index=index)) + for (index, d) in enumerate(show_ospf_json["neighbors"]) + ) try: - 
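The get_index_val comprehension used in both branches of verify_ospf6_neighbor() turns the neighbor list returned by "show ipv6 ospf neighbor json" into a lookup keyed by neighborId, preserving the original list position under "index". Sample data below is invented:

    neighbors = [
        {"neighborId": "1.1.1.1", "state": "Full", "ifState": "PointToPoint"},
        {"neighborId": "2.2.2.2", "state": "Full", "ifState": "DR"},
    ]

    get_index_val = dict(
        (d["neighborId"], dict(d, index=index))
        for (index, d) in enumerate(neighbors)
    )

    print(get_index_val["2.2.2.2"]["state"])   # Full
    print(get_index_val["2.2.2.2"]["index"])   # 1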
nh_state = get_index_val.get(neighbor_ip)['state'] - intf_state = get_index_val.get(neighbor_ip)['ifState'] + nh_state = get_index_val.get(neighbor_ip)["state"] + intf_state = get_index_val.get(neighbor_ip)["ifState"] except TypeError: - errormsg = "[DUT: {}] OSPF peer {} missing,from "\ - "{} ".format(router, - nbr_rid, ospf_nbr) + errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format( + router, nbr_rid, ospf_nbr + ) return errormsg - if nh_state == 'Full': + if nh_state == "Full": no_of_peer += 1 if no_of_peer == total_peer: logger.info("[DUT: {}] OSPF6 is Converged".format(router)) result = True else: - errormsg = ("[DUT: {}] OSPF6 is not Converged".format(router)) + errormsg = "[DUT: {}] OSPF6 is not Converged".format(router) return errormsg logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) @@ -1491,7 +1517,7 @@ def verify_ospf_database(tgen, topo, dut, input_dict, expected=True): @retry(retry_timeout=20) -def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True): +def verify_ospf_summary(tgen, topo, dut, input_dict, ospf=None, expected=True): """ This API is to verify ospf routes by running show ip ospf interface command. @@ -1502,7 +1528,6 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True): * `topo` : topology descriptions * `dut`: device under test * `input_dict` : Input dict data, required when configuring from testcase - * `expected` : expected results from API, by-default True Usage ----- @@ -1522,18 +1547,30 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True): True or False (Error Message) """ - logger.debug("Entering lib API: verify_ospf_summary()") + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) result = False router = dut logger.info("Verifying OSPF summary on router %s:", router) - if "ospf" not in topo["routers"][dut]: - errormsg = "[DUT: {}] OSPF is not configured on the router.".format(router) - return errormsg - rnode = tgen.routers()[dut] - show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json", isjson=True) + + if ospf: + if 'ospf6' not in topo['routers'][dut]: + errormsg = "[DUT: {}] OSPF6 is not configured on the router.".format( + router) + return errormsg + + show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf summary detail json", + isjson=True) + else: + if 'ospf' not in topo['routers'][dut]: + errormsg = "[DUT: {}] OSPF is not configured on the router.".format( + router) + return errormsg + + show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json", + isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): @@ -1542,35 +1579,31 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True): # To find neighbor ip type ospf_summary_data = input_dict + + if ospf: + show_ospf_json = show_ospf_json['default'] + for ospf_summ, summ_data in ospf_summary_data.items(): if ospf_summ not in show_ospf_json: continue - summary = ospf_summary_data[ospf_summ]["Summary address"] + summary = ospf_summary_data[ospf_summ]['Summary address'] + if summary in show_ospf_json: for summ in summ_data: if summ_data[summ] == show_ospf_json[summary][summ]: - logger.info( - "[DUT: %s] OSPF summary %s:%s is %s", - router, - summary, - summ, - summ_data[summ], - ) + logger.info("[DUT: %s] OSPF summary %s:%s is %s", + router, summary, summ, summ_data[summ]) result = True else: - errormsg = ( - "[DUT: {}] OSPF summary {}:{} is %s, " - "Expected is {}".format( - router, summary, summ, 
show_ospf_json[summary][summ] - ) - ) + errormsg = ("[DUT: {}] OSPF summary {} : {} is {}, " + "Expected is {}".format(router, summary, summ,show_ospf_json[ + summary][summ], summ_data[summ] )) return errormsg - logger.debug("Exiting API: verify_ospf_summary()") + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return result - @retry(retry_timeout=30) def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None): @@ -1627,31 +1660,34 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, found_routes = [] missing_routes = [] - if "static_routes" in input_dict[routerInput] or \ - "prefix" in input_dict[routerInput]: + if ( + "static_routes" in input_dict[routerInput] + or "prefix" in input_dict[routerInput] + ): if "prefix" in input_dict[routerInput]: static_routes = input_dict[routerInput]["prefix"] else: static_routes = input_dict[routerInput]["static_routes"] - for static_route in static_routes: cmd = "{}".format(command) cmd = "{} json".format(cmd) - ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True) + ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True) # Fix for PR 2644182 try: - ospf_rib_json = ospf_rib_json['routes'] + ospf_rib_json = ospf_rib_json["routes"] except KeyError: pass # Verifying output dictionary ospf_rib_json is not empty if bool(ospf_rib_json) is False: - errormsg = "[DUT: {}] No routes found in OSPF6 route " \ + errormsg = ( + "[DUT: {}] No routes found in OSPF6 route " "table".format(router) + ) return errormsg network = static_route["network"] @@ -1659,7 +1695,6 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, _tag = static_route.setdefault("tag", None) _rtype = static_route.setdefault("routeType", None) - # Generating IPs for verification ip_list = generate_ips(network, no_of_ip) st_found = False @@ -1668,7 +1703,7 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, st_rt = str(ipaddress.ip_network(frr_unicode(st_rt))) _addr_type = validate_ip_address(st_rt) - if _addr_type != 'ipv6': + if _addr_type != "ipv6": continue if st_rt in ospf_rib_json: @@ -1681,17 +1716,26 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, next_hop = [next_hop] for mnh in range(0, len(ospf_rib_json[st_rt])): - if 'fib' in ospf_rib_json[st_rt][ - mnh]["nextHops"][0]: - found_hops.append([rib_r[ - "ip"] for rib_r in ospf_rib_json[ - st_rt][mnh]["nextHops"]]) + if ( + "fib" + in ospf_rib_json[st_rt][mnh]["nextHops"][0] + ): + found_hops.append( + [ + rib_r["ip"] + for rib_r in ospf_rib_json[st_rt][mnh][ + "nextHops" + ] + ] + ) if found_hops[0]: - missing_list_of_nexthops = \ - set(found_hops[0]).difference(next_hop) - additional_nexthops_in_required_nhs = \ - set(next_hop).difference(found_hops[0]) + missing_list_of_nexthops = set( + found_hops[0] + ).difference(next_hop) + additional_nexthops_in_required_nhs = set( + next_hop + ).difference(found_hops[0]) if additional_nexthops_in_required_nhs: logger.info( @@ -1699,13 +1743,18 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, "%s is not active for route %s in " "RIB of router %s\n", additional_nexthops_in_required_nhs, - st_rt, dut) + st_rt, + dut, + ) errormsg = ( "Nexthop {} is not active" " for route {} in RIB of router" " {}\n".format( - additional_nexthops_in_required_nhs, - st_rt, dut)) + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) return errormsg else: nh_found = True @@ -1713,98 +1762,118 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, elif next_hop and fib is None: if type(next_hop) is 
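verify_ospf_summary() gains an ospf parameter: passing ospf="ospf6" switches the command to "show ipv6 ospf summary detail json" and reads the "default" instance from the output. A usage sketch; the prefix and the counter in input_dict are hypothetical, though "Summary address" is the key the API reads:

    from lib.ospf import verify_ospf_summary

    def check_summary(tgen, topo):
        # Hypothetical expected summary entry for router r1.
        input_dict = {
            "10.0.0.0/8": {
                "Summary address": "10.0.0.0/8",
                "External route count": 5,
            }
        }
        result = verify_ospf_summary(tgen, topo, "r1", input_dict, ospf="ospf6")
        assert result is True, "summary check failed: {}".format(result)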
not list: next_hop = [next_hop] - found_hops = [rib_r['nextHop'] for rib_r in - ospf_rib_json[st_rt][ - "nextHops"]] + found_hops = [ + rib_r["nextHop"] + for rib_r in ospf_rib_json[st_rt]["nextHops"] + ] if found_hops: - missing_list_of_nexthops = \ - set(found_hops).difference(next_hop) - additional_nexthops_in_required_nhs = \ - set(next_hop).difference(found_hops) + missing_list_of_nexthops = set( + found_hops + ).difference(next_hop) + additional_nexthops_in_required_nhs = set( + next_hop + ).difference(found_hops) if additional_nexthops_in_required_nhs: logger.info( - "Missing nexthop %s for route"\ - " %s in RIB of router %s\n", \ - additional_nexthops_in_required_nhs, \ - st_rt, dut) - errormsg=("Nexthop {} is Missing for "\ - "route {} in RIB of router {}\n".format( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", additional_nexthops_in_required_nhs, - st_rt, dut)) + st_rt, + dut, + ) + errormsg = ( + "Nexthop {} is Missing for " + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, + dut, + ) + ) return errormsg else: nh_found = True if _rtype: - if "destinationType" not in ospf_rib_json[ - st_rt]: - errormsg = ("[DUT: {}]: destinationType missing" - "for route {} in OSPF RIB \n".\ - format(dut, st_rt)) + if "destinationType" not in ospf_rib_json[st_rt]: + errormsg = ( + "[DUT: {}]: destinationType missing" + "for route {} in OSPF RIB \n".format(dut, st_rt) + ) return errormsg - elif _rtype != ospf_rib_json[st_rt][ - "destinationType"]: - errormsg = ("[DUT: {}]: destinationType mismatch" - "for route {} in OSPF RIB \n".\ - format(dut, st_rt)) + elif _rtype != ospf_rib_json[st_rt]["destinationType"]: + errormsg = ( + "[DUT: {}]: destinationType mismatch" + "for route {} in OSPF RIB \n".format(dut, st_rt) + ) return errormsg else: - logger.info("DUT: {}]: Found destinationType {}" - "for route {}".\ - format(dut, _rtype, st_rt)) + logger.info( + "DUT: {}]: Found destinationType {}" + "for route {}".format(dut, _rtype, st_rt) + ) if tag: - if "tag" not in ospf_rib_json[ - st_rt]: - errormsg = ("[DUT: {}]: tag is not" - " present for" - " route {} in RIB \n".\ - format(dut, st_rt - )) + if "tag" not in ospf_rib_json[st_rt]: + errormsg = ( + "[DUT: {}]: tag is not" + " present for" + " route {} in RIB \n".format(dut, st_rt) + ) return errormsg - if _tag != ospf_rib_json[ - st_rt]["tag"]: - errormsg = ("[DUT: {}]: tag value {}" - " is not matched for" - " route {} in RIB \n".\ - format(dut, _tag, st_rt, - )) + if _tag != ospf_rib_json[st_rt]["tag"]: + errormsg = ( + "[DUT: {}]: tag value {}" + " is not matched for" + " route {} in RIB \n".format( + dut, + _tag, + st_rt, + ) + ) return errormsg if metric is not None: - if "type2cost" not in ospf_rib_json[ - st_rt]: - errormsg = ("[DUT: {}]: metric is" - " not present for" - " route {} in RIB \n".\ - format(dut, st_rt)) + if "type2cost" not in ospf_rib_json[st_rt]: + errormsg = ( + "[DUT: {}]: metric is" + " not present for" + " route {} in RIB \n".format(dut, st_rt) + ) return errormsg - if metric != ospf_rib_json[ - st_rt]["type2cost"]: - errormsg = ("[DUT: {}]: metric value " - "{} is not matched for " - "route {} in RIB \n".\ - format(dut, metric, st_rt, - )) + if metric != ospf_rib_json[st_rt]["type2cost"]: + errormsg = ( + "[DUT: {}]: metric value " + "{} is not matched for " + "route {} in RIB \n".format( + dut, + metric, + st_rt, + ) + ) return errormsg else: missing_routes.append(st_rt) if nh_found: - logger.info("[DUT: {}]: Found next_hop {} for all OSPF" - " routes in 
RIB".format(router, next_hop)) + logger.info( + "[DUT: {}]: Found next_hop {} for all OSPF" + " routes in RIB".format(router, next_hop) + ) if len(missing_routes) > 0: - errormsg = ("[DUT: {}]: Missing route in RIB, " - "routes: {}".\ - format(dut, missing_routes)) + errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format( + dut, missing_routes + ) return errormsg if found_routes: - logger.info("[DUT: %s]: Verified routes in RIB, found" - " routes are: %s\n", dut, found_routes) + logger.info( + "[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n", + dut, + found_routes, + ) result = True logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) @@ -1855,15 +1924,16 @@ def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None): result = False for router, rnode in tgen.routers().iteritems(): - if 'ospf6' not in topo['routers'][router]: + if "ospf6" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF interface on router %s:", router) - show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf interface json", - isjson=True) + show_ospf_json = run_frr_cmd( + rnode, "show ipv6 ospf interface json", isjson=True + ) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): @@ -1873,32 +1943,49 @@ def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None): # To find neighbor ip type ospf_intf_data = input_dict[router]["links"] for ospf_intf, intf_data in ospf_intf_data.items(): - intf = topo['routers'][router]['links'][ospf_intf]['interface'] - if intf in show_ospf_json: - for intf_attribute in intf_data['ospf6']: - if intf_data['ospf6'][intf_attribute] is not list: - if intf_data['ospf6'][intf_attribute] == show_ospf_json[ - intf][intf_attribute]: - logger.info("[DUT: %s] OSPF6 interface %s: %s is %s", - router, intf, intf_attribute, intf_data['ospf6'][ - intf_attribute]) - elif intf_data['ospf6'][intf_attribute] is list: + intf = topo["routers"][router]["links"][ospf_intf]["interface"] + if intf in show_ospf_json: + for intf_attribute in intf_data["ospf6"]: + if intf_data["ospf6"][intf_attribute] is not list: + if ( + intf_data["ospf6"][intf_attribute] + == show_ospf_json[intf][intf_attribute] + ): + logger.info( + "[DUT: %s] OSPF6 interface %s: %s is %s", + router, + intf, + intf_attribute, + intf_data["ospf6"][intf_attribute], + ) + elif intf_data["ospf6"][intf_attribute] is list: for addr_list in len(show_ospf_json[intf][intf_attribute]): - if show_ospf_json[intf][intf_attribute][addr_list][ - 'address'].split('/')[0] == intf_data['ospf6'][ - 'internetAddress'][0]['address']: - break + if ( + show_ospf_json[intf][intf_attribute][addr_list][ + "address" + ].split("/")[0] + == intf_data["ospf6"]["internetAddress"][0]["address"] + ): + break else: - errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \ - Expected is {}".format(router, intf, intf_attribute, - intf_data['ospf6'][intf_attribute], intf_data['ospf6'][ - intf_attribute]) + errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \ + Expected is {}".format( + router, + intf, + intf_attribute, + intf_data["ospf6"][intf_attribute], + intf_data["ospf6"][intf_attribute], + ) return errormsg else: - errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \ - Expected is {}".format(router, intf, intf_attribute, - intf_data['ospf6'][intf_attribute], intf_data['ospf6'][ - intf_attribute]) + errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \ + Expected is {}".format( + router, + intf, + 
intf_attribute, + intf_data["ospf6"][intf_attribute], + intf_data["ospf6"][intf_attribute], + ) return errormsg result = True logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) @@ -1956,16 +2043,14 @@ def verify_ospf6_database(tgen, topo, dut, input_dict): router = dut logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) - if 'ospf' not in topo['routers'][dut]: - errormsg = "[DUT: {}] OSPF is not configured on the router.".format( - dut) + if "ospf" not in topo["routers"][dut]: + errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut) return errormsg rnode = tgen.routers()[dut] logger.info("Verifying OSPF interface on router %s:", dut) - show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", - isjson=True) + show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF is not running" @@ -1973,167 +2058,209 @@ def verify_ospf6_database(tgen, topo, dut, input_dict): # for inter and inter lsa's ospf_db_data = input_dict.setdefault("areas", None) - ospf_external_lsa = input_dict.setdefault( - 'asExternalLinkStates', None) + ospf_external_lsa = input_dict.setdefault("asExternalLinkStates", None) if ospf_db_data: - for ospf_area, area_lsa in ospf_db_data.items(): - if ospf_area in show_ospf_json['areas']: - if 'routerLinkStates' in area_lsa: - for lsa in area_lsa['routerLinkStates']: - for rtrlsa in show_ospf_json['areas'][ospf_area][ - 'routerLinkStates']: - if lsa['lsaId'] == rtrlsa['lsaId'] and \ - lsa['advertisedRouter'] == rtrlsa[ - 'advertisedRouter']: - result = True - break - if result: - logger.info( - "[DUT: %s] OSPF LSDB area %s:Router " - "LSA %s", router, ospf_area, lsa) + for ospf_area, area_lsa in ospf_db_data.items(): + if ospf_area in show_ospf_json["areas"]: + if "routerLinkStates" in area_lsa: + for lsa in area_lsa["routerLinkStates"]: + for rtrlsa in show_ospf_json["areas"][ospf_area][ + "routerLinkStates" + ]: + if ( + lsa["lsaId"] == rtrlsa["lsaId"] + and lsa["advertisedRouter"] + == rtrlsa["advertisedRouter"] + ): + result = True break - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s", + router, + ospf_area, + lsa, + ) + break + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" " Router LSA is {}".format(router, ospf_area, lsa) - return errormsg + ) + return errormsg - if 'networkLinkStates' in area_lsa: - for lsa in area_lsa['networkLinkStates']: - for netlsa in show_ospf_json['areas'][ospf_area][ - 'networkLinkStates']: - if lsa in show_ospf_json['areas'][ospf_area][ - 'networkLinkStates']: - if lsa['lsaId'] == netlsa['lsaId'] and \ - lsa['advertisedRouter'] == netlsa[ - 'advertisedRouter']: - result = True - break - if result: - logger.info( - "[DUT: %s] OSPF LSDB area %s:Network " - "LSA %s", router, ospf_area, lsa) - break - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ + if "networkLinkStates" in area_lsa: + for lsa in area_lsa["networkLinkStates"]: + for netlsa in show_ospf_json["areas"][ospf_area][ + "networkLinkStates" + ]: + if ( + lsa + in show_ospf_json["areas"][ospf_area][ + "networkLinkStates" + ] + ): + if ( + lsa["lsaId"] == netlsa["lsaId"] + and lsa["advertisedRouter"] + == netlsa["advertisedRouter"] + ): + result = True + break + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s", + router, + ospf_area, + lsa, 
+ ) + break + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" " Network LSA is {}".format(router, ospf_area, lsa) - return errormsg + ) + return errormsg - if 'summaryLinkStates' in area_lsa: - for lsa in area_lsa['summaryLinkStates']: - for t3lsa in show_ospf_json['areas'][ospf_area][ - 'summaryLinkStates']: - if lsa['lsaId'] == t3lsa['lsaId'] and \ - lsa['advertisedRouter'] == t3lsa[ - 'advertisedRouter']: - result = True - break - if result: - logger.info( - "[DUT: %s] OSPF LSDB area %s:Summary " - "LSA %s", router, ospf_area, lsa) + if "summaryLinkStates" in area_lsa: + for lsa in area_lsa["summaryLinkStates"]: + for t3lsa in show_ospf_json["areas"][ospf_area][ + "summaryLinkStates" + ]: + if ( + lsa["lsaId"] == t3lsa["lsaId"] + and lsa["advertisedRouter"] == t3lsa["advertisedRouter"] + ): + result = True break - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s", + router, + ospf_area, + lsa, + ) + break + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" " Summary LSA is {}".format(router, ospf_area, lsa) - return errormsg + ) + return errormsg - if 'nssaExternalLinkStates' in area_lsa: - for lsa in area_lsa['nssaExternalLinkStates']: - for t7lsa in show_ospf_json['areas'][ospf_area][ - 'nssaExternalLinkStates']: - if lsa['lsaId'] == t7lsa['lsaId'] and \ - lsa['advertisedRouter'] == t7lsa[ - 'advertisedRouter']: - result = True - break - if result: - logger.info( - "[DUT: %s] OSPF LSDB area %s:Type7 " - "LSA %s", router, ospf_area, lsa) + if "nssaExternalLinkStates" in area_lsa: + for lsa in area_lsa["nssaExternalLinkStates"]: + for t7lsa in show_ospf_json["areas"][ospf_area][ + "nssaExternalLinkStates" + ]: + if ( + lsa["lsaId"] == t7lsa["lsaId"] + and lsa["advertisedRouter"] == t7lsa["advertisedRouter"] + ): + result = True break - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Type7 " "LSA %s", + router, + ospf_area, + lsa, + ) + break + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" " Type7 LSA is {}".format(router, ospf_area, lsa) - return errormsg + ) + return errormsg - if 'asbrSummaryLinkStates' in area_lsa: - for lsa in area_lsa['asbrSummaryLinkStates']: - for t4lsa in show_ospf_json['areas'][ospf_area][ - 'asbrSummaryLinkStates']: - if lsa['lsaId'] == t4lsa['lsaId'] and \ - lsa['advertisedRouter'] == t4lsa[ - 'advertisedRouter']: - result = True - break - if result: - logger.info( - "[DUT: %s] OSPF LSDB area %s:ASBR Summary " - "LSA %s", router, ospf_area, lsa) + if "asbrSummaryLinkStates" in area_lsa: + for lsa in area_lsa["asbrSummaryLinkStates"]: + for t4lsa in show_ospf_json["areas"][ospf_area][ + "asbrSummaryLinkStates" + ]: + if ( + lsa["lsaId"] == t4lsa["lsaId"] + and lsa["advertisedRouter"] == t4lsa["advertisedRouter"] + ): result = True - else: - errormsg = \ - "[DUT: {}] OSPF LSDB area {}: expected" \ - " ASBR Summary LSA is {}".format( - router, ospf_area, lsa) - return errormsg + break + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s", + router, + ospf_area, + lsa, + ) + result = True + else: + errormsg = ( + "[DUT: {}] OSPF LSDB area {}: expected" + " ASBR Summary LSA is {}".format(router, ospf_area, lsa) + ) + return errormsg - if 'linkLocalOpaqueLsa' in area_lsa: - for lsa in area_lsa['linkLocalOpaqueLsa']: - try: - for lnklsa in show_ospf_json['areas'][ospf_area][ - 'linkLocalOpaqueLsa']: - if lsa['lsaId'] in 
lnklsa['lsaId'] and \ - 'linkLocalOpaqueLsa' in show_ospf_json[ - 'areas'][ospf_area]: - logger.info(( - "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA" - "%s", ospf_area, lsa)) - result = True - else: - errormsg = ("[DUT: FRR] OSPF LSDB area: {} " - "expected Opaque-LSA is {}, Found is {}".format( - ospf_area, lsa, show_ospf_json)) - raise ValueError (errormsg) - return errormsg - except KeyError: - errormsg = ("[DUT: FRR] linkLocalOpaqueLsa Not " - "present") - return errormsg + if "linkLocalOpaqueLsa" in area_lsa: + for lsa in area_lsa["linkLocalOpaqueLsa"]: + try: + for lnklsa in show_ospf_json["areas"][ospf_area][ + "linkLocalOpaqueLsa" + ]: + if ( + lsa["lsaId"] in lnklsa["lsaId"] + and "linkLocalOpaqueLsa" + in show_ospf_json["areas"][ospf_area] + ): + logger.info( + ( + "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA" + "%s", + ospf_area, + lsa, + ) + ) + result = True + else: + errormsg = ( + "[DUT: FRR] OSPF LSDB area: {} " + "expected Opaque-LSA is {}, Found is {}".format( + ospf_area, lsa, show_ospf_json + ) + ) + raise ValueError(errormsg) + return errormsg + except KeyError: + errormsg = "[DUT: FRR] linkLocalOpaqueLsa Not " "present" + return errormsg if ospf_external_lsa: - for lsa in ospf_external_lsa: - try: - for t5lsa in show_ospf_json['asExternalLinkStates']: - if lsa['lsaId'] == t5lsa['lsaId'] and \ - lsa['advertisedRouter'] == t5lsa[ - 'advertisedRouter']: - result = True - break - except KeyError: - result = False - if result: - logger.info( - "[DUT: %s] OSPF LSDB:External LSA %s", - router, lsa) - result = True - else: - errormsg = \ - "[DUT: {}] OSPF LSDB : expected" \ - " External LSA is {}".format(router, lsa) - return errormsg + for lsa in ospf_external_lsa: + try: + for t5lsa in show_ospf_json["asExternalLinkStates"]: + if ( + lsa["lsaId"] == t5lsa["lsaId"] + and lsa["advertisedRouter"] == t5lsa["advertisedRouter"] + ): + result = True + break + except KeyError: + result = False + if result: + logger.info("[DUT: %s] OSPF LSDB:External LSA %s", router, lsa) + result = True + else: + errormsg = ( + "[DUT: {}] OSPF LSDB : expected" + " External LSA is {}".format(router, lsa) + ) + return errormsg logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return result - -def config_ospf6_interface (tgen, topo, input_dict=None, build=False, - load_config=True): +def config_ospf6_interface(tgen, topo, input_dict=None, build=False, load_config=True): """ API to configure ospf on router. 
@@ -2180,17 +2307,17 @@ def config_ospf6_interface (tgen, topo, input_dict=None, build=False, "input_dict, passed input_dict %s", router, str(input_dict)) continue - ospf_data = input_dict[router]['links'][lnk]['ospf6'] + ospf_data = input_dict[router]["links"][lnk]["ospf6"] data_ospf_area = ospf_data.setdefault("area", None) - data_ospf_auth = ospf_data.setdefault("authentication", None) + data_ospf_auth = ospf_data.setdefault("hash-algo", None) data_ospf_dr_priority = ospf_data.setdefault("priority", None) data_ospf_cost = ospf_data.setdefault("cost", None) data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None) try: - intf = topo['routers'][router]['links'][lnk]['interface'] + intf = topo["routers"][router]["links"][lnk]["interface"] except KeyError: - intf = topo['switches'][router]['links'][lnk]['interface'] + intf = topo["switches"][router]["links"][lnk]["interface"] # interface cmd = "interface {}".format(intf) @@ -2201,34 +2328,50 @@ def config_ospf6_interface (tgen, topo, input_dict=None, build=False, cmd = "ipv6 ospf area {}".format(data_ospf_area) config_data.append(cmd) + # interface ospf auth + if data_ospf_auth: + cmd = "ipv6 ospf6 authentication" + + if "del_action" in ospf_data: + cmd = "no {}".format(cmd) + + if "hash-algo" in ospf_data: + cmd = "{} key-id {} hash-algo {} key {}".format( + cmd, + ospf_data["key-id"], + ospf_data["hash-algo"], + ospf_data["key"], + ) + if "del_action" in ospf_data: + cmd = "no {}".format(cmd) + config_data.append(cmd) + # interface ospf dr priority if data_ospf_dr_priority: - cmd = "ipv6 ospf priority {}".format( - ospf_data["priority"]) - if 'del_action' in ospf_data: + cmd = "ipv6 ospf priority {}".format(ospf_data["priority"]) + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) # interface ospf cost if data_ospf_cost: - cmd = "ipv6 ospf cost {}".format( - ospf_data["cost"]) - if 'del_action' in ospf_data: + cmd = "ipv6 ospf cost {}".format(ospf_data["cost"]) + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) # interface ospf mtu if data_ospf_mtu: cmd = "ipv6 ospf mtu-ignore" - if 'del_action' in ospf_data: + if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) if build: return config_data else: - result = create_common_configuration(tgen, router, config_data, - "interface_config", - build=build) + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=build + ) logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return result diff --git a/tests/topotests/lib/scapy_sendpkt.py b/tests/topotests/lib/scapy_sendpkt.py new file mode 100755 index 0000000000..0bb6a72092 --- /dev/null +++ b/tests/topotests/lib/scapy_sendpkt.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 eval: (blacken-mode 1) -*- +# +# July 29 2021, Christian Hopps <chopps@labn.net> +# +# Copyright (c) 2021, LabN Consulting, L.L.C. 
("LabN") +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +import argparse +import logging +import re +import sys + +from scapy.all import conf, srp + +conf.verb = 0 + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--interface", help="interface to send packet on.") + parser.add_argument("-I", "--imports", help="scapy symbols to import") + parser.add_argument( + "-t", "--timeout", type=float, default=2.0, help="timeout for reply receipts" + ) + parser.add_argument("pktdef", help="scapy packet definition to send") + args = parser.parse_args() + + if args.imports: + i = args.imports.replace("\n", "").strip() + if not re.match("[a-zA-Z0-9_ \t,]", i): + logging.critical('Invalid imports specified: "%s"', i) + sys.exit(1) + exec("from scapy.all import " + i, globals(), locals()) + + ans, unans = srp(eval(args.pktdef), iface=args.interface, timeout=args.timeout) + if not ans: + sys.exit(2) + for pkt in ans: + print(pkt.answer.show(dump=True)) + + +if __name__ == "__main__": + main() diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index ade5933504..8888421bf1 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -471,6 +471,12 @@ class TopoGear(object): """ return self.tgen.net[self.name].cmd(command) + def popen(self, *params, **kwargs): + """ + Popen on the router. + """ + return self.tgen.net[self.name].popen(*params, **kwargs) + def add_link(self, node, myif=None, nodeif=None): """ Creates a link (connection) between myself and the specified node. 
@@ -801,8 +807,8 @@ class TopoRouter(TopoGear): try: return json.loads(output) - except ValueError: - logger.warning("vtysh_cmd: failed to convert json output") + except ValueError as error: + logger.warning("vtysh_cmd: %s: failed to convert json output: %s: %s", self.name, str(output), str(error)) return {} def vtysh_multicmd(self, commands, pretty_output=True, daemon=None): diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py index fcc6c19868..1ae482a265 100644 --- a/tests/topotests/lib/topojson.py +++ b/tests/topotests/lib/topojson.py @@ -293,6 +293,24 @@ def build_topo_from_json(tgen, topo): ) +def linux_intf_config_from_json(tgen, topo): + """Configure interfaces from linux based on topo.""" + routers = topo["routers"] + for rname in routers: + router = tgen.gears[rname] + links = routers[rname]["links"] + for rrname in links: + link = links[rrname] + if rrname == "lo": + lname = "lo" + else: + lname = link["interface"] + if "ipv4" in link: + router.run("ip addr add {} dev {}".format(link["ipv4"], lname)) + if "ipv6" in link: + router.run("ip -6 addr add {} dev {}".format(link["ipv6"], lname)) + + def build_config_from_json(tgen, topo, save_bkup=True): """ Reads initial configuraiton from JSON for each router, builds diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index d1f60bfe0d..6112b4b633 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -1152,6 +1152,18 @@ class Router(Node): self.reportCores = True self.version = None + self.ns_cmd = "sudo nsenter -m -n -t {} ".format(self.pid) + try: + # Allow escaping from running inside docker + cgroup = open("/proc/1/cgroup").read() + m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup) + if m: + self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd + except IOError: + pass + else: + logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd)) + def _config_frr(self, **params): "Configure FRR binaries" self.daemondir = params.get("frrdir") @@ -1223,25 +1235,28 @@ class Router(Node): dmns = rundaemons.split("\n") # Exclude empty string at end of list for d in dmns[:-1]: - daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() - if daemonpid.isdigit() and pid_exists(int(daemonpid)): - daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0]) - logger.info("{}: stopping {}".format(self.name, daemonname)) - try: - os.kill(int(daemonpid), signal.SIGTERM) - except OSError as err: - if err.errno == errno.ESRCH: - logger.error( - "{}: {} left a dead pidfile (pid={})".format( - self.name, daemonname, daemonpid + # Only check if daemonfilepath starts with / + # Avoids hang on "-> Connection closed" in above self.cmd() + if d[0] == '/': + daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() + if daemonpid.isdigit() and pid_exists(int(daemonpid)): + daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0]) + logger.info("{}: stopping {}".format(self.name, daemonname)) + try: + os.kill(int(daemonpid), signal.SIGTERM) + except OSError as err: + if err.errno == errno.ESRCH: + logger.error( + "{}: {} left a dead pidfile (pid={})".format( + self.name, daemonname, daemonpid + ) ) - ) - else: - logger.info( - "{}: {} could not kill pid {}: {}".format( - self.name, daemonname, daemonpid, str(err) + else: + logger.info( + "{}: {} could not kill pid {}: {}".format( + self.name, daemonname, daemonpid, str(err) + ) ) - ) if not wait: return errors @@ -1350,7 +1365,7 @@ class Router(Node): term = topo_terminal if topo_terminal else "xterm" 
makeTerm(self, title=title if title else cmd, term=term, cmd=cmd) else: - nscmd = "sudo nsenter -m -n -t {} {}".format(self.pid, cmd) + nscmd = self.ns_cmd + cmd if "TMUX" in os.environ: self.cmd("tmux select-layout main-horizontal") wcmd = "tmux split-window -h" @@ -1437,7 +1452,7 @@ class Router(Node): logger.info("BFD Test, but no bfdd compiled or installed") return "BFD Test, but no bfdd compiled or installed" - return self.startRouterDaemons() + return self.startRouterDaemons(tgen=tgen) def getStdErr(self, daemon): return self.getLog("err", daemon) @@ -1448,14 +1463,16 @@ class Router(Node): def getLog(self, log, daemon): return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log)) - def startRouterDaemons(self, daemons=None): + def startRouterDaemons(self, daemons=None, tgen=None): "Starts all FRR daemons for this router." + asan_abort = g_extra_config["asan_abort"] gdb_breakpoints = g_extra_config["gdb_breakpoints"] gdb_daemons = g_extra_config["gdb_daemons"] gdb_routers = g_extra_config["gdb_routers"] valgrind_extra = g_extra_config["valgrind_extra"] valgrind_memleaks = g_extra_config["valgrind_memleaks"] + strace_daemons = g_extra_config["strace_daemons"] bundle_data = "" @@ -1482,7 +1499,6 @@ class Router(Node): os.path.join(self.daemondir, "bgpd") + " -v" ).split()[2] logger.info("{}: running version: {}".format(self.name, self.version)) - # If `daemons` was specified then some upper API called us with # specific daemons, otherwise just use our own configuration. daemons_list = [] @@ -1506,13 +1522,20 @@ class Router(Node): else: binary = os.path.join(self.daemondir, daemon) - cmdenv = "ASAN_OPTIONS=log_path={0}.asan".format(daemon) + cmdenv = "ASAN_OPTIONS=" + if asan_abort: + cmdenv = "abort_on_error=1:" + cmdenv += "log_path={0}/{1}.{2}.asan ".format(self.logdir, self.name, daemon) + if valgrind_memleaks: this_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__))) supp_file = os.path.abspath(os.path.join(this_dir, "../../../tools/valgrind.supp")) cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(daemon, self.logdir, self.name, supp_file) if valgrind_extra: cmdenv += "--gen-suppressions=all --expensive-definedness-checks=yes" + elif daemon in strace_daemons or "all" in strace_daemons: + cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(daemon, self.logdir, self.name) + cmdopt = "{} --log file:{}.log --log-level debug".format( daemon_opts, daemon ) diff --git a/tests/topotests/msdp_mesh_topo1/r1/pimd.conf b/tests/topotests/msdp_mesh_topo1/r1/pimd.conf index 49341efa57..c2ffed4762 100644 --- a/tests/topotests/msdp_mesh_topo1/r1/pimd.conf +++ b/tests/topotests/msdp_mesh_topo1/r1/pimd.conf @@ -10,6 +10,8 @@ interface r1-eth1 ip igmp ! ip pim rp 10.254.254.1 +ip pim join-prune-interval 5 +ip msdp timers 10 20 3 ip msdp mesh-group mg-1 source 10.254.254.1 ip msdp mesh-group mg-1 member 10.254.254.2 ip msdp mesh-group mg-1 member 10.254.254.3 diff --git a/tests/topotests/msdp_mesh_topo1/r2/pimd.conf b/tests/topotests/msdp_mesh_topo1/r2/pimd.conf index 9005263ed7..1719a17007 100644 --- a/tests/topotests/msdp_mesh_topo1/r2/pimd.conf +++ b/tests/topotests/msdp_mesh_topo1/r2/pimd.conf @@ -9,6 +9,8 @@ interface r2-eth1 ip pim ! 
ip pim rp 10.254.254.2 +ip pim join-prune-interval 5 +ip msdp timers 10 20 3 ip msdp mesh-group mg-1 source 10.254.254.2 ip msdp mesh-group mg-1 member 10.254.254.1 ip msdp mesh-group mg-1 member 10.254.254.3 diff --git a/tests/topotests/msdp_mesh_topo1/r3/pimd.conf b/tests/topotests/msdp_mesh_topo1/r3/pimd.conf index 30e1148561..2748a55d83 100644 --- a/tests/topotests/msdp_mesh_topo1/r3/pimd.conf +++ b/tests/topotests/msdp_mesh_topo1/r3/pimd.conf @@ -9,7 +9,9 @@ interface r3-eth1 ip pim ip igmp ! +ip pim join-prune-interval 5 ip pim rp 10.254.254.3 +ip msdp timers 10 20 3 ip msdp mesh-group mg-1 source 10.254.254.3 ip msdp mesh-group mg-1 member 10.254.254.1 ip msdp mesh-group mg-1 member 10.254.254.2 diff --git a/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py index 719ead091c..222fb28ade 100644 --- a/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py +++ b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py @@ -223,7 +223,7 @@ def test_wait_msdp_convergence(): "show ip msdp peer json", {peer: {"state": "established", "saCount": sa_count}} ) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) assertmsg = '"{}" MSDP connection failure'.format(router) assert result is None, assertmsg diff --git a/tests/topotests/msdp_topo1/__init__.py b/tests/topotests/msdp_topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/msdp_topo1/__init__.py diff --git a/tests/topotests/msdp_topo1/r1/bgpd.conf b/tests/topotests/msdp_topo1/r1/bgpd.conf new file mode 100644 index 0000000000..01d8ddbdfa --- /dev/null +++ b/tests/topotests/msdp_topo1/r1/bgpd.conf @@ -0,0 +1,8 @@ +router bgp 65001 + no bgp ebgp-requires-policy + neighbor 192.168.0.2 remote-as 65002 + neighbor 192.168.1.2 remote-as 65003 + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/msdp_topo1/r1/pimd.conf b/tests/topotests/msdp_topo1/r1/pimd.conf new file mode 100644 index 0000000000..4274315271 --- /dev/null +++ b/tests/topotests/msdp_topo1/r1/pimd.conf @@ -0,0 +1,22 @@ +debug pim +debug pim zebra +! +interface lo + ip pim + ip pim use-source 10.254.254.1 +! +interface r1-eth0 + ip pim +! +interface r1-eth1 + ip pim +! +interface r1-eth2 + ip pim + ip igmp +! +ip msdp timers 10 20 3 +ip msdp peer 192.168.0.2 source 192.168.0.1 +ip msdp peer 192.168.1.2 source 192.168.1.1 +ip pim rp 10.254.254.1 +ip pim join-prune-interval 5 diff --git a/tests/topotests/msdp_topo1/r1/zebra.conf b/tests/topotests/msdp_topo1/r1/zebra.conf new file mode 100644 index 0000000000..fb6eabccdf --- /dev/null +++ b/tests/topotests/msdp_topo1/r1/zebra.conf @@ -0,0 +1,14 @@ +ip forwarding +! +interface r1-eth0 + ip address 192.168.0.1/24 +! +interface r1-eth1 + ip address 192.168.1.1/24 +! +interface r1-eth2 + ip address 192.168.10.1/24 +! +interface lo + ip address 10.254.254.1/32 +! diff --git a/tests/topotests/msdp_topo1/r2/bgpd.conf b/tests/topotests/msdp_topo1/r2/bgpd.conf new file mode 100644 index 0000000000..987bef40dd --- /dev/null +++ b/tests/topotests/msdp_topo1/r2/bgpd.conf @@ -0,0 +1,8 @@ +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 192.168.0.1 remote-as 65001 + neighbor 192.168.2.2 remote-as 65004 + address-family ipv4 unicast + redistribute connected + exit-address-family +! 
diff --git a/tests/topotests/msdp_topo1/r2/pimd.conf b/tests/topotests/msdp_topo1/r2/pimd.conf new file mode 100644 index 0000000000..a4a69bf05c --- /dev/null +++ b/tests/topotests/msdp_topo1/r2/pimd.conf @@ -0,0 +1,18 @@ +debug pim +debug pim zebra +! +interface lo + ip pim + ip pim use-source 10.254.254.2 +! +interface r2-eth0 + ip pim +! +interface r2-eth1 + ip pim +! +ip msdp timers 10 20 3 +ip msdp peer 192.168.0.1 source 192.168.0.2 +ip msdp peer 192.168.2.2 source 192.168.2.1 +ip pim rp 10.254.254.2 +ip pim join-prune-interval 5 diff --git a/tests/topotests/msdp_topo1/r2/zebra.conf b/tests/topotests/msdp_topo1/r2/zebra.conf new file mode 100644 index 0000000000..527f7dd766 --- /dev/null +++ b/tests/topotests/msdp_topo1/r2/zebra.conf @@ -0,0 +1,11 @@ +ip forwarding +! +interface r2-eth0 + ip address 192.168.0.2/24 +! +interface r2-eth1 + ip address 192.168.2.1/24 +! +interface lo + ip address 10.254.254.2/32 +! diff --git a/tests/topotests/msdp_topo1/r3/bgpd.conf b/tests/topotests/msdp_topo1/r3/bgpd.conf new file mode 100644 index 0000000000..02d685b0e8 --- /dev/null +++ b/tests/topotests/msdp_topo1/r3/bgpd.conf @@ -0,0 +1,8 @@ +router bgp 65003 + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as 65001 + neighbor 192.168.3.2 remote-as 65004 + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/msdp_topo1/r3/pimd.conf b/tests/topotests/msdp_topo1/r3/pimd.conf new file mode 100644 index 0000000000..db94447c76 --- /dev/null +++ b/tests/topotests/msdp_topo1/r3/pimd.conf @@ -0,0 +1,18 @@ +debug pim +debug pim zebra +! +interface lo + ip pim + ip pim use-source 10.254.254.3 +! +interface r3-eth0 + ip pim +! +interface r3-eth1 + ip pim +! +ip msdp timers 10 20 3 +ip msdp peer 192.168.1.1 source 192.168.1.2 +ip msdp peer 192.168.3.2 source 192.168.3.1 +ip pim rp 10.254.254.3 +ip pim join-prune-interval 5 diff --git a/tests/topotests/msdp_topo1/r3/zebra.conf b/tests/topotests/msdp_topo1/r3/zebra.conf new file mode 100644 index 0000000000..688e752f42 --- /dev/null +++ b/tests/topotests/msdp_topo1/r3/zebra.conf @@ -0,0 +1,11 @@ +ip forwarding +! +interface r3-eth0 + ip address 192.168.1.2/24 +! +interface r3-eth1 + ip address 192.168.3.1/24 +! +interface lo + ip address 10.254.254.3/32 +! diff --git a/tests/topotests/msdp_topo1/r4/bgpd.conf b/tests/topotests/msdp_topo1/r4/bgpd.conf new file mode 100644 index 0000000000..633e8db245 --- /dev/null +++ b/tests/topotests/msdp_topo1/r4/bgpd.conf @@ -0,0 +1,9 @@ +router bgp 65004 + no bgp ebgp-requires-policy + neighbor 192.168.2.1 remote-as 65002 + neighbor 192.168.3.1 remote-as 65003 + address-family ipv4 unicast + redistribute connected + exit-address-family +! + diff --git a/tests/topotests/msdp_topo1/r4/pimd.conf b/tests/topotests/msdp_topo1/r4/pimd.conf new file mode 100644 index 0000000000..e9bb59054c --- /dev/null +++ b/tests/topotests/msdp_topo1/r4/pimd.conf @@ -0,0 +1,22 @@ +debug pim +debug pim zebra +! +interface lo + ip pim + ip pim use-source 10.254.254.4 +! +interface r4-eth0 + ip pim +! +interface r4-eth1 + ip pim +! +interface r4-eth2 + ip pim + ip igmp +! +ip msdp timers 10 20 3 +ip msdp peer 192.168.2.1 source 192.168.2.2 +ip msdp peer 192.168.3.1 source 192.168.3.2 +ip pim rp 10.254.254.4 +ip pim join-prune-interval 5 diff --git a/tests/topotests/msdp_topo1/r4/zebra.conf b/tests/topotests/msdp_topo1/r4/zebra.conf new file mode 100644 index 0000000000..1db8132256 --- /dev/null +++ b/tests/topotests/msdp_topo1/r4/zebra.conf @@ -0,0 +1,14 @@ +ip forwarding +! 
+interface r4-eth0 + ip address 192.168.2.2/24 +! +interface r4-eth1 + ip address 192.168.3.2/24 +! +interface r4-eth2 + ip address 192.168.4.1/24 +! +interface lo + ip address 10.254.254.4/32 +! diff --git a/tests/topotests/msdp_topo1/test_msdp_topo1.py b/tests/topotests/msdp_topo1/test_msdp_topo1.py new file mode 100755 index 0000000000..b860c04faa --- /dev/null +++ b/tests/topotests/msdp_topo1/test_msdp_topo1.py @@ -0,0 +1,499 @@ +#!/usr/bin/env python + +# +# test_msdp_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2021 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_msdp_topo1.py: Test the FRR PIM MSDP peer. +""" + +import os +import sys +import json +import socket +import tempfile +from functools import partial +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +pytestmark = [pytest.mark.bgpd, pytest.mark.pimd] + +# +# Test global variables: +# They are used to handle communicating with external application. +# +APP_SOCK_PATH = '/tmp/topotests/apps.sock' +HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py") +app_listener = None +app_clients = {} + + +def listen_to_applications(): + "Start listening socket to connect with applications." + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + sock.bind(APP_SOCK_PATH) + sock.listen(10) + global app_listener + app_listener = sock + + +def accept_host(host): + "Accept connection from application running in hosts." + global app_listener, app_clients + conn = app_listener.accept() + app_clients[host] = { + 'fd': conn[0], + 'address': conn[1] + } + + +def close_applications(): + "Signal applications to stop and close all sockets." + global app_listener, app_clients + + # Close listening socket. + app_listener.close() + + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + # Close all host connections. 
+ for host in ["h1", "h2"]: + if app_clients.get(host) is None: + continue + app_clients[host]["fd"].close() + + +class MSDPTopo1(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # Create 4 routers + for routern in range(1, 5): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r3"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s4") + #switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r4"]) + + # Create a host connected and direct at r4: + tgen.add_host("h1", "192.168.4.100/24", "192.168.4.1") + switch.add_link(tgen.gears["h1"]) + + # Create a host connected and direct at r1: + switch = tgen.add_switch("s6") + tgen.add_host("h2", "192.168.10.100/24", "192.168.10.1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["h2"]) + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(MSDPTopo1, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.items(): + daemon_file = "{}/{}/zebra.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_ZEBRA, daemon_file) + + daemon_file = "{}/{}/bgpd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_BGP, daemon_file) + + daemon_file = "{}/{}/pimd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_PIM, daemon_file) + + # Initialize all routers. + tgen.start_router() + + # Start applications socket. + listen_to_applications() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + close_applications() + tgen.stop_topology() + + +def test_bgp_convergence(): + "Wait for BGP protocol convergence" + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("waiting for protocols to converge") + + def expect_loopback_route(router, iptype, route, proto): + "Wait until route is present on RIB for protocol." 
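    # expect_loopback_route() polls "show <iptype> route json" through
    # topotest.run_and_expect()/topotest.router_json_cmp() until the prefix
    # is installed with the expected protocol, retrying for up to
    # count * wait (130 * 1s) seconds before the assertion fails.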
+ logger.info("waiting route {} in {}".format(route, router)) + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show {} route json".format(iptype), + {route: [{"protocol": proto}]}, + ) + _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) + assertmsg = '"{}" convergence failure'.format(router) + assert result is None, assertmsg + + # Wait for R1 + expect_loopback_route("r1", "ip", "10.254.254.2/32", "bgp") + expect_loopback_route("r1", "ip", "10.254.254.3/32", "bgp") + expect_loopback_route("r1", "ip", "10.254.254.4/32", "bgp") + + # Wait for R2 + expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp") + expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp") + expect_loopback_route("r2", "ip", "10.254.254.4/32", "bgp") + + # Wait for R3 + expect_loopback_route("r3", "ip", "10.254.254.1/32", "bgp") + expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp") + expect_loopback_route("r3", "ip", "10.254.254.4/32", "bgp") + + # Wait for R4 + expect_loopback_route("r4", "ip", "10.254.254.1/32", "bgp") + expect_loopback_route("r4", "ip", "10.254.254.2/32", "bgp") + expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp") + + +def test_mroute_install(): + "Test that multicast routes propagated and installed" + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["h1"].run("{} '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, '229.1.2.3', 'h1-eth0')) + accept_host("h1") + + tgen.gears["h2"].run("{} --send='0.7' '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, '229.1.2.3', 'h2-eth0')) + accept_host("h2") + + # + # Test R1 mroute + # + expect_1 = { + '229.1.2.3': { + '192.168.10.100': { + 'iif': 'r1-eth2', + 'flags': 'SFT', + 'oil': { + 'r1-eth0': { + 'source': '192.168.10.100', + 'group': '229.1.2.3' + }, + 'r1-eth1': None + } + } + } + } + # Create a deep copy of `expect_1`. + expect_2 = json.loads(json.dumps(expect_1)) + # The route will be either via R2 or R3. + expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth0'] = None + expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth1'] = { + 'source': '192.168.10.100', + 'group': '229.1.2.3' + } + + def test_r1_mroute(): + "Test r1 multicast routing table function" + out = tgen.gears['r1'].vtysh_cmd('show ip mroute json', isjson=True) + if topotest.json_cmp(out, expect_1) is None: + return None + return topotest.json_cmp(out, expect_2) + + logger.info('Waiting for R1 multicast routes') + _, val = topotest.run_and_expect(test_r1_mroute, None, count=55, wait=2) + assert val is None, 'multicast route convergence failure' + + # + # Test routers 2 and 3. + # + # NOTE: only one of the paths will get the multicast route. 
+ # + expect_r2 = { + "229.1.2.3": { + "192.168.10.100": { + "iif": "r2-eth0", + "flags": "S", + "oil": { + "r2-eth1": { + "source": "192.168.10.100", + "group": "229.1.2.3", + } + } + } + } + } + expect_r3 = { + "229.1.2.3": { + "192.168.10.100": { + "iif": "r3-eth0", + "flags": "S", + "oil": { + "r3-eth1": { + "source": "192.168.10.100", + "group": "229.1.2.3", + } + } + } + } + } + + def test_r2_r3_mroute(): + "Test r2/r3 multicast routing table function" + r2_out = tgen.gears['r2'].vtysh_cmd('show ip mroute json', isjson=True) + r3_out = tgen.gears['r3'].vtysh_cmd('show ip mroute json', isjson=True) + + if topotest.json_cmp(r2_out, expect_r2) is not None: + return topotest.json_cmp(r3_out, expect_r3) + + return topotest.json_cmp(r2_out, expect_r2) + + logger.info('Waiting for R2 and R3 multicast routes') + _, val = topotest.run_and_expect(test_r2_r3_mroute, None, count=55, wait=2) + assert val is None, 'multicast route convergence failure' + + # + # Test router 4 + # + expect_4 = { + "229.1.2.3": { + "*": { + "iif": "lo", + "flags": "SC", + "oil": { + "pimreg": { + "source": "*", + "group": "229.1.2.3", + "inboundInterface": "lo", + "outboundInterface": "pimreg" + }, + "r4-eth2": { + "source": "*", + "group": "229.1.2.3", + "inboundInterface": "lo", + "outboundInterface": "r4-eth2" + } + } + }, + "192.168.10.100": { + "iif": "r4-eth0", + "flags": "ST", + "oil": { + "r4-eth2": { + "source": "192.168.10.100", + "group": "229.1.2.3", + "inboundInterface": "r4-eth0", + "outboundInterface": "r4-eth2", + } + } + } + } + } + + test_func = partial( + topotest.router_json_cmp, + tgen.gears['r4'], "show ip mroute json", expect_4, + ) + logger.info('Waiting for R4 multicast routes') + _, val = topotest.run_and_expect(test_func, None, count=55, wait=2) + assert val is None, 'multicast route convergence failure' + + +def test_msdp(): + """ + Test MSDP convergence. + + MSDP non meshed groups must propagate the whole SA database (not just + their own) to all peers because not all peers talk with each other. + + This setup leads to a potential loop that can be prevented by checking + the route's first AS in AS path: it must match the remote eBGP AS number. + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1_expect = { + "192.168.0.2": { + "peer": "192.168.0.2", + "local": "192.168.0.1", + "state": "established" + }, + "192.168.1.2": { + "peer": "192.168.1.2", + "local": "192.168.1.1", + "state": "established" + } + } + r1_sa_expect = { + "229.1.2.3": { + "192.168.10.100": { + "source": "192.168.10.100", + "group": "229.1.2.3", + "rp": "-", + "local": "yes", + "sptSetup": "-" + } + } + } + r2_expect = { + "192.168.0.1": { + "peer": "192.168.0.1", + "local": "192.168.0.2", + "state": "established" + }, + "192.168.2.2": { + "peer": "192.168.2.2", + "local": "192.168.2.1", + "state": "established" + } + } + # Only R2 or R3 will get this SA. 
+ r2_r3_sa_expect = { + "229.1.2.3": { + "192.168.10.100": { + "source": "192.168.10.100", + "group": "229.1.2.3", + "rp": "192.168.1.1", + "local": "no", + "sptSetup": "no", + } + } + } + r3_expect = { + "192.168.1.1": { + "peer": "192.168.1.1", + "local": "192.168.1.2", + "state": "established" + }, + #"192.169.3.2": { + # "peer": "192.168.3.2", + # "local": "192.168.3.1", + # "state": "established" + #} + } + r4_expect = { + "192.168.2.1": { + "peer": "192.168.2.1", + "local": "192.168.2.2", + "state": "established" + }, + #"192.168.3.1": { + # "peer": "192.168.3.1", + # "local": "192.168.3.2", + # "state": "established" + #} + } + r4_sa_expect = { + "229.1.2.3": { + "192.168.10.100": { + "source": "192.168.10.100", + "group": "229.1.2.3", + "rp": "192.168.1.1", + "local": "no", + "sptSetup": "yes" + } + } + } + + for router in [('r1', r1_expect, r1_sa_expect), + ('r2', r2_expect, r2_r3_sa_expect), + ('r3', r3_expect, r2_r3_sa_expect), + ('r4', r4_expect, r4_sa_expect)]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router[0]], "show ip msdp peer json", router[1] + ) + logger.info('Waiting for {} msdp peer data'.format(router[0])) + _, val = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert val is None, 'multicast route convergence failure' + + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router[0]], "show ip msdp sa json", router[2] + ) + logger.info('Waiting for {} msdp SA data'.format(router[0])) + _, val = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert val is None, 'multicast route convergence failure' + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py index cd398a5111..827dde69ec 100644 --- a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py +++ b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py @@ -113,6 +113,9 @@ from lib.pim import ( from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.pimd, pytest.mark.staticd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/mcast_pim_bsmp_01.json".format(CWD) try: diff --git a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py index 60bd6de35d..98af4433ab 100644 --- a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py +++ b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py @@ -104,8 +104,7 @@ from lib.pim import ( from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json - -pytestmark = [pytest.mark.pimd] +pytestmark = [pytest.mark.pimd, pytest.mark.staticd] # Reading the data from JSON File for topology creation diff --git a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py index e55e30270d..99a6e5bacf 100755 --- a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py +++ b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py @@ -107,6 +107,9 @@ from lib.pim 
import ( from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.pimd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/multicast_pim_sm_topo1.json".format(CWD) try: @@ -526,9 +529,14 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request): {"dut": "r2", "src_address": source, "iif": "r2-f1-eth0", "oil": "r2-l1-eth2"}, {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"}, ] + # On timeout change from default of 80 to 120: failures logs indicate times 90+ + # seconds for success on the 2nd entry in the above table. Using 100s here restores + # previous 80 retries with 2s wait if we assume .5s per vtysh/show ip mroute runtime + # (41 * (2 + .5)) == 102. for data in input_dict: result = verify_ip_mroutes( - tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"], + retry_timeout=102 ) assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) diff --git a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py index ad3b77b843..f30902c1b2 100755 --- a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py +++ b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py @@ -102,6 +102,9 @@ from lib.pim import ( from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.pimd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/multicast_pim_sm_topo2.json".format(CWD) try: diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py index d73e8dc9e8..736cb1659c 100755 --- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py +++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py @@ -153,6 +153,9 @@ from lib.pim import ( clear_ip_mroute_verify, ) +pytestmark = [pytest.mark.pimd, pytest.mark.staticd] + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/multicast_pim_static_rp.json".format(CWD) try: diff --git a/tests/topotests/nhrp_topo/test_nhrp_topo.py b/tests/topotests/nhrp_topo/test_nhrp_topo.py index 1687961f34..f59e3ae1b9 100644 --- a/tests/topotests/nhrp_topo/test_nhrp_topo.py +++ b/tests/topotests/nhrp_topo/test_nhrp_topo.py @@ -45,6 +45,8 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.nhrpd] + class NHRPTopo(Topo): "Test topology builder" @@ -115,7 +117,7 @@ def setup_module(mod): ) # Initialize all routers. 
- logger.info('Launching BGP, NHRP') + logger.info('Launching NHRP') for name in router_list: router = tgen.gears[name] router.start() diff --git a/tests/topotests/ospf6_topo1/test_ospf6_topo1.py b/tests/topotests/ospf6_topo1/test_ospf6_topo1.py index bbd18a57ff..8a6544734a 100644 --- a/tests/topotests/ospf6_topo1/test_ospf6_topo1.py +++ b/tests/topotests/ospf6_topo1/test_ospf6_topo1.py @@ -91,6 +91,9 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger import platform +pytestmark = [pytest.mark.ospfd] + + ##################################################### ## ## Network Topology Definition diff --git a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py index b158099d9a..61a80cc9ec 100755 --- a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py +++ b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py @@ -94,6 +94,9 @@ from lib.topolog import logger from lib.topotest import iproute2_is_vrf_capable from lib.common_config import required_linux_kernel_version +pytestmark = [pytest.mark.ospfd] + + ##################################################### ## ## Network Topology Definition diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py index 41960ac79f..e61a6b5905 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py @@ -70,6 +70,9 @@ from lib.ospf import ( verify_ospf_summary, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None # Reading the data from JSON File for topology creation diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py index 393eb19a53..db177360b4 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py @@ -70,6 +70,9 @@ from lib.ospf import ( verify_ospf_summary, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None # Reading the data from JSON File for topology creation diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py index a7f2893eab..bdba8fd8e4 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py @@ -56,6 +56,9 @@ from lib.topojson import build_topo_from_json, build_config_from_json from lib.ospf import verify_ospf_neighbor, config_ospf_interface, clear_ospf from ipaddress import IPv4Address +pytestmark = [pytest.mark.ospfd] + + # Global variables topo = None # Reading the data from JSON File for topology creation diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py index 49ecaac9f7..5c57f8be25 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py @@ -68,6 +68,9 @@ from lib.ospf import ( redistribute_ospf, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + topo = None # Reading the data from JSON File for topology creation diff --git 
a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py index 47c6c45e39..96f781c150 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py @@ -70,6 +70,9 @@ from lib.ospf import ( ) from ipaddress import IPv4Address +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None # Reading the data from JSON File for topology creation diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py index d9b90a132a..c89a663380 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py @@ -71,6 +71,9 @@ from lib.ospf import ( ) from ipaddress import IPv4Address +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None # Reading the data from JSON File for topology creation diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py index 3644bff3dc..0af83548b9 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py @@ -63,6 +63,9 @@ sys.path.append(os.path.join(CWD, "../lib/")) # pylint: disable=C0413 # Import topogen and topotest helpers +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None # Reading the data from JSON File for topology creation diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py index be18ba5a78..0172f589c5 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py @@ -68,6 +68,9 @@ from lib.ospf import ( verify_ospf_database, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py index 0848f6c94a..bc6c248ad2 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py @@ -65,6 +65,9 @@ from lib.ospf import ( redistribute_ospf, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None # Reading the data from JSON File for topology creation diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py index f17346d5b1..0e2fef4a22 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py @@ -66,6 +66,9 @@ from lib.ospf import ( verify_ospf_interface, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py index e94680d974..a595bc0491 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py @@ -69,6 +69,7 @@ from lib.ospf import ( verify_ospf_database, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] # Global variables topo = None diff 
--git a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py index 5e7802fa04..b5f535cd06 100644 --- a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py +++ b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py @@ -42,6 +42,9 @@ from lib.ospf import ( verify_ospf_database, ) +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None diff --git a/tests/topotests/ospf_gr_topo1/__init__.py b/tests/topotests/ospf_gr_topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/__init__.py diff --git a/tests/topotests/ospf_gr_topo1/rt1/ospfd.conf b/tests/topotests/ospf_gr_topo1/rt1/ospfd.conf new file mode 100644 index 0000000000..9c04b74d35 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt1/ospfd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt1 +log file ospfd.log +log commands +! +debug ospf zebra +debug ospf event +debug ospf lsa +debug ospf te +debug ospf packet all +debug ospf packet ls-update detail +debug ospf ism +debug ospf nsm +debug ospf nssa +debug ospf graceful-restart +! +interface lo + ip ospf area 1 +! +interface eth-rt2 + ip ospf network point-to-point + ip ospf area 1 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +router ospf + router-id 1.1.1.1 + capability opaque + redistribute connected + graceful-restart grace-period 120 + graceful-restart helper-only +! diff --git a/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_database.json b/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_database.json new file mode 100644 index 0000000000..d01ac74c17 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_database.json @@ -0,0 +1,98 @@ +{ + "routerId":"1.1.1.1", + "areas":{ + "0.0.0.1":{ + "routerLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"1.1.1.1", + "numOfRouterLinks":3 + }, + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "numOfRouterLinks":2 + } + ], + "summaryLinkStates":[ + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"2.2.2.2\/32" + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"3.3.3.3\/32" + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"4.4.4.4\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"6.6.6.6\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.2.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.2.0\/24" + }, + { + "lsId":"10.0.3.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.3.0\/24" + }, + { + "lsId":"10.0.4.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.4.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.5.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"6.6.6.6", + "advertisedRouter":"2.2.2.2" + } + ] + } + }, + "asExternalLinkStates":[ + { + "lsId":"172.16.1.0", + "advertisedRouter":"1.1.1.1", + "metricType":"E2", + "route":"172.16.1.0\/24", + "tag":0 + }, + { + "lsId":"192.168.1.0", + "advertisedRouter":"6.6.6.6", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_neighbor.json 
b/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..ed290323a4 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_neighbor.json @@ -0,0 +1,11 @@ +{ + "neighbors":{ + "2.2.2.2":[ + { + "state":"Full\/DROther", + "address":"10.0.1.2", + "ifaceName":"eth-rt2:10.0.1.1" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_route.json b/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_route.json new file mode 100644 index 0000000000..548ca1e2d1 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt1/show_ip_ospf_route.json @@ -0,0 +1,180 @@ +{ + "1.1.1.1\/32":{ + "routeType":"N", + "cost":0, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":" ", + "directly attached to":"lo" + } + ] + }, + "2.2.2.2\/32":{ + "routeType":"N IA", + "cost":10, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "3.3.3.3\/32":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "4.4.4.4\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "5.5.5.5\/32":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "6.6.6.6\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "7.7.7.7\/32":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "10.0.1.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt2" + } + ] + }, + "10.0.2.0\/24":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "10.0.3.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "10.0.4.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "10.0.5.0\/24":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "10.0.6.0\/24":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "2.2.2.2":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.1", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "6.6.6.6":{ + "routeType":"R ", + "cost":30, + "area":"0.0.0.1", + "IA":true, + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + }, + "192.168.1.0\/24":{ + "routeType":"N E2", + "cost":40, + "nexthops":[ + { + "ip":"10.0.1.2", + "via":"eth-rt2" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt1/show_ip_route.json b/tests/topotests/ospf_gr_topo1/rt1/show_ip_route.json new file mode 100644 index 0000000000..3dce1eee3e --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt1/show_ip_route.json @@ -0,0 +1,210 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", 
+ "interfaceName":"eth-rt2" + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "7.7.7.7\/32":[ + { + "prefix":"7.7.7.7\/32", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "192.168.1.0\/24":[ + { + "prefix":"192.168.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt1/zebra.conf b/tests/topotests/ospf_gr_topo1/rt1/zebra.conf new file mode 100644 index 0000000000..183cd3df48 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt1/zebra.conf @@ -0,0 +1,23 @@ +password 1 +hostname rt1 +log file zebra.log +log commands +! +debug zebra event +debug zebra packet +debug zebra rib +debug zebra kernel +! +interface lo + ip address 1.1.1.1/32 +! +interface stub1 + ip address 172.16.1.1/24 +! +interface eth-rt2 + ip address 10.0.1.1/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf_gr_topo1/rt2/ospfd.conf b/tests/topotests/ospf_gr_topo1/rt2/ospfd.conf new file mode 100644 index 0000000000..922db8c8cc --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt2/ospfd.conf @@ -0,0 +1,37 @@ +password 1 +hostname rt2 +log file ospfd.log +log commands +! +debug ospf zebra +debug ospf event +debug ospf lsa +debug ospf te +debug ospf packet all +debug ospf packet ls-update detail +debug ospf ism +debug ospf nsm +debug ospf nssa +debug ospf graceful-restart +! 
+interface lo + ip ospf area 0 +! +interface eth-rt1 + ip ospf network point-to-point + ip ospf area 1 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +interface eth-rt3 + ip ospf network point-to-point + ip ospf area 0 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +router ospf + router-id 2.2.2.2 + capability opaque + graceful-restart grace-period 120 + graceful-restart helper-only +! diff --git a/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_database.json b/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_database.json new file mode 100644 index 0000000000..40c3e82d6a --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_database.json @@ -0,0 +1,160 @@ +{ + "routerId":"2.2.2.2", + "areas":{ + "0.0.0.0":{ + "routerLinkStates":[ + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "numOfRouterLinks":3 + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"3.3.3.3", + "numOfRouterLinks":7 + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "numOfRouterLinks":3 + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.5.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2" + } + ] + }, + "0.0.0.1":{ + "routerLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"1.1.1.1", + "numOfRouterLinks":3 + }, + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "numOfRouterLinks":2 + } + ], + "summaryLinkStates":[ + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"2.2.2.2\/32" + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"3.3.3.3\/32" + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"4.4.4.4\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"6.6.6.6\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.2.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.2.0\/24" + }, + { + "lsId":"10.0.3.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.3.0\/24" + }, + { + "lsId":"10.0.4.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.4.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.5.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"6.6.6.6", + "advertisedRouter":"2.2.2.2" + } + ] + } + }, + "asExternalLinkStates":[ + { + "lsId":"172.16.1.0", + "advertisedRouter":"1.1.1.1", + "metricType":"E2", + "route":"172.16.1.0\/24", + "tag":0 + }, + { + "lsId":"192.168.1.0", + "advertisedRouter":"6.6.6.6", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_neighbor.json 
b/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..4fe92b0b98 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_neighbor.json @@ -0,0 +1,18 @@ +{ + "neighbors":{ + "1.1.1.1":[ + { + "state":"Full\/DROther", + "address":"10.0.1.1", + "ifaceName":"eth-rt1:10.0.1.2" + } + ], + "3.3.3.3":[ + { + "state":"Full\/DROther", + "address":"10.0.2.3", + "ifaceName":"eth-rt3:10.0.2.2" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_route.json b/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_route.json new file mode 100644 index 0000000000..4accb2ba4a --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt2/show_ip_ospf_route.json @@ -0,0 +1,201 @@ +{ + "1.1.1.1\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":"10.0.1.1", + "via":"eth-rt1" + } + ] + }, + "2.2.2.2\/32":{ + "routeType":"N", + "cost":0, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"lo" + } + ] + }, + "3.3.3.3\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "4.4.4.4\/32":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "5.5.5.5\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "6.6.6.6\/32":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "7.7.7.7\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "10.0.1.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.1", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt1" + } + ] + }, + "10.0.2.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt3" + } + ] + }, + "10.0.3.0\/24":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "10.0.4.0\/24":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "10.0.5.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "10.0.6.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "1.1.1.1":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.1", + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.1.1", + "via":"eth-rt1" + } + ] + }, + "4.4.4.4":{ + "routeType":"R ", + "cost":20, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "6.6.6.6":{ + "routeType":"R ", + "cost":20, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + }, + "172.16.1.0\/24":{ + "routeType":"N E2", + "cost":10, + "nexthops":[ + { + "ip":"10.0.1.1", + "via":"eth-rt1" + } + ] + }, + "192.168.1.0\/24":{ + "routeType":"N E2", + "cost":30, + "nexthops":[ + { + "ip":"10.0.2.3", + "via":"eth-rt3" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt2/show_ip_route.json b/tests/topotests/ospf_gr_topo1/rt2/show_ip_route.json new file mode 100644 index 0000000000..8989a45765 --- /dev/null +++ 
b/tests/topotests/ospf_gr_topo1/rt2/show_ip_route.json @@ -0,0 +1,224 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-rt1" + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "7.7.7.7\/32":[ + { + "prefix":"7.7.7.7\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt1" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "172.16.1.0\/24":[ + { + "prefix":"172.16.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-rt1" + } + ] + } + ], + "192.168.1.0\/24":[ + { + "prefix":"192.168.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt2/zebra.conf b/tests/topotests/ospf_gr_topo1/rt2/zebra.conf new file mode 100644 index 0000000000..8bde98ad44 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt2/zebra.conf @@ -0,0 +1,23 @@ +password 1 +hostname rt2 +log file zebra.log +log commands +! +debug zebra event +debug zebra packet +debug zebra rib +debug zebra kernel +! +interface lo + ip address 2.2.2.2/32 +! 
+interface eth-rt1 + ip address 10.0.1.2/24 +! +interface eth-rt3 + ip address 10.0.2.2/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf_gr_topo1/rt3/ospfd.conf b/tests/topotests/ospf_gr_topo1/rt3/ospfd.conf new file mode 100644 index 0000000000..51e48f13da --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt3/ospfd.conf @@ -0,0 +1,43 @@ +password 1 +hostname rt3 +log file ospfd.log +log commands +! +debug ospf zebra +debug ospf event +debug ospf lsa +debug ospf te +debug ospf packet all +debug ospf packet ls-update detail +debug ospf ism +debug ospf nsm +debug ospf nssa +debug ospf graceful-restart +! +interface lo + ip ospf area 0 +! +interface eth-rt2 + ip ospf network point-to-point + ip ospf area 0 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +interface eth-rt4 + ip ospf network point-to-point + ip ospf area 0 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +interface eth-rt6 + ip ospf network point-to-point + ip ospf area 0 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +router ospf + router-id 3.3.3.3 + capability opaque + graceful-restart grace-period 120 + graceful-restart helper-only +! diff --git a/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_database.json b/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_database.json new file mode 100644 index 0000000000..1fc5b546e4 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_database.json @@ -0,0 +1,83 @@ +{ + "routerId":"3.3.3.3", + "areas":{ + "0.0.0.0":{ + "routerLinkStates":[ + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "numOfRouterLinks":3 + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"3.3.3.3", + "numOfRouterLinks":7 + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "numOfRouterLinks":3 + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.5.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2" + } + ] + } + }, + "asExternalLinkStates":[ + { + "lsId":"172.16.1.0", + "advertisedRouter":"1.1.1.1", + "metricType":"E2", + "route":"172.16.1.0\/24", + "tag":0 + }, + { + "lsId":"192.168.1.0", + "advertisedRouter":"6.6.6.6", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_neighbor.json b/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..e3c36ab9a3 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_neighbor.json @@ -0,0 +1,25 @@ +{ + "neighbors":{ + "2.2.2.2":[ + { + "state":"Full\/DROther", + "address":"10.0.2.2", + "ifaceName":"eth-rt2:10.0.2.3" + } + ], + "4.4.4.4":[ + { + "state":"Full\/DROther", + "address":"10.0.3.4", + "ifaceName":"eth-rt4:10.0.3.3" + } + ], + "6.6.6.6":[ + { + "state":"Full\/DROther", + "address":"10.0.4.6", + "ifaceName":"eth-rt6:10.0.4.3" + } + ] + } +} diff --git 
a/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_route.json b/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_route.json new file mode 100644 index 0000000000..b2f37e25a1 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt3/show_ip_ospf_route.json @@ -0,0 +1,214 @@ +{ + "1.1.1.1\/32":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.2", + "via":"eth-rt2" + } + ] + }, + "2.2.2.2\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.2", + "via":"eth-rt2" + } + ] + }, + "3.3.3.3\/32":{ + "routeType":"N", + "cost":0, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"lo" + } + ] + }, + "4.4.4.4\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.4", + "via":"eth-rt4" + } + ] + }, + "5.5.5.5\/32":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.4", + "via":"eth-rt4" + } + ] + }, + "6.6.6.6\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.6", + "via":"eth-rt6" + } + ] + }, + "7.7.7.7\/32":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.6", + "via":"eth-rt6" + } + ] + }, + "10.0.1.0\/24":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.2.2", + "via":"eth-rt2" + } + ] + }, + "10.0.2.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt2" + } + ] + }, + "10.0.3.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt4" + } + ] + }, + "10.0.4.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt6" + } + ] + }, + "10.0.5.0\/24":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.4", + "via":"eth-rt4" + } + ] + }, + "10.0.6.0\/24":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.6", + "via":"eth-rt6" + } + ] + }, + "1.1.1.1":{ + "routeType":"R ", + "cost":20, + "area":"0.0.0.0", + "IA":true, + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.2.2", + "via":"eth-rt2" + } + ] + }, + "2.2.2.2":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.2.2", + "via":"eth-rt2" + } + ] + }, + "4.4.4.4":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.3.4", + "via":"eth-rt4" + } + ] + }, + "6.6.6.6":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.4.6", + "via":"eth-rt6" + } + ] + }, + "172.16.1.0\/24":{ + "routeType":"N E2", + "cost":20, + "nexthops":[ + { + "ip":"10.0.2.2", + "via":"eth-rt2" + } + ] + }, + "192.168.1.0\/24":{ + "routeType":"N E2", + "cost":20, + "nexthops":[ + { + "ip":"10.0.4.6", + "via":"eth-rt6" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt3/show_ip_route.json b/tests/topotests/ospf_gr_topo1/rt3/show_ip_route.json new file mode 100644 index 0000000000..c9a1e18b92 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt3/show_ip_route.json @@ -0,0 +1,223 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + 
"protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.4.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "7.7.7.7\/32":[ + { + "prefix":"7.7.7.7\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "172.16.1.0\/24":[ + { + "prefix":"172.16.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "192.168.1.0\/24":[ + { + "prefix":"192.168.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt3/zebra.conf b/tests/topotests/ospf_gr_topo1/rt3/zebra.conf new file mode 100644 index 0000000000..dfd89cbe5b --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt3/zebra.conf @@ -0,0 +1,26 @@ +password 1 +hostname rt3 +log file zebra.log +log commands +! +debug zebra event +debug zebra packet +debug zebra rib +debug zebra kernel +! +interface lo + ip address 3.3.3.3/32 +! +interface eth-rt2 + ip address 10.0.2.3/24 +! +interface eth-rt4 + ip address 10.0.3.3/24 +! +interface eth-rt6 + ip address 10.0.4.3/24 +! +ip forwarding +! +line vty +! 
diff --git a/tests/topotests/ospf_gr_topo1/rt4/ospfd.conf b/tests/topotests/ospf_gr_topo1/rt4/ospfd.conf new file mode 100644 index 0000000000..a54f27a1d7 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt4/ospfd.conf @@ -0,0 +1,37 @@ +password 1 +hostname rt4 +log file ospfd.log +log commands +! +debug ospf zebra +debug ospf event +debug ospf lsa +debug ospf te +debug ospf packet all +debug ospf packet ls-update detail +debug ospf ism +debug ospf nsm +debug ospf nssa +debug ospf graceful-restart +! +interface lo + ip ospf area 0 +! +interface eth-rt3 + ip ospf network point-to-point + ip ospf area 0 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +interface eth-rt5 + ip ospf network point-to-point + ip ospf area 2 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +router ospf + router-id 4.4.4.4 + capability opaque + graceful-restart grace-period 120 + graceful-restart helper-only +! diff --git a/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_database.json b/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_database.json new file mode 100644 index 0000000000..87b80414c9 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_database.json @@ -0,0 +1,164 @@ +{ + "routerId":"4.4.4.4", + "areas":{ + "0.0.0.0":{ + "routerLinkStates":[ + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "numOfRouterLinks":3 + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"3.3.3.3", + "numOfRouterLinks":7 + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "numOfRouterLinks":3 + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.5.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2" + } + ] + }, + "0.0.0.2":{ + "routerLinkStates":[ + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "numOfRouterLinks":2 + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"5.5.5.5", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"2.2.2.2", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"2.2.2.2\/32" + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"3.3.3.3\/32" + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"4.4.4.4\/32" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"6.6.6.6\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.2.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.2.0\/24" + }, + { + "lsId":"10.0.3.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.3.0\/24" + }, + { + "lsId":"10.0.4.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.4.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"4.4.4.4", + 
"summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"4.4.4.4" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"4.4.4.4" + } + ] + } + }, + "asExternalLinkStates":[ + { + "lsId":"172.16.1.0", + "advertisedRouter":"1.1.1.1", + "metricType":"E2", + "route":"172.16.1.0\/24", + "tag":0 + }, + { + "lsId":"192.168.1.0", + "advertisedRouter":"6.6.6.6", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_neighbor.json b/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..2123ecb8da --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_neighbor.json @@ -0,0 +1,18 @@ +{ + "neighbors":{ + "3.3.3.3":[ + { + "state":"Full\/DROther", + "address":"10.0.3.3", + "ifaceName":"eth-rt3:10.0.3.4" + } + ], + "5.5.5.5":[ + { + "state":"Full\/DROther", + "address":"10.0.5.5", + "ifaceName":"eth-rt5:10.0.5.4" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_route.json b/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_route.json new file mode 100644 index 0000000000..04e318aef0 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt4/show_ip_ospf_route.json @@ -0,0 +1,202 @@ +{ + "1.1.1.1\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "2.2.2.2\/32":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "3.3.3.3\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "4.4.4.4\/32":{ + "routeType":"N", + "cost":0, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"lo" + } + ] + }, + "5.5.5.5\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.5", + "via":"eth-rt5" + } + ] + }, + "6.6.6.6\/32":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "7.7.7.7\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "10.0.1.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "10.0.2.0\/24":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "10.0.3.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt3" + } + ] + }, + "10.0.4.0\/24":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "10.0.5.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt5" + } + ] + }, + "10.0.6.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "1.1.1.1":{ + "routeType":"R ", + "cost":30, + "area":"0.0.0.0", + "IA":true, + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "2.2.2.2":{ + "routeType":"R ", + "cost":20, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "6.6.6.6":{ + "routeType":"R ", + "cost":20, + "area":"0.0.0.0", + 
"routerType":"abr", + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "172.16.1.0\/24":{ + "routeType":"N E2", + "cost":30, + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + }, + "192.168.1.0\/24":{ + "routeType":"N E2", + "cost":30, + "nexthops":[ + { + "ip":"10.0.3.3", + "via":"eth-rt3" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt4/show_ip_route.json b/tests/topotests/ospf_gr_topo1/rt4/show_ip_route.json new file mode 100644 index 0000000000..8058f8f431 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt4/show_ip_route.json @@ -0,0 +1,224 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5" + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "7.7.7.7\/32":[ + { + "prefix":"7.7.7.7\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt5" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "172.16.1.0\/24":[ + { + "prefix":"172.16.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "192.168.1.0\/24":[ + { + "prefix":"192.168.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + 
"ip":"10.0.3.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt4/zebra.conf b/tests/topotests/ospf_gr_topo1/rt4/zebra.conf new file mode 100644 index 0000000000..f399b29f3f --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt4/zebra.conf @@ -0,0 +1,23 @@ +password 1 +hostname rt4 +log file zebra.log +log commands +! +debug zebra event +debug zebra packet +debug zebra rib +debug zebra kernel +! +interface lo + ip address 4.4.4.4/32 +! +interface eth-rt3 + ip address 10.0.3.4/24 +! +interface eth-rt5 + ip address 10.0.5.4/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf_gr_topo1/rt5/ospfd.conf b/tests/topotests/ospf_gr_topo1/rt5/ospfd.conf new file mode 100644 index 0000000000..724af0e97c --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt5/ospfd.conf @@ -0,0 +1,31 @@ +password 1 +hostname rt5 +log file ospfd.log +log commands +! +debug ospf zebra +debug ospf event +debug ospf lsa +debug ospf te +debug ospf packet all +debug ospf packet ls-update detail +debug ospf ism +debug ospf nsm +debug ospf nssa +debug ospf graceful-restart +! +interface lo + ip ospf area 2 +! +interface eth-rt4 + ip ospf network point-to-point + ip ospf area 2 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +router ospf + router-id 5.5.5.5 + capability opaque + graceful-restart grace-period 120 + graceful-restart helper-only +! diff --git a/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_database.json b/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_database.json new file mode 100644 index 0000000000..aeb8604473 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_database.json @@ -0,0 +1,102 @@ +{ + "routerId":"5.5.5.5", + "areas":{ + "0.0.0.2":{ + "routerLinkStates":[ + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "numOfRouterLinks":2 + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"5.5.5.5", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"2.2.2.2", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"2.2.2.2\/32" + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"3.3.3.3\/32" + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"4.4.4.4\/32" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"6.6.6.6\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.2.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.2.0\/24" + }, + { + "lsId":"10.0.3.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.3.0\/24" + }, + { + "lsId":"10.0.4.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.4.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"4.4.4.4" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"4.4.4.4" + } + ] + } + }, + "asExternalLinkStates":[ + { + "lsId":"172.16.1.0", + "advertisedRouter":"1.1.1.1", + "metricType":"E2", + "route":"172.16.1.0\/24", + "tag":0 + }, + { + "lsId":"192.168.1.0", + "advertisedRouter":"6.6.6.6", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_neighbor.json 
b/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..6440b67698 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_neighbor.json @@ -0,0 +1,11 @@ +{ + "neighbors":{ + "4.4.4.4":[ + { + "state":"Full\/DROther", + "address":"10.0.5.4", + "ifaceName":"eth-rt4:10.0.5.5" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_route.json b/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_route.json new file mode 100644 index 0000000000..e7f712ea6b --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt5/show_ip_ospf_route.json @@ -0,0 +1,203 @@ +{ + "1.1.1.1\/32":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "2.2.2.2\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "3.3.3.3\/32":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "4.4.4.4\/32":{ + "routeType":"N IA", + "cost":10, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "5.5.5.5\/32":{ + "routeType":"N", + "cost":0, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":" ", + "directly attached to":"lo" + } + ] + }, + "6.6.6.6\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "7.7.7.7\/32":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "10.0.1.0\/24":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "10.0.2.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "10.0.3.0\/24":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "10.0.4.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "10.0.5.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt4" + } + ] + }, + "10.0.6.0\/24":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.2", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "1.1.1.1":{ + "routeType":"R ", + "cost":40, + "area":"0.0.0.2", + "IA":true, + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "4.4.4.4":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.2", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "6.6.6.6":{ + "routeType":"R ", + "cost":30, + "area":"0.0.0.2", + "IA":true, + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "172.16.1.0\/24":{ + "routeType":"N E2", + "cost":40, + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + }, + "192.168.1.0\/24":{ + "routeType":"N E2", + "cost":40, + "nexthops":[ + { + "ip":"10.0.5.4", + "via":"eth-rt4" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt5/show_ip_route.json b/tests/topotests/ospf_gr_topo1/rt5/show_ip_route.json new file mode 100644 index 0000000000..9896839440 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt5/show_ip_route.json @@ -0,0 +1,225 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", 
+ "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "7.7.7.7\/32":[ + { + "prefix":"7.7.7.7\/32", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "172.16.1.0\/24":[ + { + "prefix":"172.16.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "192.168.1.0\/24":[ + { + "prefix":"192.168.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.5.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt5/zebra.conf b/tests/topotests/ospf_gr_topo1/rt5/zebra.conf new file mode 100644 index 0000000000..49a1c05a6d --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt5/zebra.conf @@ -0,0 +1,20 @@ +password 1 +hostname rt5 +log file zebra.log +log commands +! +debug zebra event +debug zebra packet +debug zebra rib +debug zebra kernel +! +interface lo + ip address 5.5.5.5/32 +! +interface eth-rt4 + ip address 10.0.5.5/24 +! +ip forwarding +! +line vty +! 
diff --git a/tests/topotests/ospf_gr_topo1/rt6/ospfd.conf b/tests/topotests/ospf_gr_topo1/rt6/ospfd.conf new file mode 100644 index 0000000000..0b9b83bcd2 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt6/ospfd.conf @@ -0,0 +1,38 @@ +password 1 +hostname rt6 +log file ospfd.log +log commands +! +debug ospf zebra +debug ospf event +debug ospf lsa +debug ospf te +debug ospf packet all +debug ospf packet ls-update detail +debug ospf ism +debug ospf nsm +debug ospf nssa +debug ospf graceful-restart +! +interface lo + ip ospf area 0 +! +interface eth-rt3 + ip ospf network point-to-point + ip ospf area 0 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +interface eth-rt7 + ip ospf network point-to-point + ip ospf area 3 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +router ospf + router-id 6.6.6.6 + capability opaque + area 3 nssa + graceful-restart grace-period 120 + graceful-restart helper-only +! diff --git a/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_database.json b/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_database.json new file mode 100644 index 0000000000..294b2c904e --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_database.json @@ -0,0 +1,168 @@ +{ + "routerId":"6.6.6.6", + "areas":{ + "0.0.0.0":{ + "routerLinkStates":[ + { + "lsId":"2.2.2.2", + "advertisedRouter":"2.2.2.2", + "numOfRouterLinks":3 + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"3.3.3.3", + "numOfRouterLinks":7 + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"4.4.4.4", + "numOfRouterLinks":3 + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"7.7.7.7\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"2.2.2.2", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"4.4.4.4", + "summaryAddress":"10.0.5.0\/24" + }, + { + "lsId":"10.0.6.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.6.0\/24" + } + ], + "asbrSummaryLinkStates":[ + { + "lsId":"1.1.1.1", + "advertisedRouter":"2.2.2.2" + } + ] + }, + "0.0.0.3":{ + "routerLinkStates":[ + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "numOfRouterLinks":2 + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"7.7.7.7", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"0.0.0.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"0.0.0.0\/0" + }, + { + "lsId":"1.1.1.1", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"2.2.2.2", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"2.2.2.2\/32" + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"3.3.3.3\/32" + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"4.4.4.4\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"6.6.6.6\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.2.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.2.0\/24" + }, + { + "lsId":"10.0.3.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.3.0\/24" + }, + { + "lsId":"10.0.4.0", + 
"advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.4.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.5.0\/24" + } + ], + "nssaExternalLinkStates":[ + { + "lsId":"192.168.1.0", + "advertisedRouter":"7.7.7.7", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] + } + }, + "asExternalLinkStates":[ + { + "lsId":"172.16.1.0", + "advertisedRouter":"1.1.1.1", + "metricType":"E2", + "route":"172.16.1.0\/24", + "tag":0 + }, + { + "lsId":"192.168.1.0", + "advertisedRouter":"6.6.6.6", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_neighbor.json b/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..d815c23927 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_neighbor.json @@ -0,0 +1,18 @@ +{ + "neighbors":{ + "3.3.3.3":[ + { + "state":"Full\/DROther", + "address":"10.0.4.3", + "ifaceName":"eth-rt3:10.0.4.6" + } + ], + "7.7.7.7":[ + { + "state":"Full\/DROther", + "address":"10.0.6.7", + "ifaceName":"eth-rt7:10.0.6.6" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_route.json b/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_route.json new file mode 100644 index 0000000000..d9009724d5 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt6/show_ip_ospf_route.json @@ -0,0 +1,214 @@ +{ + "1.1.1.1\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "2.2.2.2\/32":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "3.3.3.3\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "4.4.4.4\/32":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "5.5.5.5\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "6.6.6.6\/32":{ + "routeType":"N", + "cost":0, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"lo" + } + ] + }, + "7.7.7.7\/32":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.7", + "via":"eth-rt7" + } + ] + }, + "10.0.1.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "10.0.2.0\/24":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "10.0.3.0\/24":{ + "routeType":"N", + "cost":20, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "10.0.4.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt3" + } + ] + }, + "10.0.5.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.0", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "10.0.6.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt7" + } + ] + }, + "1.1.1.1":{ + "routeType":"R ", + "cost":30, + "area":"0.0.0.0", + "IA":true, + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "2.2.2.2":{ + "routeType":"R ", + "cost":20, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + 
{ + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "4.4.4.4":{ + "routeType":"R ", + "cost":20, + "area":"0.0.0.0", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "7.7.7.7":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.3", + "routerType":"asbr", + "nexthops":[ + { + "ip":"10.0.6.7", + "via":"eth-rt7" + } + ] + }, + "172.16.1.0\/24":{ + "routeType":"N E2", + "cost":30, + "nexthops":[ + { + "ip":"10.0.4.3", + "via":"eth-rt3" + } + ] + }, + "192.168.1.0\/24":{ + "routeType":"N E2", + "cost":10, + "nexthops":[ + { + "ip":"10.0.6.7", + "via":"eth-rt7" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt6/show_ip_route.json b/tests/topotests/ospf_gr_topo1/rt6/show_ip_route.json new file mode 100644 index 0000000000..dd95f1fab1 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt6/show_ip_route.json @@ -0,0 +1,224 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "7.7.7.7\/32":[ + { + "prefix":"7.7.7.7\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.6.7", + "afi":"ipv4", + "interfaceName":"eth-rt7" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt7" + } + ] + } + ], + "172.16.1.0\/24":[ + { + "prefix":"172.16.1.0\/24", + 
"protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3" + } + ] + } + ], + "192.168.1.0\/24":[ + { + "prefix":"192.168.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.6.7", + "afi":"ipv4", + "interfaceName":"eth-rt7" + } + ] + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt6/zebra.conf b/tests/topotests/ospf_gr_topo1/rt6/zebra.conf new file mode 100644 index 0000000000..d6a8f52b3a --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt6/zebra.conf @@ -0,0 +1,23 @@ +password 1 +hostname rt6 +log file zebra.log +log commands +! +debug zebra event +debug zebra packet +debug zebra rib +debug zebra kernel +! +interface lo + ip address 6.6.6.6/32 +! +interface eth-rt3 + ip address 10.0.4.6/24 +! +interface eth-rt7 + ip address 10.0.6.6/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf_gr_topo1/rt7/ospfd.conf b/tests/topotests/ospf_gr_topo1/rt7/ospfd.conf new file mode 100644 index 0000000000..49db254410 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt7/ospfd.conf @@ -0,0 +1,33 @@ +password 1 +hostname rt7 +log file ospfd.log +log commands +! +debug ospf zebra +debug ospf event +debug ospf lsa +debug ospf te +debug ospf packet all +debug ospf packet ls-update detail +debug ospf ism +debug ospf nsm +debug ospf nssa +debug ospf graceful-restart +! +interface lo + ip ospf area 3 +! +interface eth-rt6 + ip ospf network point-to-point + ip ospf area 3 + ip ospf hello-interval 3 + ip ospf dead-interval 9 +! +router ospf + router-id 7.7.7.7 + capability opaque + redistribute connected + area 3 nssa + graceful-restart grace-period 120 + graceful-restart helper-only +! diff --git a/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_database.json b/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_database.json new file mode 100644 index 0000000000..4916fba9d4 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_database.json @@ -0,0 +1,99 @@ +{ + "routerId":"7.7.7.7", + "areas":{ + "0.0.0.3":{ + "routerLinkStates":[ + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "numOfRouterLinks":2 + }, + { + "lsId":"7.7.7.7", + "advertisedRouter":"7.7.7.7", + "numOfRouterLinks":3 + } + ], + "summaryLinkStates":[ + { + "lsId":"0.0.0.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"0.0.0.0\/0" + }, + { + "lsId":"1.1.1.1", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"1.1.1.1\/32" + }, + { + "lsId":"2.2.2.2", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"2.2.2.2\/32" + }, + { + "lsId":"3.3.3.3", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"3.3.3.3\/32" + }, + { + "lsId":"4.4.4.4", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"4.4.4.4\/32" + }, + { + "lsId":"5.5.5.5", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"5.5.5.5\/32" + }, + { + "lsId":"6.6.6.6", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"6.6.6.6\/32" + }, + { + "lsId":"10.0.1.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.1.0\/24" + }, + { + "lsId":"10.0.2.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.2.0\/24" + }, + { + "lsId":"10.0.3.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.3.0\/24" + }, + { + "lsId":"10.0.4.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.4.0\/24" + }, + { + "lsId":"10.0.5.0", + "advertisedRouter":"6.6.6.6", + "summaryAddress":"10.0.5.0\/24" + } + ], + "nssaExternalLinkStates":[ + { + "lsId":"192.168.1.0", + "advertisedRouter":"7.7.7.7", + "metricType":"E2", + 
"route":"192.168.1.0\/24", + "tag":0 + } + ] + } + }, + "asExternalLinkStates":[ + { + "lsId":"192.168.1.0", + "advertisedRouter":"7.7.7.7", + "metricType":"E2", + "route":"192.168.1.0\/24", + "tag":0 + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_neighbor.json b/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_neighbor.json new file mode 100644 index 0000000000..2254aea9a6 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_neighbor.json @@ -0,0 +1,11 @@ +{ + "neighbors":{ + "6.6.6.6":[ + { + "state":"Full\/DROther", + "address":"10.0.6.6", + "ifaceName":"eth-rt6:10.0.6.7" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_route.json b/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_route.json new file mode 100644 index 0000000000..89bad320bb --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt7/show_ip_ospf_route.json @@ -0,0 +1,168 @@ +{ + "0.0.0.0\/0":{ + "routeType":"N IA", + "cost":11, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "1.1.1.1\/32":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "2.2.2.2\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "3.3.3.3\/32":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "4.4.4.4\/32":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "5.5.5.5\/32":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "6.6.6.6\/32":{ + "routeType":"N IA", + "cost":10, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "7.7.7.7\/32":{ + "routeType":"N", + "cost":0, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":" ", + "directly attached to":"lo" + } + ] + }, + "10.0.1.0\/24":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "10.0.2.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "10.0.3.0\/24":{ + "routeType":"N IA", + "cost":30, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "10.0.4.0\/24":{ + "routeType":"N IA", + "cost":20, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "10.0.5.0\/24":{ + "routeType":"N IA", + "cost":40, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + }, + "10.0.6.0\/24":{ + "routeType":"N", + "cost":10, + "area":"0.0.0.3", + "nexthops":[ + { + "ip":" ", + "directly attached to":"eth-rt6" + } + ] + }, + "6.6.6.6":{ + "routeType":"R ", + "cost":10, + "area":"0.0.0.3", + "routerType":"abr", + "nexthops":[ + { + "ip":"10.0.6.6", + "via":"eth-rt6" + } + ] + } +} diff --git a/tests/topotests/ospf_gr_topo1/rt7/show_ip_route.json b/tests/topotests/ospf_gr_topo1/rt7/show_ip_route.json new file mode 100644 index 0000000000..0fb906b76b --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt7/show_ip_route.json @@ -0,0 +1,210 @@ +{ + "0.0.0.0\/0":[ + { + "prefix":"0.0.0.0\/0", + "protocol":"ospf", + "distance":110, + "metric":11, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + 
"1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "7.7.7.7\/32":[ + { + "prefix":"7.7.7.7\/32", + "protocol":"ospf", + "distance":110, + "metric":0, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "distance":110, + "metric":30, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "distance":110, + "metric":20, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "distance":110, + "metric":40, + "nexthops":[ + { + "ip":"10.0.6.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"ospf", + "distance":110, + "metric":10, + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt6" + } + ] + } + ] +} diff --git a/tests/topotests/ospf_gr_topo1/rt7/zebra.conf b/tests/topotests/ospf_gr_topo1/rt7/zebra.conf new file mode 100644 index 0000000000..c481e4532b --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/rt7/zebra.conf @@ -0,0 +1,23 @@ +password 1 +hostname rt7 +log file zebra.log +log commands +! +debug zebra event +debug zebra packet +debug zebra rib +debug zebra kernel +! +interface lo + ip address 7.7.7.7/32 +! +interface stub1 + ip address 192.168.1.1/24 +! +interface eth-rt6 + ip address 10.0.6.7/24 +! +ip forwarding +! +line vty +! 
diff --git a/tests/topotests/ospf_gr_topo1/test_ospf_gr_topo1.py b/tests/topotests/ospf_gr_topo1/test_ospf_gr_topo1.py new file mode 100755 index 0000000000..0507c2d516 --- /dev/null +++ b/tests/topotests/ospf_gr_topo1/test_ospf_gr_topo1.py @@ -0,0 +1,393 @@ +#!/usr/bin/env python + +# +# test_ospf_gr_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2021 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_ospf_gr_topo1.py: + + +---------+ + | RT1 | + | 1.1.1.1 | + +---------+ + |eth-rt2 + | + |10.0.1.0/24 + | + |eth-rt1 + +---------+ + | RT2 | + | 2.2.2.2 | + +---------+ + |eth-rt3 + | + |10.0.2.0/24 + | + |eth-rt2 + +---------+ + | RT3 | + | 3.3.3.3 | + +---------+ + eth-rt4| |eth-rt6 + | | + 10.0.3.0/24 | | 10.0.4.0/24 + +---------+ +--------+ + | | + |eth-rt3 |eth-rt3 + +---------+ +---------+ + | RT4 | | RT6 | + | 4.4.4.4 | | 6.6.6.6 | + +---------+ +---------+ + |eth-rt5 |eth-rt7 + | | + |10.0.5.0/24 |10.0.6.0/24 + | | + |eth-rt4 |eth-rt6 + +---------+ +---------+ + | RT5 | | RT7 | + | 5.5.5.5 | | 7.7.7.7 | + +---------+ +---------+ +""" + +import os +import sys +import pytest +import json +import re +import tempfile +from time import sleep +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.common_config import ( + kill_router_daemons, + start_router_daemons, +) + +# Required to instantiate the topology builder class. 
+from mininet.topo import Topo + +pytestmark = [pytest.mark.ospfd] + +# Global multi-dimensional dictionary containing all expected outputs +outputs = {} + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1") + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt1"], nodeif="stub1") + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt3") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt2") + + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt4") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt3") + + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt3") + + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") + + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt7") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt6") + + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["rt7"], nodeif="stub1") + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra configuration file + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + + +def router_compare_json_output(rname, command, reference, tries): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=tries, wait=0.5) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +def check_routers(initial_convergence=False, exiting=None, restarting=None): + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]: + # Check the RIB first, which should be preserved across restarts in + # all routers of the routing domain. + if initial_convergence == True: + tries = 240 + else: + tries = 1 + router_compare_json_output( + rname, "show ip route ospf json", "show_ip_route.json", tries + ) + + # Check that all adjacencies are up and running (except when there's + # an OSPF instance that is shutting down). + if exiting == None: + tries = 240 + router_compare_json_output( + rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json", tries + ) + + # Check the OSPF RIB and LSDB. + # In the restarting router, wait up to one minute for the LSDB to converge. 
+ if exiting != rname: + if initial_convergence == True or restarting == rname: + tries = 240 + else: + tries = 1 + router_compare_json_output( + rname, "show ip ospf database json", "show_ip_ospf_database.json", tries + ) + router_compare_json_output( + rname, "show ip ospf route json", "show_ip_ospf_route.json", tries + ) + + +# +# Test initial network convergence +# +def test_initial_convergence(): + logger.info("Test: verify initial network convergence") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_routers(initial_convergence=True) + + +# +# Test rt1 performing a graceful restart +# +def test_gr_rt1(): + logger.info("Test: verify rt1 performing a graceful restart") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.net["rt1"].cmd('vtysh -c "graceful-restart prepare ip ospf"') + sleep(3) + kill_router_daemons(tgen, "rt1", ["ospfd"], save_config=False) + check_routers(exiting="rt1") + + start_router_daemons(tgen, "rt1", ["ospfd"]) + check_routers(restarting="rt1") + + +# +# Test rt2 performing a graceful restart +# +def test_gr_rt2(): + logger.info("Test: verify rt2 performing a graceful restart") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.net["rt2"].cmd('vtysh -c "graceful-restart prepare ip ospf"') + sleep(3) + kill_router_daemons(tgen, "rt2", ["ospfd"], save_config=False) + check_routers(exiting="rt2") + + start_router_daemons(tgen, "rt2", ["ospfd"]) + check_routers(restarting="rt2") + + +# +# Test rt3 performing a graceful restart +# +def test_gr_rt3(): + logger.info("Test: verify rt3 performing a graceful restart") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.net["rt3"].cmd('vtysh -c "graceful-restart prepare ip ospf"') + sleep(3) + kill_router_daemons(tgen, "rt3", ["ospfd"], save_config=False) + check_routers(exiting="rt3") + + start_router_daemons(tgen, "rt3", ["ospfd"]) + check_routers(restarting="rt3") + + +# +# Test rt4 performing a graceful restart +# +def test_gr_rt4(): + logger.info("Test: verify rt4 performing a graceful restart") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.net["rt4"].cmd('vtysh -c "graceful-restart prepare ip ospf"') + sleep(3) + kill_router_daemons(tgen, "rt4", ["ospfd"], save_config=False) + check_routers(exiting="rt4") + + start_router_daemons(tgen, "rt4", ["ospfd"]) + check_routers(restarting="rt4") + + +# +# Test rt5 performing a graceful restart +# +def test_gr_rt5(): + logger.info("Test: verify rt5 performing a graceful restart") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.net["rt5"].cmd('vtysh -c "graceful-restart prepare ip ospf"') + sleep(3) + kill_router_daemons(tgen, "rt5", ["ospfd"], save_config=False) + check_routers(exiting="rt5") + + start_router_daemons(tgen, "rt5", ["ospfd"]) + check_routers(restarting="rt5") + + +# +# Test rt6 performing a graceful restart +# +def test_gr_rt6(): + logger.info("Test: verify rt6 performing a graceful restart") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if 
tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.net["rt6"].cmd('vtysh -c "graceful-restart prepare ip ospf"') + sleep(3) + kill_router_daemons(tgen, "rt6", ["ospfd"], save_config=False) + check_routers(exiting="rt6") + + start_router_daemons(tgen, "rt6", ["ospfd"]) + check_routers(restarting="rt6") + + +# +# Test rt7 performing a graceful restart +# +def test_gr_rt7(): + logger.info("Test: verify rt7 performing a graceful restart") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.net["rt7"].cmd('vtysh -c "graceful-restart prepare ip ospf"') + sleep(3) + kill_router_daemons(tgen, "rt7", ["ospfd"], save_config=False) + check_routers(exiting="rt7") + + start_router_daemons(tgen, "rt7", ["ospfd"]) + check_routers(restarting="rt7") + + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py index 76e50beb5c..a22fbf458a 100644 --- a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py +++ b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py @@ -50,6 +50,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.ospfd] + class NetworkTopo(Topo): "OSPF topology builder" diff --git a/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py b/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py index 489690471c..b3da6e2a1a 100644 --- a/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py +++ b/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py @@ -71,6 +71,8 @@ from lib.topolog import logger # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.ospfd] + class TemplateTopo(Topo): "Test topology builder" diff --git a/tests/topotests/ospf_topo2/test_ospf_topo2.py b/tests/topotests/ospf_topo2/test_ospf_topo2.py index 6451f5fb32..8b8d5d6e9f 100644 --- a/tests/topotests/ospf_topo2/test_ospf_topo2.py +++ b/tests/topotests/ospf_topo2/test_ospf_topo2.py @@ -46,6 +46,8 @@ from lib.topolog import logger # Required to instantiate the topology builder class. 
from mininet.topo import Topo +pytestmark = [pytest.mark.ospfd] + class OSPFTopo(Topo): "Test topology builder" diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_topo1.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_topo1.json new file mode 100644 index 0000000000..74a0de489f --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_topo1.json @@ -0,0 +1,198 @@ +{ + "address_types": [ + "ipv6" + ], + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r0": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r3-link0": { + "ipv6": "auto", + "description": "DummyIntftoR3" + } + }, + "ospf6": { + "router_id": "100.1.1.0", + "neighbors": { + "r1": {}, + "r2": {}, + "r3": {} + } + } + }, + "r1": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3-link0": { + "ipv6": "auto", + "description": "DummyIntftoR3" + } + }, + "ospf6": { + "router_id": "100.1.1.1", + "neighbors": { + "r0": {}, + "r2": {}, + "r3": {} + } + } + }, + "r2": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.2", + "neighbors": { + "r1": {}, + "r0": {}, + "r3": {} + } + } + }, + "r3": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r0-link0": { + "ipv6": "auto", + "description": "DummyIntftoR0" + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link0": { + "ipv6": "auto", + "description": "DummyIntftoR1", + "ospf6": { + "area": "0.0.0.0" + } + } + }, + "ospf6": { + "router_id": "100.1.1.3", + "neighbors": { + "r0": {}, + "r1": {}, + "r2": {} + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json new file mode 100644 index 0000000000..c928093925 --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json @@ -0,0 +1,347 @@ +{ + "address_types": [ + "ipv6" + ], + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r0": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link4": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link5": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link6": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link7": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf6": { + "router_id": "100.1.1.0", + "neighbors": { + "r1": {}, + "r1-link1": { + "nbr": "r1" + }, + "r1-link2": { + "nbr": "r1" + }, + "r1-link3": { + "nbr": "r1" + }, + "r1-link4": { + "nbr": "r1" + }, + "r1-link5": { + "nbr": "r1" + }, + "r1-link6": { + "nbr": "r1" + }, + "r1-link7": { + "nbr": "r1" + }, + "r2": {}, + "r3": {} + } + } + }, + "r1": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r0-link1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r0-link2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r0-link3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r0-link4": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r0-link5": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r0-link6": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r0-link7": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3-link0": { + "ipv6": "auto", + "description": "DummyIntftoR3" + } + }, + "ospf6": { + 
"router_id": "100.1.1.1", + "neighbors": { + "r0": {}, + "r0-link1": { + "nbr": "r0" + }, + "r0-link2": { + "nbr": "r0" + }, + "r0-link3": { + "nbr": "r0" + }, + "r0-link4": { + "nbr": "r0" + }, + "r0-link5": { + "nbr": "r0" + }, + "r0-link6": { + "nbr": "r0" + }, + "r0-link7": { + "nbr": "r0" + }, + "r2": {}, + "r3": {} + } + } + }, + "r2": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.2", + "neighbors": { + "r1": {}, + "r0": {}, + "r3": {} + } + } + }, + "r3": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link0": { + "ipv6": "auto", + "description": "DummyIntftoR1", + "ospf": { + "area": "0.0.0.0" + }, + "ospf6": { + "area": "0.0.0.0" + } + } + }, + "ospf6": { + "router_id": "100.1.1.3", + "neighbors": { + "r0": {}, + "r1": {}, + "r2": {} + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json new file mode 100644 index 0000000000..226f84f320 --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json @@ -0,0 +1,137 @@ +{ + "address_types": ["ipv6"], + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": {"ipv6": "fd00::", "v6mask": 64}, + "lo_prefix": {"ipv6": "2001:db8:f::", "v6mask": 128}, + "routers": { + "r0": { + "links": { + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.0", + "neighbors": {"r1": {}, "r2": {}, "r3": {}} + } + }, + "r1": { + "links": { + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.1", + "neighbors": {"r0": {}, "r2": {}, "r3": {}} + } + }, + "r2": { + "links": { + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.2", + "neighbors": {"r1": {}, "r0": {}, "r3": {}} + } + }, + "r3": { + "links": { + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.3", + "neighbors": {"r0": {}, "r1": {}, "r2": {}} + } + } + } +} diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py new file mode 100644 index 0000000000..6a4b60fbed --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py @@ -0,0 +1,1928 @@ +#!/usr/bin/python + +# +# Copyright (c) 2021 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Summarisation Functionality Automation.""" +import os +import sys +import time +import pytest +import json +from copy import deepcopy +from ipaddress import IPv4Address +from lib.topotest import frr_unicode + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +import ipaddress +from time import sleep + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + kill_router_daemons, + write_test_footer, + reset_config_on_routers, + stop_router, + start_router, + verify_rib, + create_static_routes, + step, + start_router_daemons, + create_route_maps, + shutdown_bringup_interface, + create_prefix_lists, + create_route_maps, + create_interfaces_cfg, + topo_daemons, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json +from lib.ospf import ( + verify_ospf6_neighbor, + clear_ospf, + verify_ospf6_rib, + create_router_ospf, + verify_ospf_summary, +) + + +# Global variables +topo = None +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospfv3_asbr_summary_topo1.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + ], + "ipv6": [ + "2011:0:20::1/128", + "2011:0:20::2/128", + "2011:0:20::3/128", + "2011:0:20::4/128", + "2011:0:20::5/128", + ], +} +NETWORK_11 = { + "ipv4": ["11.0.20.6/32", "11.0.20.7/32"], + "ipv6": ["2011:0:20::6/128", "2011:0:20::7/128"], +} + +NETWORK2 = { + "ipv4": [ + "12.0.20.1/32", + "12.0.20.2/32", + "12.0.20.3/32", + "12.0.20.4/32", + "12.0.20.5/32", + ], + "ipv6": [ + "2012:0:20::1/128", + "2012:0:20::2/128", + "2012:0:20::3/128", + "2012:0:20::4/128", + "2012:0:20::5/128", + ], +} +SUMMARY = { + "ipv4": ["11.0.0.0/8", "12.0.0.0/8", "11.0.0.0/24"], + "ipv6": ["2011::/32", "2012::/32", "2011::/64", "2011::/24"], +} +""" +TOPOOLOGY = + Please view in a fixed-width font such as Courier. + +---+ A0 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A0 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A0 +---+ + +TESTCASES = +1. OSPF summarisation functionality. +2. OSPF summarisation with advertise and no advertise option +3. OSPF summarisation with route map modification of metric type. +4. OSPF CLI Show verify ospf ASBR summary config and show commands behaviours. +5. OSPF summarisation Chaos. +""" + + +class CreateTopo(Topo): + """ + Test topology builder. 
+
+    * `Topo`: Topology object
+    """
+
+    def build(self, *_args, **_opts):
+        """Build function."""
+        tgen = get_topogen(self)
+
+        # Building topology from json file
+        build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+    global topo
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    tgen = Topogen(CreateTopo, mod.__name__)
+    # ... and here it calls Mininet initialization functions.
+
+    # Get the list of daemons that need to be started for this suite.
+    daemons = topo_daemons(tgen, topo)
+
+    # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    # API call to verify whether OSPF has converged
+    ospf_convergence = verify_ospf6_neighbor(tgen, topo)
+    assert ospf_convergence is True, "setup_module: Failed \n Error:" " {}".format(
+        ospf_convergence
+    )
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+    """
+    Teardown the pytest environment.
+
+    * `mod`: module name
+    """
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+def red_static(dut, config=True):
+    """
+    Local 'def' for redistributing static routes inside ospf.
+
+    Parameters
+    ----------
+    * `dut` : DUT on which configs have to be made.
+    * `config` : True or False, True by default for configure, set False to
+    unconfigure.
+    """
+    global topo
+    tgen = get_topogen()
+    if config:
+        ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+    else:
+        ospf_red = {
+            dut: {
+                "ospf6": {"redistribute": [{"redist_type": "static", "delete": True}]}
+            }
+        }
+    result = create_router_ospf(tgen, topo, ospf_red)
+    assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+    """
+    Local 'def' for redistributing connected routes inside ospf.
+
+    Parameters
+    ----------
+    * `dut` : DUT on which configs have to be made.
+    * `config` : True or False, True by default for configure, set False to
+    unconfigure.
+    """
+    global topo
+    tgen = get_topogen()
+    if config:
+        ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}}
+    else:
+        ospf_red = {
+            dut: {
+                "ospf6": {
+                    "redistribute": [{"redist_type": "connected", "delete": True}]
+                }
+            }
+        }
+    result = create_router_ospf(tgen, topo, ospf_red)
+    assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+def test_ospfv3_type5_summary_tc42_p0(request):
+    """OSPF summarisation functionality."""
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + protocol = 'ospf' + + step( + "Configure 5 static routes from the same network on R0" + "5 static routes from different networks and redistribute in R0") + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + }, + { + "network": NETWORK2["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + dut = 'r0' + red_static(dut) + + step("Verify that routes are learnt on R1.") + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + result = verify_rib(tgen, "ipv6", dut, + input_dict_static_rtes, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step( + "Configure External Route summary in R0 to summarise 5" + " routes to one route. with aggregate timer as 6 sec") + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32" + }], + "aggr_timer": 6 + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that originally advertised routes are withdraw from there" + " peer.") + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step("Delete the configured summary") + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32", + "del_aggr_timer": True, + "delete": True + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n 
Error: {}".format( + tc_name, result) + + step("Verify that summary lsa is withdrawn from R1 and deleted from R0.") + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Summary Route still present in RIB".format(tc_name) + + step("show ip ospf summary should not have any summary address.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Summary still present in DB".format(tc_name) + + dut = 'r1' + step("All 5 routes are advertised after deletion of configured summary.") + + result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_static_rtes, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("configure the summary again and delete static routes .") + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole", + "delete": True + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + step("Verify that summary route is withdrawn from R1.") + + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step("Add back static routes.") + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured summary" + " address on R0 and only one route is sent to R1.") + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes, 
expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib( + tgen, "ipv6", dut, input_dict_static_rtes, + protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show configure summaries.") + + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step("Configure new static route which is matching configured summary.") + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK_11["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + # step("verify that summary lsa is not refreshed.") + # show ip ospf database command is not working, waiting for DEV fix. + + step("Delete one of the static route.") + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK_11["ipv6"], + "next_hop": "blackhole", + "delete": True + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + # step("verify that summary lsa is not refreshed.") + # show ip ospf database command is not working, waiting for DEV fix. + + # step("Verify that deleted static route is removed from ospf LSDB.") + # show ip ospf database command is not working, waiting for DEV fix. 
+ + step( + "Configure redistribute connected and configure ospf external" + " summary address to summarise the connected routes.") + + dut = 'r0' + red_connected(dut) + clear_ospf(tgen, dut, ospf='ospf6') + + ip = topo['routers']['r0']['links']['r3']['ipv6'] + + ip_net = str(ipaddress.ip_interface(u'{}'.format(ip)).network) + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": ip_net.split('/')[0], + "mask": "8" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured " + "summary address on R0 and only one route is sent to R1.") + + input_dict_summary = { + "r0": { + "static_routes": [{"network": "fd00::/64"}] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Shut one of the interface") + intf = topo['routers']['r0']['links']['r3-link0']['interface'] + shutdown_bringup_interface(tgen, dut, intf, False) + + # step("verify that summary lsa is not refreshed.") + # show ip ospf database command is not working, waiting for DEV fix. + + # step("Verify that deleted connected route is removed from ospf LSDB.") + # show ip ospf database command is not working, waiting for DEV fix. + + step("Un do shut the interface") + shutdown_bringup_interface(tgen, dut, intf, True) + + # step("verify that summary lsa is not refreshed.") + # show ip ospf database command is not working, waiting for DEV fix. + + # step("Verify that deleted connected route is removed from ospf LSDB.") + # show ip ospf database command is not working, waiting for DEV fix. 
+ + step("Delete OSPF process.") + ospf_del = { + "r0": { + "ospf6": { + "delete": True + } + } + } + result = create_router_ospf(tgen, topo, ospf_del) + assert result is True, "Testcase : Failed \n Error: {}".format(result) + + step("Reconfigure ospf process with summary") + reset_config_on_routers(tgen) + + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + }, + { + "network": NETWORK2["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + dut = 'r0' + red_static(dut) + red_connected(dut) + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + step( + "Verify that external routes are summarised to configured summary " + "address on R0 and only one route is sent to R1.") + + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + # step("verify that summary lsa is not refreshed.") + # show ip ospf database command is not working, waiting for DEV fix. 
+ + step("Delete the redistribute command in ospf.") + dut = 'r0' + red_connected(dut, config=False) + red_static(dut, config=False) + + step("Verify that summary route is withdrawn from the peer.") + + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32", + "metric": "1234" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + write_test_footer(tc_name) + + +def test_ospfv3_type5_summary_tc46_p0(request): + """OSPF summarisation with advertise and no advertise option""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + step("Configure OSPF on all the routers of the topology.") + reset_config_on_routers(tgen) + + protocol = 'ospf' + + step( + "Configure 5 static routes from the same network on R0" + "5 static routes from different networks and redistribute in R0") + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + }, + { + "network": NETWORK2["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + dut = 'r0' + red_static(dut) + + step("Verify that routes are learnt on R1.") + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + result = verify_rib(tgen, "ipv6", dut, + input_dict_static_rtes, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step( + "Configure External Route summary in R0 to summarise 5" + " routes to one route with no advertise option.") + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32", + "advertise": False + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured summary" + " address on R0 and summary route is not advertised to neighbor as" + " no advertise is configured..") + + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict_summary, + protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step( + "Verify that 
show ip ospf summary should show the " + "configured summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step("Delete the configured summary") + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32", + "delete": True + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Summary has 5 sec delay timer, sleep 5 secs...") + sleep(5) + + step("Verify that summary lsa is withdrawn from R1 and deleted from R0.") + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Summary Route still present in RIB".format(tc_name) + + step("show ip ospf summary should not have any summary address.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 1234, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Summary still present in DB".format(tc_name) + + step("Reconfigure summary with no advertise.") + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32", + "advertise": False + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured summary" + " address on R0 and summary route is not advertised to neighbor as" + " no advertise is configured..") + + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict_summary, + protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step( + "Verify that show ip ospf summary should show the " + "configured summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Change summary address from no advertise to advertise " + "(summary-address 10.0.0.0 255.255.0.0)") + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32", + "advertise": False + }] + } + } + } + result = create_router_ospf(tgen, topo, 
ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that originally advertised routes are withdraw from there" + " peer.") + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes is present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_ospfv3_type5_summary_tc48_p0(request): + """OSPF summarisation with route map modification of metric type.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + protocol = 'ospf' + + step( + "Configure 5 static routes from the same network on R0" + "5 static routes from different networks and redistribute in R0") + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + }, + { + "network": NETWORK2["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + dut = 'r0' + red_static(dut) + + step("Verify that routes are learnt on R1.") + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + result = verify_rib(tgen, "ipv6", dut, + input_dict_static_rtes, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step( + "Configure External Route summary in R0 to summarise 5" + " routes to one route.") + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that originally advertised routes are withdraw from there" + " peer.") + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step( + "Configure route map and & rule to permit configured summary address," + " redistribute static & connected routes with the route map.") + step("Configure prefixlist to permit the static routes, add to route map.") + # Create ip prefix list + pfx_list = { + "r0": { + "prefix_lists": { + "ipv6": { + "pf_list_1_ipv6": [ + { + "seqid": 10, + "network": "any", + "action": 
"permit" + } + ] + } + } + } + } + result = create_prefix_lists(tgen, pfx_list) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv6": [{ + "action": "permit", + "seq_id": '1', + "match": { + "ipv6": { + "prefix_lists": + "pf_list_1_ipv6" + } + } + }] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + ospf_red_r1 = { + "r0": { + "ospf6": { + "redistribute": [{ + "redist_type": "static", + "route_map": "rmap_ipv6" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured" + "summary address on R0 and only one route is sent to R1. Verify that " + "show ip ospf summary should show the configure summaries.") + + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step("Configure metric type as 1 in route map.") + + + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv6": [{ + "seq_id": '1', + "action": "permit", + "set":{ + "metric-type": "type-1" + } + }] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes(static / connected) are summarised" + " to configured summary address with metric type 2.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step("Un configure metric type from route map.") + + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv6": [{ + "action": "permit", + "seq_id": '1', + "set":{ + "metric-type": "type-1", + "delete": True + } + }] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes(static / connected) are summarised" + " to configured summary address with metric type 2.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step("Change rule from permit to deny in prefix list.") + 
pfx_list = { + "r0": { + "prefix_lists": { + "ipv6": { + "pf_list_1_ipv6": [ + { + "seqid": 10, + "network": "any", + "action": "deny" + } + ] + } + } + } + } + result = create_prefix_lists(tgen, pfx_list) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that previously originated summary lsa " + "is withdrawn from the neighbor.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + step("summary route has delay of 5 secs, wait for 5 secs") + + sleep(5) + + result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol, expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_ospfv3_type5_summary_tc51_p2(request): + """OSPF CLI Show. + + verify ospf ASBR summary config and show commands behaviours. + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + step("Configure all the supported OSPF ASBR summary commands on DUT.") + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32", + "tag": 4294967295 + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "16", + "advertise": True + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "24", + "advertise": False + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "24", + "advertise": False + }, + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + + step("Configure and re configure all the commands 10 times in a loop.") + + for itrate in range(0,10): + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "8", + "tag": 4294967295 + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "16", + "advertise": True + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "24", + "advertise": False + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "24", + "advertise": False + }, + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "8", + "tag": 4294967295, + "delete": True + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "16", + "advertise": True, + "delete": True + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "24", + "advertise": False, + "delete": True + }, + { + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "24", + "advertise": False, + "delete": True + }, + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Verify the show commands") + + input_dict = { + 
SUMMARY["ipv6"][3]: { + "Summary address": SUMMARY["ipv6"][3], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 0 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + write_test_footer(tc_name) + + +def test_ospfv3_type5_summary_tc49_p2(request): + """OSPF summarisation Chaos.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + protocol = 'ospf' + + step( + "Configure 5 static routes from the same network on R0" + "5 static routes from different networks and redistribute in R0") + input_dict_static_rtes = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + }, + { + "network": NETWORK2["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + dut = 'r0' + red_static(dut) + + step("Verify that routes are learnt on R1.") + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + result = verify_rib(tgen, "ipv6", dut, + input_dict_static_rtes, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step( + "Configure External Route summary in R0 to summarise 5" + " routes to one route.") + + ospf_summ_r1 = { + "r0": { + "ospf6": { + "summary-address": [{ + "prefix": SUMMARY["ipv6"][0].split('/')[0], + "mask": "32" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that originally advertised routes are withdraw from there" + " peer.") + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, 
result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step('Reload the FRR router') + # stop/start -> restart FRR router and verify + stop_router(tgen, 'r0') + start_router(tgen, 'r0') + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that originally advertised routes are withdraw from there" + " peer.") + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step("Kill OSPF6d daemon on R0.") + kill_router_daemons(tgen, "r0", ["ospf6d"]) + + step("Bring up OSPF6d daemon on R0.") + start_router_daemons(tgen, "r0", ["ospf6d"]) + + step("Verify OSPF neighbors are up after bringing back ospf6d in R0") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that originally 
advertised routes are withdraw from there" + " peer.") + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + step("restart zebrad") + kill_router_daemons(tgen, "r0", ["zebra"]) + + step("Bring up zebra daemon on R0.") + start_router_daemons(tgen, "r0", ["zebra"]) + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1.") + input_dict_summary = { + "r0": { + "static_routes": [{"network": SUMMARY["ipv6"][0]}] + } + } + dut = 'r1' + + result = verify_ospf6_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, + input_dict_summary, protocol=protocol) + assert result is True, "Testcase {} : Failed" \ + "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv6"][0]: { + "Summary address": SUMMARY["ipv6"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5 + } + } + dut = 'r0' + result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6') + assert result is True, "Testcase {} : Failed" \ + "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that originally advertised routes are withdraw from there" + " peer.") + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"], + "next_hop": "blackhole" + } + ] + } + } + dut = 'r1' + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: "\ + "Routes still present in OSPF RIB {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed" \ + "Error: Routes still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py new file mode 100644 index 0000000000..50c5144b3f --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py @@ -0,0 +1,523 @@ +#!/usr/bin/python + +# +# Copyright (c) 2021 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Basic Functionality Automation.""" +import os +import sys +import time +import pytest +import json +from copy import deepcopy +from ipaddress import IPv4Address +from lib.topotest import frr_unicode + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +import ipaddress + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + step, + create_route_maps, + shutdown_bringup_interface, + create_interfaces_cfg, + topo_daemons, + get_frr_ipv6_linklocal, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +from lib.ospf import ( + verify_ospf6_neighbor, + config_ospf_interface, + clear_ospf, + verify_ospf6_rib, + create_router_ospf, + verify_ospf6_interface, + verify_ospf6_database, + config_ospf6_interface, +) + +from ipaddress import IPv6Address + +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + +# Global variables +topo = None + +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospfv3_ecmp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + ], + "ipv6": ["2::1/128", "2::2/128", "2::3/128", "2::4/128", "2::5/128"], +} +""" +TOPOLOGY : + Please view in a fixed-width font such as Courier. + +---+ A1 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A2 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A3 +---+ + +TESTCASES : +1. Verify OSPF ECMP with max path configured as 8 (ECMPconfigured at FRR level) +2. Verify OSPF ECMP with max path configured as 2 (Edge having 2 uplink ports) + """ + + +class CreateTopo(Topo): + """ + Test topology builder. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. 
+ daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def red_static(dut, config=True): + """Local def for Redstribute static routes inside ospf.""" + global topo + tgen = get_topogen() + if config: + ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}} + else: + ospf_red = { + dut: { + "ospf6": {"redistribute": [{"redist_type": "static", "delete": True}]} + } + } + result = create_router_ospf(tgen, topo, ospf_red) + assert result is True, "Testcase : Failed \n Error: {}".format(result) + + +def red_connected(dut, config=True): + """Local def for Redstribute connected routes inside ospf.""" + global topo + tgen = get_topogen() + if config: + ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}} + else: + ospf_red = { + dut: { + "ospf6": { + "redistribute": [{"redist_type": "connected", "del_action": True}] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red) + assert result is True, "Testcase: Failed \n Error: {}".format(result) + + +def get_llip(onrouter, intf): + """ + API to get the link local ipv6 address of a perticular interface + + Parameters + ---------- + * `fromnode`: Source node + * `tonode` : interface for which link local ip needs to be returned. + + Usage + ----- + result = get_llip('r1', 'r2-link0') + + Returns + ------- + 1) link local ipv6 address from the interface. + 2) errormsg - when link local ip not found. + """ + tgen = get_topogen() + intf = topo["routers"][onrouter]["links"][intf]["interface"] + llip = get_frr_ipv6_linklocal(tgen, onrouter, intf) + if llip: + logger.info("llip ipv6 address to be set as NH is %s", llip) + return llip + return None + + +def get_glipv6(onrouter, intf): + """ + API to get the global ipv6 address of a perticular interface + + Parameters + ---------- + * `onrouter`: Source node + * `intf` : interface for which link local ip needs to be returned. + + Usage + ----- + result = get_glipv6('r1', 'r2-link0') + + Returns + ------- + 1) global ipv6 address from the interface. + 2) errormsg - when link local ip not found. + """ + glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0] + if glipv6: + logger.info("Global ipv6 address to be set as NH is %s", glipv6) + return glipv6 + return None + + +# ################################## +# Test cases start here. +# ################################## + + +def test_ospfv3_ecmp_tc16_p0(request): + """ + Verify OSPF ECMP. + + Verify OSPF ECMP with max path configured as 8 (ECMP + configured at FRR level) + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + step("Configure 8 interfaces between R1 and R2 and enable ospf in area 0.") + + reset_config_on_routers(tgen) + + step("Verify that OSPF is up with 8 neighborship sessions.") + dut = "r1" + ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + step("Configure a static route in R0 and redistribute in OSPF.") + + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r0" + red_static(dut) + + llip = get_llip("r0", "r1-link1") + assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that route in R2 in stalled with 8 next hops.") + nh = [] + for item in range(1, 7): + nh.append(llip) + + llip = get_llip("r0", "r1") + assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + nh2 = llip + + nh.append(nh2) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("shut no shut all the interfaces on the remote router - R2") + dut = "r1" + for intfr in range(1, 7): + intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route present in OSPF RIB. Error: {}".format( + tc_name, result + ) + + protocol = "ospf" + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result) + + for intfr in range(1, 7): + intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("shut no shut on all the interfaces on DUT (r1)") + for intfr in range(1, 7): + intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + for intfr in range(1, 7): + intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "Verify that all the neighbours are up and routes are installed" + " with 8 next hop in ospf and ip route tables on R1." 
+ ) + + step("Verify that OSPF is up with 8 neighborship sessions.") + dut = "r1" + ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_ospfv3_ecmp_tc17_p0(request): + """ + Verify OSPF ECMP. + + Verify OSPF ECMP with max path configured as 2 (Edge having 2 uplink ports) + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + step("Configure 2 interfaces between R1 and R2 & enable ospf in area 0.") + + reset_config_on_routers(tgen) + + step("Verify that OSPF is up with 2 neighborship sessions.") + dut = "r1" + ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + step("Configure a static route in R0 and redistribute in OSPF.") + + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r0" + red_static(dut) + + step("Verify that route in R2 in stalled with 2 next hops.") + + llip = get_llip("r0", "r1-link1") + assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + nh1 = llip + + llip = get_llip("r0", "r1") + assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + nh2 = llip + + nh = [nh1, nh2] + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Configure ECMP value as 1.") + max_path = {"r1": {"ospf6": {"maximum-paths": 1}}} + result = create_router_ospf(tgen, topo, max_path) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh2) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r1" + max_path = {"r1": {"ospf6": {"maximum-paths": 2}}} + result = create_router_ospf(tgen, topo, max_path) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Configure cost on R0 as 100") + r0_ospf_cost = {"r0": {"links": {"r1": {"ospf6": {"cost": 100}}}}} + result = config_ospf6_interface(tgen, topo, r0_ospf_cost) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + dut = "r1" + 
result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py new file mode 100644 index 0000000000..d8cf3bd02d --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py @@ -0,0 +1,875 @@ +#!/usr/bin/python + +# +# Copyright (c) 2021 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Basic Functionality Automation.""" +import os +import sys +import time +import pytest +import json +from copy import deepcopy +from ipaddress import IPv4Address +from lib.topotest import frr_unicode + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +import ipaddress + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + create_prefix_lists, + verify_rib, + create_static_routes, + step, + create_route_maps, + verify_prefix_lists, + get_frr_ipv6_linklocal, + topo_daemons, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +from lib.ospf import ( + verify_ospf6_neighbor, + config_ospf_interface, + clear_ospf, + verify_ospf6_rib, + create_router_ospf, + verify_ospf6_interface, + verify_ospf6_database, + config_ospf6_interface, +) + +from ipaddress import IPv6Address + +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + +# Global variables +topo = None + +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospfv3_routemaps.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + ], + "ipv6": ["2::1/128", "2::2/128", "2::3/128", "2::4/128", "2::5/128"], +} + +routerids = ["100.1.1.0", "100.1.1.1", "100.1.1.2", "100.1.1.3"] + +""" +TOPOOLOGY = + Please view in a fixed-width font such as Courier. + +---+ A1 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A2 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A3 +---+ + +TESTCASES = +2. Verify OSPF route map support functionality when route map is not + configured at system level but configured in OSPF +4. Verify OSPF route map support functionality + when route map actions are toggled. +5. Verify OSPF route map support functionality with multiple sequence + numbers in a single route-map for different match/set clauses. +6. Verify OSPF route map support functionality when we add/remove route-maps + with multiple set clauses and without any match statement.(Set only) +7. Verify OSPF route map support functionality when we + add/remove route-maps with multiple match clauses and without + any set statement.(Match only) +8. Verify OSPF route map applied to ospf redistribution with ipv6 prefix list + """ + + +class CreateTopo(Topo): + """ + Test topology builder. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. 
+ daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +# ################################## +# Test cases start here. +# ################################## + + +def test_ospfv3_routemaps_functionality_tc20_p0(request): + """ + OSPF route map support functionality. + + Verify OSPF route map support functionality when route map is not + configured at system level but configured in OSPF + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + global topo + step("Bring up the base config as per the topology") + + reset_config_on_routers(tgen) + + step("Create static routes(10.0.20.1/32 and 10.0.20.2/32) in R0") + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Redistribute to ospf using route map ( non existent route map)") + ospf_red_r1 = { + "r0": { + "ospf6": { + "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that routes are not allowed in OSPF even tough no " + "matching routing map is configured." + ) + + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + step( + "configure the route map with the same name that is used " + "in the ospf with deny rule." 
+ ) + + # Create route map + routemaps = {"r0": {"route_maps": {"rmap_ipv6": [{"action": "deny"}]}}} + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("verify that now route map is activated & routes are denied in OSPF.") + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + # Create route map + routemaps = {"r0": {"route_maps": {"rmap_ipv6": [{"action": "deny"}]}}} + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("verify that now route map is activated & routes are denied in OSPF.") + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + step("Delete the route map.") + # Create route map + routemaps = { + "r0": {"route_maps": {"rmap_ipv6": [{"action": "deny", "delete": True}]}} + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that routes are allowed in OSPF even tough " + "no matching routing map is configured." + ) + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_ospfv3_routemaps_functionality_tc25_p0(request): + """ + OSPF route map support functionality. + + Verify OSPF route map support functionality + when route map actions are toggled. + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + global topo + step("Bring up the base config as per the topology") + + reset_config_on_routers(tgen) + + step( + "Create static routes(10.0.20.1/32) in R1 and redistribute " + "to OSPF using route map." 
+ ) + + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + ospf_red_r0 = { + "r0": { + "ospf6": { + "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r0) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + step("Configure route map with permit rule") + # Create route map + routemaps = {"r0": {"route_maps": {"rmap_ipv6": [{"action": "permit"}]}}} + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that route is advertised to R1.") + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + step("Configure route map with deny rule") + # Create route map + routemaps = { + "r0": {"route_maps": {"rmap_ipv6": [{"seq_id": 10, "action": "deny"}]}} + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + step("Verify that route is not advertised to R1.") + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_ospfv3_routemaps_functionality_tc22_p0(request): + """ + OSPF Route map - Multiple sequence numbers. + + Verify OSPF route map support functionality with multiple sequence + numbers in a single route-map for different match/set clauses. 
+ + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + global topo + step("Bring up the base config as per the topology") + + reset_config_on_routers(tgen) + + step( + "Configure route map with seq number 10 to with ip prefix" + " permitting route 10.0.20.1/32 in R1" + ) + step( + "Configure route map with seq number 20 to with ip prefix" + " permitting route 10.0.20.2/32 in R1" + ) + + # Create route map + input_dict_3 = { + "r0": { + "route_maps": { + "rmap_ipv6": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}}, + }, + { + "action": "permit", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv4"}}, + }, + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + # Create ip prefix list + input_dict_2 = { + "r0": { + "prefix_lists": { + "ipv4": { + "pf_list_1_ipv6": [ + {"seqid": 10, "network": NETWORK["ipv6"][0], "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + # Create ip prefix list + input_dict_2 = { + "r0": { + "prefix_lists": { + "ipv4": { + "pf_list_2_ipv4": [ + {"seqid": 10, "network": NETWORK["ipv6"][1], "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Configure static routes 10.0.20.1/32 and 10.0.20.2 in R1") + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Configure redistribute static route with route map.") + ospf_red_r0 = { + "r0": { + "ospf6": { + "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r0) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "no_of_ip": 2, + "next_hop": "Null0", + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that both routes are learned in R1 and R2") + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r2" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Change route map with seq number 20 to deny.") + # Create route map + input_dict_3 = { + "r0": { + "route_maps": { + "rmap_ipv6": [ + { + "action": "deny", + "seq_id": "20", + "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv4"}}, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert 
result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify the route 10.0.20.2/32 is withdrawn and not present " + "in the routing table of R0 and R1." + ) + + input_dict = { + "r0": {"static_routes": [{"network": NETWORK["ipv6"][1], "next_hop": "Null0"}]} + } + + dut = "r1" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + dut = "r2" + protocol = "ospf" + result = verify_ospf6_rib(tgen, dut, input_dict, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_ospfv3_routemaps_functionality_tc24_p0(request): + """ + OSPF Route map - Multiple set clauses. + + Verify OSPF route map support functionality when we + add/remove route-maps with multiple match clauses and without + any set statement.(Match only) + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + global topo + step("Bring up the base config as per the topology") + + reset_config_on_routers(tgen) + + step( + "Create static routes(10.0.20.1/32) in R1 and redistribute to " + "OSPF using route map." + ) + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "no_of_ip": 1, + "next_hop": "Null0", + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + ospf_red_r0 = { + "r0": { + "ospf6": { + "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r0) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + # Create ip prefix list + pfx_list = { + "r0": { + "prefix_lists": { + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, pfx_list) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("verify that prefix-list is created in R0.") + result = verify_prefix_lists(tgen, pfx_list) + assert ( + result is not True + ), "Testcase {} : Failed \n Prefix list not " "present. 
Error: {}".format( + tc_name, result + ) + + # Create route map + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv6": [ + { + "action": "permit", + "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}}, + } + ] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that metric falls back to original metric for ospf routes.") + dut = "r1" + protocol = "ospf" + + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Create static routes(10.0.20.1/32) in R1 and redistribute to " + "OSPF using route map." + ) + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK["ipv6"][1], + "no_of_ip": 1, + "next_hop": "Null0", + "tag": 1000, + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + # Create ip prefix list + pfx_list = { + "r0": { + "prefix_lists": { + "ipv6": { + "pf_list_1_ipv6": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, pfx_list) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("verify that prefix-list is created in R0.") + result = verify_prefix_lists(tgen, pfx_list) + assert ( + result is not True + ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format( + tc_name, result + ) + + # Create route map + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv6": [{"action": "permit", "match": {"ipv6": {"tag": "1000"}}}] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that metric falls back to original metric for ospf routes.") + dut = "r1" + protocol = "ospf" + + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Delete the match clause with tag in route map") + # Create route map + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv6": [ + { + "action": "permit", + "match": {"ipv6": {"tag": "1000", "delete": True}}, + } + ] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that metric falls back to original metric for ospf routes.") + dut = "r1" + protocol = "ospf" + + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Delete the match clause with metric in route map.") + + # Create route map + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv6": [ + { + "action": "permit", + "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}}, + } + ] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n 
Error: {}".format(tc_name, result) + + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py index 4aa71bfb16..860f17ba67 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py @@ -74,6 +74,9 @@ from lib.ospf import ( from ipaddress import IPv6Address +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None @@ -281,6 +284,233 @@ def red_connected(dut, config=True): # ################################## # Test cases start here. # ################################## +def test_ospfv3_redistribution_tc5_p0(request): + """Test OSPF intra area route calculations.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config.") + reset_config_on_routers(tgen) + + step("Verify that OSPF neighbors are FULL.") + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + step("verify intra area route is calculated for r0-r3 interface ip in R1") + ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"] + ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network) + + llip = get_llip("r0", "r1") + assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, llip) + + nh = llip + input_dict = { + "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]} + } + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Delete the ip address on newly configured loopback of R0") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"], + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route present in RIB. 
Error: {}".format(tc_name, result) + + step("Add back the deleted ip address on newly configured interface of R0") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"], + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Shut no shut interface on R0") + dut = "r0" + intf = topo["routers"]["r0"]["links"]["r3"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + step("un shut the OSPF interface on R0") + dut = "r0" + shutdown_bringup_interface(tgen, dut, intf, True) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_ospfv3_redistribution_tc6_p0(request): + """Test OSPF inter area route calculations.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config.") + reset_config_on_routers(tgen) + + step("Verify that OSPF neighbors are FULL.") + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + step("verify intra area route is calculated for r0-r3 interface ip in R1") + ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"] + ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network) + llip = get_llip("r0", "r1") + assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, llip) + nh = llip + input_dict = { + "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]} + } + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Delete the ip address on newly configured loopback of R0") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"], + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Route present in RIB. 
Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib( + tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result) + + step("Add back the deleted ip address on newly configured interface of R0") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"], + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Shut no shut interface on R0") + dut = "r0" + intf = topo["routers"]["r0"]["links"]["r3"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + step("Verify that intraroute calculated for R1 intf on R0 is deleted.") + dut = "r1" + + step("un shut the OSPF interface on R0") + dut = "r0" + shutdown_bringup_interface(tgen, dut, intf, True) + + dut = "r1" + result = verify_ospf6_rib(tgen, dut, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + protocol = "ospf" + result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + def test_ospfv3_cost_tc52_p0(request): """OSPF Cost - verifying ospf interface cost functionality""" tc_name = request.node.name @@ -368,7 +598,6 @@ def test_ospfv3_cost_tc52_p0(request): write_test_footer(tc_name) - if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py index a84f1a1eb6..0c1c51c78a 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py @@ -54,7 +54,7 @@ from lib.common_config import ( create_route_maps, shutdown_bringup_interface, create_interfaces_cfg, - topo_daemons, + topo_daemons ) from lib.topolog import logger from lib.topojson import build_topo_from_json, build_config_from_json @@ -72,6 +72,9 @@ from lib.ospf import ( from ipaddress import IPv6Address +pytestmark = [pytest.mark.ospfd, pytest.mark.staticd] + + # Global variables topo = None diff --git a/tests/topotests/pim_acl/h1/zebra.conf b/tests/topotests/pim_acl/h1/zebra.conf new file mode 100644 index 0000000000..3d6540d40c --- /dev/null +++ b/tests/topotests/pim_acl/h1/zebra.conf @@ -0,0 +1,10 @@ +! +hostname h1 +log file zebra.log +! +interface h1-eth0 + description connection to r1 via sw1 + ip address 192.168.100.10/24 +! +ip route 0.0.0.0/0 192.168.100.1 +! diff --git a/tests/topotests/pim_acl/h2/zebra.conf b/tests/topotests/pim_acl/h2/zebra.conf new file mode 100644 index 0000000000..95342f9e8a --- /dev/null +++ b/tests/topotests/pim_acl/h2/zebra.conf @@ -0,0 +1,8 @@ +hostname h2 +! +interface h2-eth0 + description connection to r1 via sw2 + ip address 192.168.101.2/24 +! 
+ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_acl/r1/acl_1_pim_join.json b/tests/topotests/pim_acl/r1/acl_1_pim_join.json new file mode 100644 index 0000000000..1b44b2b5cf --- /dev/null +++ b/tests/topotests/pim_acl/r1/acl_1_pim_join.json @@ -0,0 +1,21 @@ +{ + "r1-eth0":{ + "name":"r1-eth0", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.1":{ + "*":{ + "source":"*", + "group":"239.100.0.1", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + "channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r1/acl_2_pim_join.json b/tests/topotests/pim_acl/r1/acl_2_pim_join.json new file mode 100644 index 0000000000..c020a489a9 --- /dev/null +++ b/tests/topotests/pim_acl/r1/acl_2_pim_join.json @@ -0,0 +1,21 @@ +{ + "r1-eth0":{ + "name":"r1-eth0", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.17":{ + "*":{ + "source":"*", + "group":"239.100.0.17", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + "channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r1/acl_3_pim_join.json b/tests/topotests/pim_acl/r1/acl_3_pim_join.json new file mode 100644 index 0000000000..6122f73992 --- /dev/null +++ b/tests/topotests/pim_acl/r1/acl_3_pim_join.json @@ -0,0 +1,21 @@ +{ + "r1-eth0":{ + "name":"r1-eth0", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.32":{ + "*":{ + "source":"*", + "group":"239.100.0.32", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + "channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r1/acl_4_pim_join.json b/tests/topotests/pim_acl/r1/acl_4_pim_join.json new file mode 100644 index 0000000000..5f72256ba7 --- /dev/null +++ b/tests/topotests/pim_acl/r1/acl_4_pim_join.json @@ -0,0 +1,21 @@ +{ + "r1-eth0":{ + "name":"r1-eth0", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.255":{ + "*":{ + "source":"*", + "group":"239.100.0.255", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + "channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r1/acl_5_pim_join.json b/tests/topotests/pim_acl/r1/acl_5_pim_join.json new file mode 100644 index 0000000000..70021bdbec --- /dev/null +++ b/tests/topotests/pim_acl/r1/acl_5_pim_join.json @@ -0,0 +1,21 @@ +{ + "r1-eth0":{ + "name":"r1-eth0", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.97":{ + "*":{ + "source":"*", + "group":"239.100.0.97", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + "channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r1/acl_6_pim_join.json b/tests/topotests/pim_acl/r1/acl_6_pim_join.json new file mode 100644 index 0000000000..2baac6cb22 --- /dev/null +++ b/tests/topotests/pim_acl/r1/acl_6_pim_join.json @@ -0,0 +1,21 @@ +{ + "r1-eth0":{ + "name":"r1-eth0", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.70":{ + "*":{ + "source":"*", + "group":"239.100.0.70", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + 
"channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r1/ospf_neighbor.json b/tests/topotests/pim_acl/r1/ospf_neighbor.json new file mode 100644 index 0000000000..a8fc093e90 --- /dev/null +++ b/tests/topotests/pim_acl/r1/ospf_neighbor.json @@ -0,0 +1,59 @@ +{ + "neighbors":{ + "192.168.0.11":[ + { + "priority":10, + "state":"Full\/Backup", + "address":"192.168.101.11", + "ifaceName":"r1-eth1:192.168.101.1", + "retransmitCounter":0, + "requestCounter":0, + "dbSummaryCounter":0 + } + ], + "192.168.0.12":[ + { + "priority":0, + "state":"Full\/DROther", + "address":"192.168.101.12", + "ifaceName":"r1-eth1:192.168.101.1", + "retransmitCounter":0, + "requestCounter":0, + "dbSummaryCounter":0 + } + ], + "192.168.0.13":[ + { + "priority":0, + "state":"Full\/DROther", + "address":"192.168.101.13", + "ifaceName":"r1-eth1:192.168.101.1", + "retransmitCounter":0, + "requestCounter":0, + "dbSummaryCounter":0 + } + ], + "192.168.0.14":[ + { + "priority":0, + "state":"Full\/DROther", + "address":"192.168.101.14", + "ifaceName":"r1-eth1:192.168.101.1", + "retransmitCounter":0, + "requestCounter":0, + "dbSummaryCounter":0 + } + ], + "192.168.0.15":[ + { + "priority":0, + "state":"Full\/DROther", + "address":"192.168.101.15", + "ifaceName":"r1-eth1:192.168.101.1", + "retransmitCounter":0, + "requestCounter":0, + "dbSummaryCounter":0 + } + ] + } +} diff --git a/tests/topotests/pim_acl/r1/ospfd.conf b/tests/topotests/pim_acl/r1/ospfd.conf new file mode 100644 index 0000000000..e1f47fb3b1 --- /dev/null +++ b/tests/topotests/pim_acl/r1/ospfd.conf @@ -0,0 +1,16 @@ +hostname r1 +! +debug ospf event +! +interface r1-eth1 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 20 +! +router ospf + ospf router-id 192.168.0.1 + passive-interface r1-eth0 + network 192.168.0.1/32 area 0 + network 192.168.100.0/24 area 0 + network 192.168.101.0/24 area 0 + diff --git a/tests/topotests/pim_acl/r1/pim_neighbor.json b/tests/topotests/pim_acl/r1/pim_neighbor.json new file mode 100644 index 0000000000..ae95e8db14 --- /dev/null +++ b/tests/topotests/pim_acl/r1/pim_neighbor.json @@ -0,0 +1,31 @@ +{ + "r1-eth0":{ + }, + "r1-eth1":{ + "192.168.101.12":{ + "interface":"r1-eth1", + "neighbor":"192.168.101.12", + "drPriority":1 + }, + "192.168.101.15":{ + "interface":"r1-eth1", + "neighbor":"192.168.101.15", + "drPriority":1 + }, + "192.168.101.14":{ + "interface":"r1-eth1", + "neighbor":"192.168.101.14", + "drPriority":1 + }, + "192.168.101.11":{ + "interface":"r1-eth1", + "neighbor":"192.168.101.11", + "drPriority":1 + }, + "192.168.101.13":{ + "interface":"r1-eth1", + "neighbor":"192.168.101.13", + "drPriority":1 + } + } +} diff --git a/tests/topotests/pim_acl/r1/pimd.conf b/tests/topotests/pim_acl/r1/pimd.conf new file mode 100644 index 0000000000..a148c73146 --- /dev/null +++ b/tests/topotests/pim_acl/r1/pimd.conf @@ -0,0 +1,31 @@ +hostname r1 +! +debug igmp events +debug igmp packets +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.11 prefix-list rp-pl-1 +ip pim rp 192.168.0.12 prefix-list rp-pl-2 +ip pim rp 192.168.0.13 prefix-list rp-pl-3 +ip pim rp 192.168.0.14 prefix-list rp-pl-4 +ip pim rp 192.168.0.15 prefix-list rp-pl-5 +ip pim join-prune-interval 5 +! +interface r1-eth0 + ip igmp + ip igmp version 2 + ip pim +! +interface r1-eth1 + ip pim +! 
+ip prefix-list rp-pl-1 seq 10 permit 239.100.0.0/28 +ip prefix-list rp-pl-2 seq 10 permit 239.100.0.17/32 +ip prefix-list rp-pl-3 seq 10 permit 239.100.0.32/27 +ip prefix-list rp-pl-4 seq 10 permit 239.100.0.128/25 +ip prefix-list rp-pl-4 seq 20 permit 239.100.0.96/28 +ip prefix-list rp-pl-5 seq 10 permit 239.100.0.64/28 diff --git a/tests/topotests/pim_acl/r1/zebra.conf b/tests/topotests/pim_acl/r1/zebra.conf new file mode 100644 index 0000000000..74feb8f6a7 --- /dev/null +++ b/tests/topotests/pim_acl/r1/zebra.conf @@ -0,0 +1,18 @@ +! +hostname r1 +log file zebra.log +! +ip forwarding +ipv6 forwarding +! +interface lo + ip address 192.168.0.1/32 +! +interface r1-eth0 + description connection to h1 via sw1 + ip address 192.168.100.1/24 +! +interface r1-eth1 + description connection to r11/12/13/14/15 via sw2 + ip address 192.168.101.1/24 +! diff --git a/tests/topotests/pim_acl/r11/acl_1_pim_join.json b/tests/topotests/pim_acl/r11/acl_1_pim_join.json new file mode 100644 index 0000000000..289bf51e76 --- /dev/null +++ b/tests/topotests/pim_acl/r11/acl_1_pim_join.json @@ -0,0 +1,19 @@ +{ + "r11-eth0":{ + "name":"r11-eth0", + "state":"up", + "address":"192.168.101.11", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.1":{ + "*":{ + "source":"*", + "group":"239.100.0.1", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r11/ospfd.conf b/tests/topotests/pim_acl/r11/ospfd.conf new file mode 100644 index 0000000000..e107220a4e --- /dev/null +++ b/tests/topotests/pim_acl/r11/ospfd.conf @@ -0,0 +1,14 @@ +hostname r11 +! +debug ospf event +! +interface r11-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 10 +! +router ospf + ospf router-id 192.168.0.11 + network 192.168.0.11/32 area 0 + network 192.168.101.0/24 area 0 +! diff --git a/tests/topotests/pim_acl/r11/pimd.conf b/tests/topotests/pim_acl/r11/pimd.conf new file mode 100644 index 0000000000..b1d45205da --- /dev/null +++ b/tests/topotests/pim_acl/r11/pimd.conf @@ -0,0 +1,17 @@ +hostname r11 +! +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.11 239.100.0.0/28 +ip pim join-prune-interval 5 +! +interface lo + ip pim +! +interface r11-eth0 + ip pim +! diff --git a/tests/topotests/pim_acl/r11/zebra.conf b/tests/topotests/pim_acl/r11/zebra.conf new file mode 100644 index 0000000000..137706d245 --- /dev/null +++ b/tests/topotests/pim_acl/r11/zebra.conf @@ -0,0 +1,13 @@ +! +hostname r11 +log file zebra.log +! +interface lo + ip address 192.168.0.11/32 +! +interface r11-eth0 + description connection to r1 via sw1 + ip address 192.168.101.11/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_acl/r12/acl_2_pim_join.json b/tests/topotests/pim_acl/r12/acl_2_pim_join.json new file mode 100644 index 0000000000..76ab7ee701 --- /dev/null +++ b/tests/topotests/pim_acl/r12/acl_2_pim_join.json @@ -0,0 +1,19 @@ +{ + "r12-eth0":{ + "name":"r12-eth0", + "state":"up", + "address":"192.168.101.12", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.17":{ + "*":{ + "source":"*", + "group":"239.100.0.17", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r12/ospfd.conf b/tests/topotests/pim_acl/r12/ospfd.conf new file mode 100644 index 0000000000..f9203c78e4 --- /dev/null +++ b/tests/topotests/pim_acl/r12/ospfd.conf @@ -0,0 +1,14 @@ +hostname r12 +! 
+debug ospf event +! +interface r12-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 0 +! +router ospf + ospf router-id 192.168.0.12 + network 192.168.0.12/32 area 0 + network 192.168.101.0/24 area 0 +! diff --git a/tests/topotests/pim_acl/r12/pimd.conf b/tests/topotests/pim_acl/r12/pimd.conf new file mode 100644 index 0000000000..ba9e7d902f --- /dev/null +++ b/tests/topotests/pim_acl/r12/pimd.conf @@ -0,0 +1,17 @@ +hostname r12 +! +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.12 239.100.0.17/32 +ip pim join-prune-interval 5 +! +interface lo + ip pim +! +interface r12-eth0 + ip pim +! diff --git a/tests/topotests/pim_acl/r12/zebra.conf b/tests/topotests/pim_acl/r12/zebra.conf new file mode 100644 index 0000000000..bede104906 --- /dev/null +++ b/tests/topotests/pim_acl/r12/zebra.conf @@ -0,0 +1,13 @@ +! +hostname r12 +log file zebra.log +! +interface lo + ip address 192.168.0.12/32 +! +interface r12-eth0 + description connection to r1 via sw1 + ip address 192.168.101.12/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_acl/r13/acl_3_pim_join.json b/tests/topotests/pim_acl/r13/acl_3_pim_join.json new file mode 100644 index 0000000000..48ad72cbe1 --- /dev/null +++ b/tests/topotests/pim_acl/r13/acl_3_pim_join.json @@ -0,0 +1,19 @@ +{ + "r13-eth0":{ + "name":"r13-eth0", + "state":"up", + "address":"192.168.101.13", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.32":{ + "*":{ + "source":"*", + "group":"239.100.0.32", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r13/ospfd.conf b/tests/topotests/pim_acl/r13/ospfd.conf new file mode 100644 index 0000000000..830c5a14b6 --- /dev/null +++ b/tests/topotests/pim_acl/r13/ospfd.conf @@ -0,0 +1,14 @@ +hostname r13 +! +debug ospf event +! +interface r13-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 0 +! +router ospf + ospf router-id 192.168.0.13 + network 192.168.0.13/32 area 0 + network 192.168.101.0/24 area 0 +! diff --git a/tests/topotests/pim_acl/r13/pimd.conf b/tests/topotests/pim_acl/r13/pimd.conf new file mode 100644 index 0000000000..2ff1743574 --- /dev/null +++ b/tests/topotests/pim_acl/r13/pimd.conf @@ -0,0 +1,17 @@ +hostname r13 +! +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.13 239.100.0.32/27 +ip pim join-prune-interval 5 +! +interface lo + ip pim +! +interface r13-eth0 + ip pim +! diff --git a/tests/topotests/pim_acl/r13/zebra.conf b/tests/topotests/pim_acl/r13/zebra.conf new file mode 100644 index 0000000000..f9ff27abac --- /dev/null +++ b/tests/topotests/pim_acl/r13/zebra.conf @@ -0,0 +1,13 @@ +! +hostname r13 +log file zebra.log +! +interface lo + ip address 192.168.0.13/32 +! +interface r13-eth0 + description connection to r1 via sw1 + ip address 192.168.101.13/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! 
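For reference, r1/pimd.conf above binds each RP address to a group range through the rp-pl-1 .. rp-pl-5 prefix-lists, while each RP router (r11-r15) claims the matching range with its own "ip pim rp" statement. A rough illustration of the resulting group-to-RP mapping, as exercised by the acl_*_pim_join.json reference files, is sketched below; this is not pimd code, and it assumes plain prefix containment with longest-prefix as a tie-breaker (sufficient here because none of the configured ranges overlap).

# Illustrative sketch only -- mirrors the "ip pim rp <addr> prefix-list rp-pl-N"
# lines in r1/pimd.conf above; it is not how pimd itself selects an RP.
import ipaddress

RP_GROUP_RANGES = {
    "192.168.0.11": ["239.100.0.0/28"],                       # rp-pl-1 / r11
    "192.168.0.12": ["239.100.0.17/32"],                      # rp-pl-2 / r12
    "192.168.0.13": ["239.100.0.32/27"],                      # rp-pl-3 / r13
    "192.168.0.14": ["239.100.0.128/25", "239.100.0.96/28"],  # rp-pl-4 / r14
    "192.168.0.15": ["239.100.0.64/28"],                      # rp-pl-5 / r15
}

def expected_rp(group):
    """Return the RP whose most specific configured range contains 'group'."""
    addr = ipaddress.ip_address(group)
    best = None
    for rp, ranges in RP_GROUP_RANGES.items():
        for r in ranges:
            net = ipaddress.ip_network(r)
            if addr in net and (best is None or net.prefixlen > best[1]):
                best = (rp, net.prefixlen)
    return best[0] if best else None

# Matches the groups exercised by test_mcast_acl_1 .. test_mcast_acl_6 below.
assert expected_rp("239.100.0.1") == "192.168.0.11"
assert expected_rp("239.100.0.97") == "192.168.0.14"
assert expected_rp("239.100.0.70") == "192.168.0.15"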
diff --git a/tests/topotests/pim_acl/r14/acl_4_pim_join.json b/tests/topotests/pim_acl/r14/acl_4_pim_join.json new file mode 100644 index 0000000000..46d86dd40d --- /dev/null +++ b/tests/topotests/pim_acl/r14/acl_4_pim_join.json @@ -0,0 +1,19 @@ +{ + "r14-eth0":{ + "name":"r14-eth0", + "state":"up", + "address":"192.168.101.14", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.255":{ + "*":{ + "source":"*", + "group":"239.100.0.255", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r14/acl_5_pim_join.json b/tests/topotests/pim_acl/r14/acl_5_pim_join.json new file mode 100644 index 0000000000..2b291a8a0c --- /dev/null +++ b/tests/topotests/pim_acl/r14/acl_5_pim_join.json @@ -0,0 +1,19 @@ +{ + "r14-eth0":{ + "name":"r14-eth0", + "state":"up", + "address":"192.168.101.14", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.97":{ + "*":{ + "source":"*", + "group":"239.100.0.97", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r14/ospfd.conf b/tests/topotests/pim_acl/r14/ospfd.conf new file mode 100644 index 0000000000..422e4c08b0 --- /dev/null +++ b/tests/topotests/pim_acl/r14/ospfd.conf @@ -0,0 +1,14 @@ +hostname r14 +! +debug ospf event +! +interface r14-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 0 +! +router ospf + ospf router-id 192.168.0.14 + network 192.168.0.14/32 area 0 + network 192.168.101.0/24 area 0 +! diff --git a/tests/topotests/pim_acl/r14/pimd.conf b/tests/topotests/pim_acl/r14/pimd.conf new file mode 100644 index 0000000000..1324a9e40b --- /dev/null +++ b/tests/topotests/pim_acl/r14/pimd.conf @@ -0,0 +1,18 @@ +hostname r14 +! +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.14 239.100.0.96/28 +ip pim rp 192.168.0.14 239.100.0.128/25 +ip pim join-prune-interval 5 +! +interface lo + ip pim +! +interface r14-eth0 + ip pim +! diff --git a/tests/topotests/pim_acl/r14/zebra.conf b/tests/topotests/pim_acl/r14/zebra.conf new file mode 100644 index 0000000000..8761b46206 --- /dev/null +++ b/tests/topotests/pim_acl/r14/zebra.conf @@ -0,0 +1,13 @@ +! +hostname r14 +log file zebra.log +! +interface lo + ip address 192.168.0.14/32 +! +interface r14-eth0 + description connection to r1 via sw1 + ip address 192.168.101.14/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_acl/r15/acl_6_pim_join.json b/tests/topotests/pim_acl/r15/acl_6_pim_join.json new file mode 100644 index 0000000000..05fed4ecc5 --- /dev/null +++ b/tests/topotests/pim_acl/r15/acl_6_pim_join.json @@ -0,0 +1,19 @@ +{ + "r15-eth0":{ + "name":"r15-eth0", + "state":"up", + "address":"192.168.101.15", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.70":{ + "*":{ + "source":"*", + "group":"239.100.0.70", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_acl/r15/ospfd.conf b/tests/topotests/pim_acl/r15/ospfd.conf new file mode 100644 index 0000000000..cd4d7b3875 --- /dev/null +++ b/tests/topotests/pim_acl/r15/ospfd.conf @@ -0,0 +1,14 @@ +hostname r15 +! +debug ospf event +! +interface r15-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 0 +! +router ospf + ospf router-id 192.168.0.15 + network 192.168.0.15/32 area 0 + network 192.168.101.0/24 area 0 +! 
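The ospfd.conf files for r11-r15 differ only in their "ip ospf priority" value, and that value is what produces the states recorded in r1/ospf_neighbor.json: r1 (priority 20) wins the DR election on the 192.168.101.0/24 segment, r11 (priority 10) becomes Backup, and the priority-0 routers can be neither. A small sketch of that relationship follows; it illustrates standard OSPF DR election and is not code from this test suite.

# Illustration only (not from the test suite): why r1/ospf_neighbor.json expects
# "Full/Backup" for r11 and "Full/DROther" for r12-r15. It assumes the election
# has settled with r1's priority 20 as the highest on the segment and r11 as the
# only other router with a non-zero priority, which is what these configs set up.
PRIORITIES = {"r11": 10, "r12": 0, "r13": 0, "r14": 0, "r15": 0}

def expected_state_seen_from_r1(rtr):
    # priority 0 routers are ineligible for DR/BDR and stay DROther
    return "Full/Backup" if PRIORITIES[rtr] > 0 else "Full/DROther"

assert expected_state_seen_from_r1("r11") == "Full/Backup"
assert all(
    expected_state_seen_from_r1(r) == "Full/DROther"
    for r in ("r12", "r13", "r14", "r15")
)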
diff --git a/tests/topotests/pim_acl/r15/pimd.conf b/tests/topotests/pim_acl/r15/pimd.conf new file mode 100644 index 0000000000..f47e78c221 --- /dev/null +++ b/tests/topotests/pim_acl/r15/pimd.conf @@ -0,0 +1,17 @@ +hostname r15 +! +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.15 239.100.0.64/28 +ip pim join-prune-interval 5 +! +interface lo + ip pim +! +interface r15-eth0 + ip pim +! diff --git a/tests/topotests/pim_acl/r15/zebra.conf b/tests/topotests/pim_acl/r15/zebra.conf new file mode 100644 index 0000000000..f6909dd020 --- /dev/null +++ b/tests/topotests/pim_acl/r15/zebra.conf @@ -0,0 +1,13 @@ +! +hostname r15 +log file zebra.log +! +interface lo + ip address 192.168.0.15/32 +! +interface r15-eth0 + description connection to r1 via sw1 + ip address 192.168.101.15/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_acl/test_pim_acl.py b/tests/topotests/pim_acl/test_pim_acl.py new file mode 100755 index 0000000000..77917a0239 --- /dev/null +++ b/tests/topotests/pim_acl/test_pim_acl.py @@ -0,0 +1,418 @@ +#!/usr/bin/env python + +# +# test_pim_acl.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_pim_acl.py: Test PIM with RP selection using ACLs +""" + +# Test PIM RP selection with ACLs +# +# Testing RP selection with ACLs. R1 uses multiple ACLs +# to select desired RPs (R11 to R15) +# +# Test steps: +# - setup_module() +# Create topology. Hosts are only using zebra/staticd, +# no PIM, no OSPF (using IGMPv2 for multicast) +# - test_ospf_convergence() +# Wait for OSPF convergence in each VRF. OSPF is run on +# R1 and R11 - R15. +# - test_pim_convergence() +# Wait for PIM convergence on all routers. PIM is run on +# R1 and R11 - R15. 
+# - test_mcast_acl_1(): +# Test 1st ACL entry 239.100.0.0/28 with 239.100.0.1 which +# should use R11 as RP +# Stop multicast after verification +# - test_mcast_acl_2(): +# Test 2nd ACL entry 239.100.0.17/32 with 239.100.0.17 which +# should use R12 as RP +# Stop multicast after verification +# - test_mcast_acl_3(): +# Test 3rd ACL entry 239.100.0.32/27 with 239.100.0.32 which +# should use R13 as RP +# Stop multicast after verification +# - test_mcast_acl_4(): +# Test 4th ACL entry 239.100.0.128/25 with 239.100.0.255 which +# should use R14 as RP +# Stop multicast after verification +# - test_mcast_acl_5(): +# Test 5th ACL entry 239.100.0.96/28 with 239.100.0.97 which +# should use R14 as RP +# Stop multicast after verification +# - test_mcast_acl_6(): +# Test 6th ACL entry 239.100.0.64/28 with 239.100.0.70 which +# should use R15 as RP +# Stop multicast after verification +# - teardown_module() +# shutdown topology +# + + +TOPOLOGY = """ + +----------+ + | Host H2 | + | Source | + +----------+ + .2 | + +-----------+ | +----------+ + | | .1 | .11 | Host R11 | ++---------+ | R1 |---------+--------| PIM RP | +| Host H1 | 192.168.100.0/24 | | 192.168.101.0/24 +----------+ +| receive |------------------| uses ACLs | | +----------+ +|IGMP JOIN| .10 .1 | to pick | | .12 | Host R12 | ++---------+ | RP | +--------| PIM RP | + | | | +----------+ + +-----------+ | +----------+ + | .13 | Host R13 | + +--------| PIM RP | + | +----------+ + | +----------+ + | .14 | Host R14 | + +--------| PIM RP | + | +----------+ + | +----------+ + | .15 | Host R15 | + +--------| PIM RP | + +----------+ +""" + +import json +import functools +import os +import sys +import pytest +import re +import time +from time import sleep +import socket + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +pytestmark = [pytest.mark.pimd, pytest.mark.ospfd] + + +# +# Test global variables: +# They are used to handle communicating with external application. +# +APP_SOCK_PATH = '/tmp/topotests/apps.sock' +HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py") +app_listener = None +app_clients = {} + +def listen_to_applications(): + "Start listening socket to connect with applications." + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + sock.bind(APP_SOCK_PATH) + sock.listen(10) + global app_listener + app_listener = sock + +def accept_host(host): + "Accept connection from application running in hosts." + global app_listener, app_clients + conn = app_listener.accept() + app_clients[host] = { + 'fd': conn[0], + 'address': conn[1] + } + +def close_applications(): + "Signal applications to stop and close all sockets." + global app_listener, app_clients + + if app_listener: + # Close listening socket. + app_listener.close() + + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + # Close all host connections. 
+ for host in ["h1", "h2"]: + if app_clients.get(host) is None: + continue + app_clients[host]["fd"].close() + + # Reset listener and clients data struct + app_listener = None + app_clients = {} + + +class PIMACLTopo(Topo): + "PIM ACL Test Topology" + + def build(self): + tgen = get_topogen(self) + + # Create the hosts + for hostNum in range(1,3): + tgen.add_router("h{}".format(hostNum)) + + # Create the main router + tgen.add_router("r1") + + # Create the PIM RP routers + for rtrNum in range(11, 16): + tgen.add_router("r{}".format(rtrNum)) + + # Setup Switches and connections + for swNum in range(1, 3): + tgen.add_switch("sw{}".format(swNum)) + + # Add connections H1 to R1 switch sw1 + tgen.gears["h1"].add_link(tgen.gears["sw1"]) + tgen.gears["r1"].add_link(tgen.gears["sw1"]) + + # Add connections R1 to R1x switch sw2 + tgen.gears["r1"].add_link(tgen.gears["sw2"]) + tgen.gears["h2"].add_link(tgen.gears["sw2"]) + tgen.gears["r11"].add_link(tgen.gears["sw2"]) + tgen.gears["r12"].add_link(tgen.gears["sw2"]) + tgen.gears["r13"].add_link(tgen.gears["sw2"]) + tgen.gears["r14"].add_link(tgen.gears["sw2"]) + tgen.gears["r15"].add_link(tgen.gears["sw2"]) + + +##################################################### +# +# Tests starting +# +##################################################### + +def setup_module(module): + logger.info("PIM RP ACL Topology: \n {}".format(TOPOLOGY)) + + tgen = Topogen(PIMACLTopo, module.__name__) + tgen.start_topology() + + # Starting Routers + router_list = tgen.routers() + + for rname, router in router_list.items(): + logger.info("Loading router %s" % rname) + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + if rname[0] != 'h': + # Only load ospf on routers, not on end hosts + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname)) + ) + tgen.start_router() + + +def teardown_module(module): + tgen = get_topogen() + tgen.stop_topology() + close_applications() + + +def test_ospf_convergence(): + "Test for OSPFv2 convergence" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Checking OSPFv2 convergence on router r1") + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/ospf_neighbor.json") + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip ospf neighbor json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) + assertmsg = "OSPF router R1 did not converge" + assert res is None, assertmsg + + +def test_pim_convergence(): + "Test for PIM convergence" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Checking PIM convergence on router r1") + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/pim_neighbor.json") + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip pim neighbor json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) + assertmsg = "PIM router R1 did not converge" + assert res is None, assertmsg + + + +def check_mcast_entry(entry, mcastaddr, pimrp): + "Helper function to check RP" + tgen = get_topogen() + + logger.info("Testing PIM RP selection 
for ACL {} entry using {}".format(entry, mcastaddr)); + + # Start applications socket. + listen_to_applications() + + tgen.gears["h2"].run("{} --send='0.7' '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, 'h2-eth0')) + accept_host("h2") + + tgen.gears["h1"].run("{} '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, 'h1-eth0')) + accept_host("h1") + + logger.info("mcast join and source for {} started".format(mcastaddr)) + + # tgen.mininet_cli() + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/acl_{}_pim_join.json".format(entry)) + expected = json.loads(open(reffile).read()) + + logger.info("verifying pim join on r1 for {}".format(mcastaddr)) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip pim join json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) + assertmsg = "PIM router r1 did not show join status" + assert res is None, assertmsg + + logger.info("verifying pim join on PIM RP {} for {}".format(pimrp, mcastaddr)) + router = tgen.gears[pimrp] + reffile = os.path.join(CWD, "{}/acl_{}_pim_join.json".format(pimrp, entry)) + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip pim join json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) + assertmsg = "PIM router {} did not get selected as the PIM RP".format(pimrp) + assert res is None, assertmsg + + close_applications() + return + + +def test_mcast_acl_1(): + "Test 1st ACL entry 239.100.0.0/28 with 239.100.0.1" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry(1, '239.100.0.1', 'r11') + + +def test_mcast_acl_2(): + "Test 2nd ACL entry 239.100.0.17/32 with 239.100.0.17" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry(2, '239.100.0.17', 'r12') + + +def test_mcast_acl_3(): + "Test 3rd ACL entry 239.100.0.32/27 with 239.100.0.32" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry(3, '239.100.0.32', 'r13') + + +def test_mcast_acl_4(): + "Test 4th ACL entry 239.100.0.128/25 with 239.100.0.255" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry(4, '239.100.0.255', 'r14') + + +def test_mcast_acl_5(): + "Test 5th ACL entry 239.100.0.96/28 with 239.100.0.97" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry(5, '239.100.0.97', 'r14') + + +def test_mcast_acl_6(): + "Test 6th ACL entry 239.100.0.64/28 with 239.100.0.70" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry(6, '239.100.0.70', 'r15') + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/pim_basic/r1/pimd.conf b/tests/topotests/pim_basic/r1/pimd.conf index f64a46deb3..737019fa51 100644 --- a/tests/topotests/pim_basic/r1/pimd.conf +++ b/tests/topotests/pim_basic/r1/pimd.conf @@ -15,3 +15,4 @@ interface lo ip pim ! 
ip pim rp 10.254.0.3 +ip pim join-prune-interval 5 diff --git a/tests/topotests/pim_basic/rp/pimd.conf b/tests/topotests/pim_basic/rp/pimd.conf index 6e35c97971..fd26bc4d71 100644 --- a/tests/topotests/pim_basic/rp/pimd.conf +++ b/tests/topotests/pim_basic/rp/pimd.conf @@ -6,6 +6,7 @@ interface rp-eth0 interface lo ip pim ! +ip pim join-prune-interval 5 ip pim rp 10.254.0.3 ip pim register-accept-list ACCEPT diff --git a/tests/topotests/pim_basic_topo2/r2/pimd.conf b/tests/topotests/pim_basic_topo2/r2/pimd.conf index 0b32ded19a..9f389deb11 100644 --- a/tests/topotests/pim_basic_topo2/r2/pimd.conf +++ b/tests/topotests/pim_basic_topo2/r2/pimd.conf @@ -10,3 +10,4 @@ interface r2-eth2 ip pim ip pim bfd ! +ip pim join-prune-interval 5 diff --git a/tests/topotests/pim_igmp_vrf/h1/zebra.conf b/tests/topotests/pim_igmp_vrf/h1/zebra.conf new file mode 100644 index 0000000000..3d6540d40c --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/h1/zebra.conf @@ -0,0 +1,10 @@ +! +hostname h1 +log file zebra.log +! +interface h1-eth0 + description connection to r1 via sw1 + ip address 192.168.100.10/24 +! +ip route 0.0.0.0/0 192.168.100.1 +! diff --git a/tests/topotests/pim_igmp_vrf/h2/zebra.conf b/tests/topotests/pim_igmp_vrf/h2/zebra.conf new file mode 100644 index 0000000000..95342f9e8a --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/h2/zebra.conf @@ -0,0 +1,8 @@ +hostname h2 +! +interface h2-eth0 + description connection to r1 via sw2 + ip address 192.168.101.2/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_igmp_vrf/h3/zebra.conf b/tests/topotests/pim_igmp_vrf/h3/zebra.conf new file mode 100644 index 0000000000..ef99b1cd8f --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/h3/zebra.conf @@ -0,0 +1,10 @@ +! +hostname h3 +log file zebra.log +! +interface h3-eth0 + description connection to r1 via sw3 + ip address 192.168.100.20/24 +! +ip route 0.0.0.0/0 192.168.100.1 +! diff --git a/tests/topotests/pim_igmp_vrf/h4/zebra.conf b/tests/topotests/pim_igmp_vrf/h4/zebra.conf new file mode 100644 index 0000000000..6a2e466000 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/h4/zebra.conf @@ -0,0 +1,8 @@ +hostname h4 +! +interface h4-eth0 + description connection to r1 via sw4 + ip address 192.168.101.4/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_igmp_vrf/r1/ospf_blue_neighbor.json b/tests/topotests/pim_igmp_vrf/r1/ospf_blue_neighbor.json new file mode 100644 index 0000000000..604d25fac1 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/ospf_blue_neighbor.json @@ -0,0 +1,15 @@ +{ + "blue":{ + "vrfName":"blue", + "neighbors":{ + "192.168.0.11":[ + { + "priority":10, + "state":"Full\/Backup", + "address":"192.168.101.11", + "ifaceName":"r1-eth1:192.168.101.1" + } + ] + } + } +} diff --git a/tests/topotests/pim_igmp_vrf/r1/ospf_red_neighbor.json b/tests/topotests/pim_igmp_vrf/r1/ospf_red_neighbor.json new file mode 100644 index 0000000000..456bb87520 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/ospf_red_neighbor.json @@ -0,0 +1,16 @@ +{ + "red":{ + "vrfName":"red", + "neighbors":{ + "192.168.0.12":[ + { + "priority":10, + "state":"Full\/Backup", + "address":"192.168.101.12", + "ifaceName":"r1-eth3:192.168.101.1" + } + ] + } + } +} + diff --git a/tests/topotests/pim_igmp_vrf/r1/ospfd.conf b/tests/topotests/pim_igmp_vrf/r1/ospfd.conf new file mode 100644 index 0000000000..263b5867cc --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/ospfd.conf @@ -0,0 +1,26 @@ +hostname r1 +! +debug ospf event +! +! 
+interface r1-eth1 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 20 +! +interface r1-eth3 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 20 +! +router ospf vrf blue + ospf router-id 192.168.0.1 + network 192.168.0.1/32 area 0 + network 192.168.100.0/24 area 0 + network 192.168.101.0/24 area 0 +router ospf vrf red + ospf router-id 192.168.0.1 + network 192.168.0.1/32 area 0 + network 192.168.100.0/24 area 0 + network 192.168.101.0/24 area 0 +! diff --git a/tests/topotests/pim_igmp_vrf/r1/pim_blue_join.json b/tests/topotests/pim_igmp_vrf/r1/pim_blue_join.json new file mode 100644 index 0000000000..8568bae2bc --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/pim_blue_join.json @@ -0,0 +1,22 @@ +{ + "r1-eth0":{ + "name":"r1-eth0", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.1":{ + "*":{ + "source":"*", + "group":"239.100.0.1", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + "channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} + diff --git a/tests/topotests/pim_igmp_vrf/r1/pim_blue_neighbor.json b/tests/topotests/pim_igmp_vrf/r1/pim_blue_neighbor.json new file mode 100644 index 0000000000..ea7d4aca6f --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/pim_blue_neighbor.json @@ -0,0 +1,13 @@ +{ + "blue":{ + }, + "r1-eth0":{ + }, + "r1-eth1":{ + "192.168.101.11":{ + "interface":"r1-eth1", + "neighbor":"192.168.101.11", + "drPriority":1 + } + } +} diff --git a/tests/topotests/pim_igmp_vrf/r1/pim_blue_pimreg11.json b/tests/topotests/pim_igmp_vrf/r1/pim_blue_pimreg11.json new file mode 100644 index 0000000000..d3642f854a --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/pim_blue_pimreg11.json @@ -0,0 +1,14 @@ +{ + "pimreg11":{ + "name":"pimreg11", + "state":"up", + "address":"0.0.0.0", + "flagAllMulticast":true, + "lanDelayEnabled":true, + "drAddress":"*", + "drPriority":1, + "drUptime":"--:--:--", + "drElections":0, + "drChanges":0 + } +} diff --git a/tests/topotests/pim_igmp_vrf/r1/pim_red_join.json b/tests/topotests/pim_igmp_vrf/r1/pim_red_join.json new file mode 100644 index 0000000000..d0037ca4b0 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/pim_red_join.json @@ -0,0 +1,21 @@ +{ + "r1-eth2":{ + "name":"r1-eth2", + "state":"up", + "address":"192.168.100.1", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.1":{ + "*":{ + "source":"*", + "group":"239.100.0.1", + "upTime":"--:--:--", + "expire":"--:--", + "prune":"--:--", + "channelJoinName":"NOINFO", + "protocolIgmp":1 + } + } + } +} diff --git a/tests/topotests/pim_igmp_vrf/r1/pim_red_neighbor.json b/tests/topotests/pim_igmp_vrf/r1/pim_red_neighbor.json new file mode 100644 index 0000000000..e17b40854a --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/pim_red_neighbor.json @@ -0,0 +1,13 @@ +{ + "r1-eth2":{ + }, + "r1-eth3":{ + "192.168.101.12":{ + "interface":"r1-eth3", + "neighbor":"192.168.101.12", + "drPriority":1 + } + }, + "red":{ + } +} diff --git a/tests/topotests/pim_igmp_vrf/r1/pim_red_pimreg12.json b/tests/topotests/pim_igmp_vrf/r1/pim_red_pimreg12.json new file mode 100644 index 0000000000..45b6cd9645 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/pim_red_pimreg12.json @@ -0,0 +1,14 @@ +{ + "pimreg12":{ + "name":"pimreg12", + "state":"up", + "address":"0.0.0.0", + "flagAllMulticast":true, + "lanDelayEnabled":true, + "drAddress":"*", + "drPriority":1, + "drUptime":"--:--:--", + "drElections":0, + "drChanges":0 + } +} 
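The pim_blue_join.json and pim_red_join.json reference files above expect the same group (239.100.0.1) on two different r1 interfaces, one per VRF, which is exactly what the overlapping per-VRF addressing in this topology is meant to exercise. A quick standalone check of that property, illustrative only and not part of the test suite:

# Illustrative only: the blue and red reference files track the same IGMP group
# on different r1 interfaces (r1-eth0 in vrf blue, r1-eth2 in vrf red).
import json
import os

CWD = os.path.dirname(os.path.realpath(__file__))  # assumes the pim_igmp_vrf test dir

with open(os.path.join(CWD, "r1/pim_blue_join.json")) as f:
    blue = json.load(f)
with open(os.path.join(CWD, "r1/pim_red_join.json")) as f:
    red = json.load(f)

assert "239.100.0.1" in blue["r1-eth0"]
assert "239.100.0.1" in red["r1-eth2"]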
diff --git a/tests/topotests/pim_igmp_vrf/r1/pimd.conf b/tests/topotests/pim_igmp_vrf/r1/pimd.conf new file mode 100644 index 0000000000..f04c255de9 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/pimd.conf @@ -0,0 +1,27 @@ +hostname r1 +! +debug igmp events +debug igmp packets +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +interface r1-eth0 + ip igmp + ip igmp version 2 + ip pim +! +interface r1-eth1 + ip pim +! +interface r1-eth2 + ip igmp + ip igmp version 2 + ip pim +! +interface r1-eth3 + ip pim +! +ip pim join-prune-interval 5 diff --git a/tests/topotests/pim_igmp_vrf/r1/zebra.conf b/tests/topotests/pim_igmp_vrf/r1/zebra.conf new file mode 100644 index 0000000000..9da9280945 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r1/zebra.conf @@ -0,0 +1,30 @@ +! +hostname r1 +log file zebra.log +! +ip forwarding +ipv6 forwarding +! +interface blue vrf blue + ip address 192.168.0.1/32 +! +interface red vrf red + ip address 192.168.0.1/32 +! +interface r1-eth0 vrf blue + description connection to h1 via sw1 + ip address 192.168.100.1/24 +! +interface r1-eth1 vrf blue + description connection to r11 via sw2 + ip address 192.168.101.1/24 +! +interface r1-eth2 vrf red + description connection to h1 via sw3 + ip address 192.168.100.1/24 +! +interface r1-eth3 vrf red + description connection to r12 via sw4 + ip address 192.168.101.1/24 +! + diff --git a/tests/topotests/pim_igmp_vrf/r11/ospfd.conf b/tests/topotests/pim_igmp_vrf/r11/ospfd.conf new file mode 100644 index 0000000000..e107220a4e --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r11/ospfd.conf @@ -0,0 +1,14 @@ +hostname r11 +! +debug ospf event +! +interface r11-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 10 +! +router ospf + ospf router-id 192.168.0.11 + network 192.168.0.11/32 area 0 + network 192.168.101.0/24 area 0 +! diff --git a/tests/topotests/pim_igmp_vrf/r11/pim_blue_join.json b/tests/topotests/pim_igmp_vrf/r11/pim_blue_join.json new file mode 100644 index 0000000000..289bf51e76 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r11/pim_blue_join.json @@ -0,0 +1,19 @@ +{ + "r11-eth0":{ + "name":"r11-eth0", + "state":"up", + "address":"192.168.101.11", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.1":{ + "*":{ + "source":"*", + "group":"239.100.0.1", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_igmp_vrf/r11/pimd.conf b/tests/topotests/pim_igmp_vrf/r11/pimd.conf new file mode 100644 index 0000000000..b1d45205da --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r11/pimd.conf @@ -0,0 +1,17 @@ +hostname r11 +! +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.11 239.100.0.0/28 +ip pim join-prune-interval 5 +! +interface lo + ip pim +! +interface r11-eth0 + ip pim +! diff --git a/tests/topotests/pim_igmp_vrf/r11/zebra.conf b/tests/topotests/pim_igmp_vrf/r11/zebra.conf new file mode 100644 index 0000000000..137706d245 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r11/zebra.conf @@ -0,0 +1,13 @@ +! +hostname r11 +log file zebra.log +! +interface lo + ip address 192.168.0.11/32 +! +interface r11-eth0 + description connection to r1 via sw1 + ip address 192.168.101.11/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! 
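r1/zebra.conf above binds r1-eth0/r1-eth1 to VRF blue and r1-eth2/r1-eth3 to VRF red, which only takes effect once the kernel VRF devices exist and the interfaces are enslaved to them. The sketch below mirrors the commands that setup_module() in test_pim_vrf.py (further down in this diff) runs on r1 before the daemons start; the setup_r1_vrfs() wrapper itself is illustrative only.

def setup_r1_vrfs(tgen):
    "Create the blue/red VRF devices on r1 and enslave its interfaces."
    vrf_setup_cmds = [
        "ip link add name blue type vrf table 11",
        "ip link add name red type vrf table 12",
        "ip link set dev blue up",
        "ip link set dev red up",
        "ip link set dev r1-eth0 vrf blue up",
        "ip link set dev r1-eth1 vrf blue up",
        "ip link set dev r1-eth2 vrf red up",
        "ip link set dev r1-eth3 vrf red up",
    ]
    # Issue the commands directly in r1's namespace before loading the
    # daemon configs, so zebra already sees the interfaces in their VRFs.
    for cmd in vrf_setup_cmds:
        tgen.net["r1"].cmd(cmd)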
diff --git a/tests/topotests/pim_igmp_vrf/r12/ospfd.conf b/tests/topotests/pim_igmp_vrf/r12/ospfd.conf new file mode 100644 index 0000000000..03acc82c1d --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r12/ospfd.conf @@ -0,0 +1,14 @@ +hostname r12 +! +debug ospf event +! +interface r12-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 + ip ospf priority 10 +! +router ospf + ospf router-id 192.168.0.12 + network 192.168.0.12/32 area 0 + network 192.168.101.0/24 area 0 +! diff --git a/tests/topotests/pim_igmp_vrf/r12/pim_red_join.json b/tests/topotests/pim_igmp_vrf/r12/pim_red_join.json new file mode 100644 index 0000000000..6926246568 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r12/pim_red_join.json @@ -0,0 +1,19 @@ +{ + "r12-eth0":{ + "name":"r12-eth0", + "state":"up", + "address":"192.168.101.12", + "flagMulticast":true, + "flagBroadcast":true, + "lanDelayEnabled":true, + "239.100.0.1":{ + "*":{ + "source":"*", + "group":"239.100.0.1", + "prune":"--:--", + "channelJoinName":"JOIN", + "protocolPim":1 + } + } + } +} diff --git a/tests/topotests/pim_igmp_vrf/r12/pimd.conf b/tests/topotests/pim_igmp_vrf/r12/pimd.conf new file mode 100644 index 0000000000..5cb76efa22 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r12/pimd.conf @@ -0,0 +1,17 @@ +hostname r12 +! +debug pim events +debug pim packets +debug pim trace +debug pim zebra +debug pim bsm +! +ip pim rp 192.168.0.12 239.100.0.0/28 +ip pim join-prune-interval 5 +! +interface lo + ip pim +! +interface r12-eth0 + ip pim +! diff --git a/tests/topotests/pim_igmp_vrf/r12/zebra.conf b/tests/topotests/pim_igmp_vrf/r12/zebra.conf new file mode 100644 index 0000000000..bede104906 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/r12/zebra.conf @@ -0,0 +1,13 @@ +! +hostname r12 +log file zebra.log +! +interface lo + ip address 192.168.0.12/32 +! +interface r12-eth0 + description connection to r1 via sw1 + ip address 192.168.101.12/24 +! +ip route 0.0.0.0/0 192.168.101.1 +! diff --git a/tests/topotests/pim_igmp_vrf/test_pim_vrf.py b/tests/topotests/pim_igmp_vrf/test_pim_vrf.py new file mode 100755 index 0000000000..cb207cb810 --- /dev/null +++ b/tests/topotests/pim_igmp_vrf/test_pim_vrf.py @@ -0,0 +1,462 @@ +#!/usr/bin/env python + +# +# test_pim_vrf.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_pim_vrf.py: Test PIM with VRFs. +""" + +# Tests PIM with VRF +# +# R1 is split into 2 VRF: Blue and Red, the others are normal +# routers and Hosts +# There are 2 similar topologies with overlapping IPs in each +# section. +# +# Test steps: +# - setup_module() +# Create topology. Hosts are only using zebra/staticd, +# no PIM, no OSPF (using IGMPv2 for multicast) +# - test_ospf_convergence() +# Wait for OSPF convergence in each VRF. 
OSPF is run on +# R1, R11 and R12. +# - test_pim_convergence() +# Wait for PIM convergence in each VRF. PIM is run on +# R1, R11 and R12. R11 is the RP for vrf blue, R12 is RP +# for vrf red. +# - test_vrf_pimreg_interfaces() +# Adding PIM RP in VRF information and verify pimreg +# interfaces in VRF blue and red +# - test_mcast_vrf_blue() +# Start multicast stream for group 239.100.0.1 from Host +# H2 and join from Host H1 on vrf blue +# Verify PIM JOIN status on R1 and R11 +# Stop multicast after verification +# - test_mcast_vrf_red() +# Start multicast stream for group 239.100.0.1 from Host +# H4 and join from Host H3 on vrf blue +# Verify PIM JOIN status on R1 and R12 +# Stop multicast after verification +# - teardown_module(module) +# shutdown topology +# + +TOPOLOGY = """ + +----------+ + | Host H2 | + | Source | + +----------+ + .2 | ++---------+ +------------+ | +---------+ +| Host H1 | 192.168.100.0/24 | | .1 | .11 | Host H2 | +| receive |------------------| VRF Blue |---------+--------| PIM RP | +|IGMP JOIN| .10 .1 | | 192.168.101.0/24 | | ++---------+ | | +---------+ + =| = = R1 = = |= ++---------+ | | +---------+ +| Host H3 | 192.168.100.0/24 | | 192.168.101.0/24 | Host H4 | +| receive |------------------| VRF Red |---------+--------| PIM RP | +|IGMP JOIN| .20 .1 | | .1 | .12 | | ++---------+ +------------+ | +---------+ + .4 | + +----------+ + | Host H4 | + | Source | + +----------+ +""" + +import json +import functools +import os +import sys +import pytest +import re +import time +from time import sleep +import socket + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.topotest import iproute2_is_vrf_capable +from lib.common_config import ( + required_linux_kernel_version) + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +pytestmark = [pytest.mark.ospfd, pytest.mark.pimd] + + +# +# Test global variables: +# They are used to handle communicating with external application. +# +APP_SOCK_PATH = '/tmp/topotests/apps.sock' +HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py") +app_listener = None +app_clients = {} + +def listen_to_applications(): + "Start listening socket to connect with applications." + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + sock.bind(APP_SOCK_PATH) + sock.listen(10) + global app_listener + app_listener = sock + +def accept_host(host): + "Accept connection from application running in hosts." + global app_listener, app_clients + conn = app_listener.accept() + app_clients[host] = { + 'fd': conn[0], + 'address': conn[1] + } + +def close_applications(): + "Signal applications to stop and close all sockets." + global app_listener, app_clients + + if app_listener: + # Close listening socket. + app_listener.close() + + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + # Close all host connections. 
+ for host in ["h1", "h2"]: + if app_clients.get(host) is None: + continue + app_clients[host]["fd"].close() + + # Reset listener and clients data struct + app_listener = None + app_clients = {} + + +class PIMVRFTopo(Topo): + "PIM VRF Test Topology" + + def build(self): + tgen = get_topogen(self) + + # Create the hosts + for hostNum in range(1,5): + tgen.add_router("h{}".format(hostNum)) + + # Create the main router + tgen.add_router("r1") + + # Create the PIM RP routers + for rtrNum in range(11, 13): + tgen.add_router("r{}".format(rtrNum)) + + # Setup Switches and connections + for swNum in range(1, 5): + tgen.add_switch("sw{}".format(swNum)) + + ################ + # 1st set of connections to routers for VRF red + ################ + + # Add connections H1 to R1 switch sw1 + tgen.gears["h1"].add_link(tgen.gears["sw1"]) + tgen.gears["r1"].add_link(tgen.gears["sw1"]) + + # Add connections R1 to R1x switch sw2 + tgen.gears["r1"].add_link(tgen.gears["sw2"]) + tgen.gears["h2"].add_link(tgen.gears["sw2"]) + tgen.gears["r11"].add_link(tgen.gears["sw2"]) + + ################ + # 2nd set of connections to routers for vrf blue + ################ + + # Add connections H1 to R1 switch sw1 + tgen.gears["h3"].add_link(tgen.gears["sw3"]) + tgen.gears["r1"].add_link(tgen.gears["sw3"]) + + # Add connections R1 to R1x switch sw2 + tgen.gears["r1"].add_link(tgen.gears["sw4"]) + tgen.gears["h4"].add_link(tgen.gears["sw4"]) + tgen.gears["r12"].add_link(tgen.gears["sw4"]) + +##################################################### +# +# Tests starting +# +##################################################### + +def setup_module(module): + logger.info("PIM IGMP VRF Topology: \n {}".format(TOPOLOGY)) + + tgen = Topogen(PIMVRFTopo, module.__name__) + tgen.start_topology() + + vrf_setup_cmds = [ + "ip link add name blue type vrf table 11", + "ip link add name red type vrf table 12", + "ip link set dev blue up", + "ip link set dev red up", + "ip link set dev r1-eth0 vrf blue up", + "ip link set dev r1-eth1 vrf blue up", + "ip link set dev r1-eth2 vrf red up", + "ip link set dev r1-eth3 vrf red up", + ] + + # Starting Routers + router_list = tgen.routers() + + # Create VRF on r2 first and add it's interfaces + for cmd in vrf_setup_cmds: + tgen.net["r1"].cmd(cmd) + + for rname, router in router_list.items(): + logger.info("Loading router %s" % rname) + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + if rname[0] != 'h': + # Only load ospf on routers, not on end hosts + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname)) + ) + tgen.start_router() + + +def teardown_module(module): + tgen = get_topogen() + tgen.stop_topology() + close_applications() + + +def test_ospf_convergence(): + "Test for OSPFv2 convergence" + tgen = get_topogen() + + # Required linux kernel version for this suite to run. + result = required_linux_kernel_version("4.15") + if result is not True: + pytest.skip("Kernel requirements are not met") + + # iproute2 needs to support VRFs for this suite to run. 
+ if not iproute2_is_vrf_capable(): + pytest.skip("Installed iproute2 version does not support VRFs") + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Checking OSPFv2 convergence on router r1 for VRF blue") + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/ospf_blue_neighbor.json") + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip ospf vrf blue neighbor json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) + assertmsg = "OSPF router R1 did not converge on VRF blue" + assert res is None, assertmsg + + logger.info("Checking OSPFv2 convergence on router r1 for VRF red") + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/ospf_red_neighbor.json") + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip ospf vrf red neighbor json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=60, wait=2) + assertmsg = "OSPF router R1 did not converge on VRF red" + assert res is None, assertmsg + + +def test_pim_convergence(): + "Test for PIM convergence" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Checking PIM convergence on router r1 for VRF red") + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/pim_red_neighbor.json") + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip pim vrf red neighbor json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=2) + assertmsg = "PIM router R1 did not converge for VRF red" + assert res is None, assertmsg + + logger.info("Checking PIM convergence on router r1 for VRF blue") + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/pim_blue_neighbor.json") + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip pim vrf blue neighbor json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=2) + assertmsg = "PIM router R1 did not converge for VRF blue" + assert res is None, assertmsg + + +def test_vrf_pimreg_interfaces(): + "Adding PIM RP in VRF information and verify pimreg interfaces" + tgen = get_topogen() + + r1 = tgen.gears["r1"] + r1.vtysh_cmd("conf\ninterface blue\nip pim") + r1.vtysh_cmd("conf\nvrf blue\nip pim rp 192.168.0.11 239.100.0.1/32\nexit-vrf") + + # Check pimreg11 interface on R1, VRF blue + reffile = os.path.join(CWD, "r1/pim_blue_pimreg11.json") + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, r1, "show ip pim vrf blue inter pimreg11 json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=5, wait=2) + assertmsg = "PIM router R1, VRF blue (table 11) pimreg11 interface missing or incorrect status" + assert res is None, assertmsg + + r1.vtysh_cmd("conf\ninterface red\nip pim") + r1.vtysh_cmd("conf\nvrf red\nip pim rp 192.168.0.12 239.100.0.1/32\nexit-vrf") + + # Check pimreg12 interface on R1, VRF red + reffile = os.path.join(CWD, "r1/pim_red_pimreg12.json") + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, r1, "show ip pim vrf red inter pimreg12 json", expected + ) + _, res = 
topotest.run_and_expect(test_func, None, count=5, wait=2) + assertmsg = "PIM router R1, VRF red (table 12) pimreg12 interface missing or incorrect status" + assert res is None, assertmsg + + +################################## +### Test PIM / IGMP with VRF +################################## + +def check_mcast_entry(mcastaddr, pimrp, receiver, sender, vrf): + "Helper function to check RP" + tgen = get_topogen() + + logger.info("Testing PIM for VRF {} entry using {}".format(vrf, mcastaddr)); + + # Start applications socket. + listen_to_applications() + + tgen.gears[sender].run("{} --send='0.7' '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, '{}-eth0'.format(sender))) + accept_host(sender) + + tgen.gears[receiver].run("{} '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, '{}-eth0'.format(receiver))) + accept_host(receiver) + + logger.info("mcast join and source for {} started".format(mcastaddr)) + + # tgen.mininet_cli() + + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/pim_{}_join.json".format(vrf)) + expected = json.loads(open(reffile).read()) + + logger.info("verifying pim join on r1 for {} on VRF {}".format(mcastaddr, vrf)) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip pim vrf {} join json".format(vrf), + expected + ) + _, res = topotest.run_and_expect(test_func, None, count=10, wait=2) + assertmsg = "PIM router r1 did not show join status on VRF".format(vrf) + assert res is None, assertmsg + + logger.info("verifying pim join on PIM RP {} for {}".format(pimrp, mcastaddr)) + router = tgen.gears[pimrp] + reffile = os.path.join(CWD, "{}/pim_{}_join.json".format(pimrp, vrf)) + expected = json.loads(open(reffile).read()) + + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip pim join json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=10, wait=2) + assertmsg = "PIM router {} did not get selected as the PIM RP for VRF {}".format(pimrp, vrf) + assert res is None, assertmsg + + close_applications() + return + + +def test_mcast_vrf_blue(): + "Test vrf blue with 239.100.0.1" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry('239.100.0.1', 'r11', 'h1', 'h2', 'blue') + + +def test_mcast_vrf_red(): + "Test vrf red with 239.100.0.1" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + check_mcast_entry('239.100.0.1', 'r12', 'h3', 'h4', 'red') + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/route_scale/test_route_scale.py b/tests/topotests/route_scale/test_route_scale.py index bbd6ef8d60..469ad42d64 100644 --- a/tests/topotests/route_scale/test_route_scale.py +++ b/tests/topotests/route_scale/test_route_scale.py @@ -48,6 +48,9 @@ from lib.common_config import shutdown_bringup_interface # Required to instantiate the topology builder class. 
from mininet.topo import Topo +pytestmark = [pytest.mark.sharpd] + + ##################################################### ## ## Network Topology Definition diff --git a/tests/topotests/simple_snmp_test/test_simple_snmp.py b/tests/topotests/simple_snmp_test/test_simple_snmp.py index 5647e2b663..bdb44816b6 100755 --- a/tests/topotests/simple_snmp_test/test_simple_snmp.py +++ b/tests/topotests/simple_snmp_test/test_simple_snmp.py @@ -90,6 +90,16 @@ def setup_module(mod): r1 = tgen.gears["r1"] + r1.run("ip addr add 192.168.12.12/24 dev r1-eth0") + r1.run("ip -6 addr add 2000:1:1:12::12/64 dev r1-eth0") + r1.run("ip addr add 192.168.13.13/24 dev r1-eth1") + r1.run("ip -6 addr add 2000:1:1:13::13/64 dev r1-eth1") + r1.run("ip addr add 192.168.14.14/24 dev r1-eth2") + r1.run("ip -6 addr add 2000:1:1:14::14/64 dev r1-eth2") + r1.run("ip addr add 1.1.1.1/32 dev lo") + r1.run("ip -6 addr add 2000:1:1:1::1/128 dev lo") + r1.run("ip addr show") + router_list = tgen.routers() # For all registred routers, load the zebra configuration file diff --git a/tests/topotests/srv6_locator/test_srv6_locator.py b/tests/topotests/srv6_locator/test_srv6_locator.py index a7416ce085..04b0d8db97 100755 --- a/tests/topotests/srv6_locator/test_srv6_locator.py +++ b/tests/topotests/srv6_locator/test_srv6_locator.py @@ -43,6 +43,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd, pytest.mark.sharpd] + def open_json_file(filename): try: diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py index 812b39797f..626de6b422 100644 --- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py @@ -72,6 +72,9 @@ from lib.bgp import ( ) from lib.topojson import build_topo_from_json, build_config_from_json +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + + # Reading the data from JSON File for topology creation jsonFile = "{}/static_routes_topo4_ebgp.json".format(CWD) try: diff --git a/tests/topotests/zebra_netlink/test_zebra_netlink.py b/tests/topotests/zebra_netlink/test_zebra_netlink.py index 94baf8438f..cf08ee9639 100644 --- a/tests/topotests/zebra_netlink/test_zebra_netlink.py +++ b/tests/topotests/zebra_netlink/test_zebra_netlink.py @@ -47,6 +47,9 @@ from lib.common_config import shutdown_bringup_interface # Required to instantiate the topology builder class. 
from mininet.topo import Topo +pytestmark = [pytest.mark.sharpd] + + ##################################################### ## ## Network Topology Definition diff --git a/tests/topotests/zebra_opaque/test_zebra_opaque.py b/tests/topotests/zebra_opaque/test_zebra_opaque.py index cc52fbd1a7..2339b0f5b0 100644 --- a/tests/topotests/zebra_opaque/test_zebra_opaque.py +++ b/tests/topotests/zebra_opaque/test_zebra_opaque.py @@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from mininet.topo import Topo +pytestmark = [pytest.mark.bgpd] + class TemplateTopo(Topo): def build(self, *_args, **_opts): diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py index 9fcf7b6820..56d112b7c3 100644 --- a/tests/topotests/zebra_rib/test_zebra_rib.py +++ b/tests/topotests/zebra_rib/test_zebra_rib.py @@ -46,6 +46,8 @@ from time import sleep # Required to instantiate the topology builder class. from mininet.topo import Topo +pytestmark = [pytest.mark.sharpd] + class ZebraTopo(Topo): "Test topology builder" @@ -124,7 +126,7 @@ def test_zebra_kernel_admin_distance(): "show ip route 4.5.{}.0 json".format(i), expected, ) - _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assertmsg = '"r1" JSON output mismatches' assert result is None, assertmsg # tgen.mininet_cli() @@ -145,7 +147,7 @@ def test_zebra_kernel_override(): test_func = partial( topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected ) - _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assert result is None, '"r1" JSON output mismatches' logger.info( @@ -158,7 +160,7 @@ def test_zebra_kernel_override(): test_func = partial( topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected ) - _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5) assert result is None, '"r1" JSON output mismatches' diff --git a/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py index e83b2c1007..a83c6d6ec0 100755 --- a/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py +++ b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py @@ -43,6 +43,8 @@ from lib.topolog import logger from lib.common_config import shutdown_bringup_interface from mininet.topo import Topo +pytestmark = [pytest.mark.sharpd] + def open_json_file(filename): try: diff --git a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py index 1c9d208fef..6cdb77b94b 100755 --- a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py +++ b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py @@ -43,6 +43,8 @@ from lib.topolog import logger from lib.common_config import shutdown_bringup_interface from mininet.topo import Topo +pytestmark = [pytest.mark.sharpd] + def open_json_file(filename): try: |
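The test_zebra_rib.py hunks above raise run_and_expect's retry count from 2 to 20 while keeping wait=0.5, widening the polling window from roughly 1 second to roughly 10 seconds before a JSON mismatch is reported. The helper below is only an approximation of topotest.run_and_expect's retry-and-sleep behaviour, written out so the timing impact of the change is explicit; it is not the framework's implementation.

import time

def run_and_expect_sketch(func, what, count=20, wait=0.5):
    "Call func() up to count times, wait seconds apart, until it returns what."
    result = func()
    while count > 0 and result != what:
        time.sleep(wait)
        result = func()
        count -= 1
    # Returns (matched, last_result); callers typically assert on the result.
    return (result == what, result)

# With count=2 and wait=0.5 a kernel route had at most about one second to
# show up in zebra's RIB; count=20 stretches that to about ten seconds,
# which is the point of the change above.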

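The module-level pytestmark lists added throughout these topotests (pytest.mark.bgpd, pytest.mark.sharpd, pytest.mark.pimd, and so on) tag every test in a file with the daemons it exercises, so whole modules can be selected or skipped when a daemon is not built. The snippet below is a minimal, self-contained illustration of that mechanism; the daemon-availability check itself lives in the topotest framework's conftest and is not shown here.

import pytest

# Applying markers at module scope attaches them to every test function in
# the file, exactly like the pytestmark additions in this diff.
pytestmark = [pytest.mark.bgpd, pytest.mark.sharpd]

def test_marked_example():
    # `pytest -m "not sharpd"` (or a conftest hook keyed on missing daemons)
    # would deselect this test because of the module-level markers.
    assert True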