Diffstat (limited to 'tests')
34 files changed, 3299 insertions, 143 deletions
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf index 46831bb711..375bbea9ff 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce1/zebra.conf @@ -4,6 +4,8 @@ hostname ce1 ! interface lo ip address 99.0.0.1/32 + ip address 5.1.0.1/24 + ip address 6.0.2.1/24 ! interface ce1-eth0 description to r1 diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf index fb4d8cc9c4..90dd3c55b4 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce2/zebra.conf @@ -4,6 +4,8 @@ hostname ce2 ! interface lo ip address 99.0.0.2/32 + ip address 5.1.0.1/24 + ip address 6.0.2.1/24 ! interface ce2-eth0 description to r3 diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf index e316de5690..cf7396eb12 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/bgpd.conf @@ -19,6 +19,7 @@ router bgp 5227 network 5.1.3.0/24 route-map rm-nh network 6.0.1.0/24 route-map rm-nh network 6.0.2.0/24 route-map rm-nh-same + network 6.0.3.0/24 route-map rm-nh-same neighbor 192.168.1.1 activate exit-address-family ! diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf index 77a1163a4b..df6ac47b08 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce3/zebra.conf @@ -4,6 +4,7 @@ hostname ce3 ! interface lo ip address 99.0.0.3/32 + ip address 6.0.3.1/24 ! interface ce3-eth0 description to r4 diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf index 60d9e93108..9a6ca08a0b 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/bgpd.conf @@ -19,6 +19,7 @@ router bgp 5228 vrf ce4-cust2 network 5.4.3.0/24 route-map rm-nh network 6.0.1.0/24 route-map rm-nh network 6.0.2.0/24 route-map rm-nh-same + network 6.0.3.0/24 route-map rm-nh-same neighbor 192.168.2.1 activate exit-address-family ! diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf index e55c9e779a..0e3a736292 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/ce4/zebra.conf @@ -4,6 +4,7 @@ hostname ce4 ! interface ce4-cust2 ip address 99.0.0.4/32 + ip address 6.0.3.1/24 ! interface ce4-eth0 description to r4 diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py index 5161d8471f..b2bf5f5f63 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/customize.py @@ -175,6 +175,20 @@ def ltemplatePreRouterStartHook(): "setup {0} vrf {0}-cust1, {0}-eth4. 
enabled mpls input.".format(rtr) ) # configure cust2 VRFs & MPLS + rtrs = ["r1"] + cmds = [ + "ip link add {0}-cust3 type vrf table 20", + "ip link set dev {0}-cust3 up", + "ip link add {0}-cust4 type vrf table 30", + "ip link set dev {0}-cust4 up", + "ip link add {0}-cust5 type vrf table 40", + "ip link set dev {0}-cust5 up", + ] + for rtr in rtrs: + for cmd in cmds: + cc.doCmd(tgen, rtr, cmd.format(rtr)) + logger.info("setup {0} vrf {0}-cust3 and{0}-cust4.".format(rtr)) + # configure cust2 VRFs & MPLS rtrs = ["r4"] cmds = [ "ip link add {0}-cust2 type vrf table 20", diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf index 8d42cfc0d8..24e9f95372 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/bgpd.conf @@ -11,6 +11,7 @@ log file bgpd.log debugging #debug bgp vpn leak-from-vrf #debug bgp vpn label #debug bgp updates out +#debug bgp nht router bgp 5226 bgp router-id 1.1.1.1 @@ -39,6 +40,11 @@ router bgp 5227 vrf r1-cust1 neighbor 192.168.1.2 timers 3 10 address-family ipv4 unicast + network 10.2.3.4/32 + network 192.0.0.0/24 + + redistribute connected + neighbor 192.168.1.2 activate neighbor 192.168.1.2 next-hop-self @@ -51,5 +57,47 @@ router bgp 5227 vrf r1-cust1 exit-address-family +router bgp 5228 vrf r1-cust3 + bgp router-id 192.168.1.1 + + address-family ipv4 unicast + rd vpn export 10:13 + rt vpn import 52:100 + + import vpn + export vpn + exit-address-family + + +router bgp 5227 vrf r1-cust4 + no bgp network import-check + + bgp router-id 192.168.1.1 + + address-family ipv4 unicast + network 28.0.0.0/24 + + rd vpn export 10:14 + rt vpn export 52:100 + + import vpn + export vpn + exit-address-family + + +router bgp 5227 vrf r1-cust5 + bgp router-id 192.168.1.1 + + address-family ipv4 unicast + redistribute connected + + label vpn export 105 + rd vpn export 10:15 + rt vpn both 52:100 + + import vpn + export vpn + exit-address-family + ! end diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/staticd.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/staticd.conf new file mode 100644 index 0000000000..59430fdf99 --- /dev/null +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/staticd.conf @@ -0,0 +1,6 @@ +hostname r1 +log file staticd.log +! +vrf r1-cust1 + ip route 192.0.0.0/24 192.168.1.2 +exit-vrf diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf index 221bc7a839..e81bc6b2ab 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/r1/zebra.conf @@ -4,6 +4,9 @@ hostname r1 password zebra #debug zebra packet +#debug zebra rib detailed +#debug zebra dplane detailed +#debug zebra nexthop detail interface lo ip address 1.1.1.1/32 @@ -18,6 +21,14 @@ interface r1-eth4 ip address 192.168.1.1/24 no link-detect +interface r1-cust1 + ip address 10.4.5.6/24 + no link-detect + +interface r1-cust5 + ip address 29.0.0.1/32 + no link-detect + ip forwarding diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py index 91a7adf997..89369241a8 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_mpls.py @@ -81,3 +81,24 @@ if ret != False and found != None: "wait", "CE3->CE4 (loopback) ping", ) + luCommand( + "r1", + "ip vrf exec r1-cust1 ping 6.0.3.1 -I 10.4.5.6 -c 1", + " 0. 
packet loss", + "wait", + "R1(r1-cust1)->CE3/4 (loopback) ping", + ) + luCommand( + "r1", + "ip vrf exec r1-cust1 ping 6.0.3.1 -I 10.4.5.6 -c 1", + " 0. packet loss", + "pass", + "R1(r1-cust1)->CE3/4 (loopback) ping", + ) + luCommand( + "r1", + "ip vrf exec r1-cust5 ping 6.0.3.1 -I 29.0.0.1 -c 1", + " 0. packet loss", + "pass", + "R1(r1-cust5)->CE3/4 ( (loopback) ping", + ) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py index 75158b127e..e9647898ab 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_linux_vrf.py @@ -72,3 +72,53 @@ luCommand( "wait", "CE4->PE4 ping", ) +ret = luCommand( + "r1", + "ip vrf exec r1-cust5 ping 29.0.0.1 -I 29.0.0.1 -c 1", + " 0. packet loss", + "pass", + "Ping its own IP. Check https://bugzilla.kernel.org/show_bug.cgi?id=203483 if it fails", +) +luCommand( + "r1", + "ip vrf exec r1-cust5 ping 192.168.1.1 -I 29.0.0.1 -c 1", + " 0. packet loss", + "pass", + "R1(r1-cust5)->R1(r1-cust1 - r1-eth4) ping", +) +luCommand( + "r1", + "ip vrf exec r1-cust5 ping 192.168.1.2 -I 29.0.0.1 -c 1", + " 0. packet loss", + "wait", + "R1(r1-cust5)->CE1 ping", +) +luCommand( + "r1", + "ip vrf exec r1-cust5 ping 192.168.1.2 -I 29.0.0.1 -c 1", + " 0. packet loss", + "pass", + "R1(r1-cust5)->CE1 ping", +) +luCommand( + "r1", + "ip vrf exec r1-cust5 ping 99.0.0.1 -I 29.0.0.1 -c 1", + " 0. packet loss", + "pass", + "R1(r1-cust5)->CE1 (loopback) ping", +) +luCommand( + "r1", + "ip vrf exec r1-cust5 ping 5.1.0.1 -I 29.0.0.1 -c 1", + " 0. packet loss", + "wait", + "R1(r1-cust5)->CE1 (loopback) ping", + time=30, +) +luCommand( + "r1", + "ip vrf exec r1-cust5 ping 5.1.0.1 -I 29.0.0.1 -c 1", + " 0. 
packet loss", + "pass", + "R1(r1-cust5)->CE1 (loopback) ping", +) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py index 1e2758c1c9..3242e3bd3a 100644 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/check_routes.py @@ -54,15 +54,44 @@ bgpribRequireUnicastRoutes("ce4", "ipv4", "ce4-cust2", "Cust 4 routes in ce1", w # # r1 vtysh -c "show bgp vrf r1-cust1 ipv4" # -want_r1_cust1_routes = [ +want_r1_cust1_3_5_routes = [ {"p": "5.1.0.0/24", "n": "99.0.0.1"}, {"p": "5.1.1.0/24", "n": "99.0.0.1"}, {"p": "6.0.1.0/24", "n": "99.0.0.1"}, {"p": "6.0.2.0/24", "n": "99.0.0.1"}, + {"p": "10.2.3.4/32", "n": "0.0.0.0", "bp": False}, + {"p": "10.4.5.0/24", "n": "0.0.0.0", "bp": True}, + {"p": "28.0.0.0/24", "n": "0.0.0.0", "bp": True}, + {"p": "29.0.0.1/32", "n": "0.0.0.0", "bp": True}, {"p": "99.0.0.1/32", "n": "192.168.1.2"}, + {"p": "192.0.0.0/24", "n": "0.0.0.0", "bp": True}, + {"p": "192.168.1.0/24", "n": "0.0.0.0", "bp": True}, ] bgpribRequireUnicastRoutes( - "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_cust1_routes + "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_cust1_3_5_routes +) +bgpribRequireUnicastRoutes( + "r1", "ipv4", "r1-cust3", "Customer 3 routes in r1 vrf", want_r1_cust1_3_5_routes +) +bgpribRequireUnicastRoutes( + "r1", "ipv4", "r1-cust5", "Customer 5 routes in r1 vrf", want_r1_cust1_3_5_routes +) + +want_r1_cust4_routes = [ + {"p": "5.1.0.0/24", "n": "99.0.0.1", "exist": False}, + {"p": "5.1.1.0/24", "n": "99.0.0.1", "exist": False}, + {"p": "6.0.1.0/24", "n": "99.0.0.1", "exist": False}, + {"p": "6.0.2.0/24", "n": "99.0.0.1", "exist": False}, + {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False}, + {"p": "10.4.5.0/24", "n": "0.0.0.0", "exist": False}, + {"p": "28.0.0.0/24", "n": "0.0.0.0", "bp": True}, + {"p": "29.0.0.1/32", "n": "0.0.0.0", "exist": False}, + {"p": "99.0.0.1/32", "n": "192.168.1.2", "exist": False}, + {"p": "192.0.0.0/24", "n": "0.0.0.0", "exist": False}, + {"p": "192.168.1.0/24", "n": "0.0.0.0", "exist": False}, +] +bgpribRequireUnicastRoutes( + "r1", "ipv4", "r1-cust4", "Customer 4 routes in r1 vrf", want_r1_cust4_routes ) want_r3_cust1_routes = [ @@ -70,10 +99,20 @@ want_r3_cust1_routes = [ {"p": "5.1.1.0/24", "n": "99.0.0.2"}, {"p": "6.0.1.0/24", "n": "99.0.0.2"}, {"p": "6.0.2.0/24", "n": "99.0.0.2"}, + {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False}, + {"p": "28.0.0.0/24", "n": "1.1.1.1", "bp": True}, + {"p": "29.0.0.1/32", "n": "1.1.1.1", "bp": True}, {"p": "99.0.0.2/32", "n": "192.168.1.2"}, + {"p": "192.0.0.0/24", "n": "1.1.1.1", "bp": True}, + {"p": "192.168.1.0/24", "n": "1.1.1.1", "bp": True}, ] bgpribRequireUnicastRoutes( - "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_cust1_routes + "r3", + "ipv4", + "r3-cust1", + "Customer 1 routes in r3 vrf", + want_r3_cust1_routes, + retry=30, ) want_r4_cust1_routes = [ @@ -81,10 +120,20 @@ want_r4_cust1_routes = [ {"p": "5.1.3.0/24", "n": "99.0.0.3"}, {"p": "6.0.1.0/24", "n": "99.0.0.3"}, {"p": "6.0.2.0/24", "n": "99.0.0.3"}, + {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False}, + {"p": "28.0.0.0/24", "n": "1.1.1.1", "bp": True}, + {"p": "29.0.0.1/32", "n": "1.1.1.1", "bp": True}, {"p": "99.0.0.3/32", "n": "192.168.1.2"}, + {"p": "192.0.0.0/24", "n": "1.1.1.1", "bp": True}, + {"p": "192.168.1.0/24", "n": "1.1.1.1", "bp": True}, ] bgpribRequireUnicastRoutes( - "r4", "ipv4", "r4-cust1", "Customer 1 
routes in r4 vrf", want_r4_cust1_routes + "r4", + "ipv4", + "r4-cust1", + "Customer 1 routes in r4 vrf", + want_r4_cust1_routes, + retry=30, ) want_r4_cust2_routes = [ @@ -92,10 +141,20 @@ want_r4_cust2_routes = [ {"p": "5.4.3.0/24", "n": "99.0.0.4"}, {"p": "6.0.1.0/24", "n": "99.0.0.4"}, {"p": "6.0.2.0/24", "n": "99.0.0.4"}, + {"p": "10.2.3.4/32", "n": "0.0.0.0", "exist": False}, + {"p": "28.0.0.0/24", "n": "1.1.1.1", "bp": True}, + {"p": "29.0.0.1/32", "n": "1.1.1.1", "bp": True}, {"p": "99.0.0.4/32", "n": "192.168.2.2"}, + {"p": "192.0.0.0/24", "n": "1.1.1.1", "bp": True}, + {"p": "192.168.1.0/24", "n": "1.1.1.1", "bp": True}, ] bgpribRequireUnicastRoutes( - "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_cust2_routes + "r4", + "ipv4", + "r4-cust2", + "Customer 2 routes in r4 vrf", + want_r4_cust2_routes, + retry=30, ) ######################################################################## @@ -667,7 +726,7 @@ bgpribRequireUnicastRoutes( luCommand( "ce1", 'vtysh -c "show bgp ipv4 uni"', - "12 routes and 12", + "18 routes and 19", "wait", "Local and remote routes", 10, @@ -689,7 +748,7 @@ bgpribRequireUnicastRoutes( luCommand( "ce2", 'vtysh -c "show bgp ipv4 uni"', - "12 routes and 15", + "18 routes and 22", "wait", "Local and remote routes", 10, @@ -721,7 +780,7 @@ luCommand("r4", 'vtysh -c "show ip route vrf r4-cust2"') luCommand( "ce3", 'vtysh -c "show bgp ipv4 uni"', - "12 routes and 13", + "18 routes and 19", "wait", "Local and remote routes", 10, @@ -743,7 +802,7 @@ bgpribRequireUnicastRoutes( luCommand( "ce4", 'vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"', - "12 routes and 14", + "18 routes and 21", "wait", "Local and remote routes", 10, diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/bgpd.conf new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/bgpd.conf diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/zebra.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/zebra.conf new file mode 100644 index 0000000000..823a56d53f --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/ce1/zebra.conf @@ -0,0 +1,9 @@ +log file zebra.log +! +hostname ce1 +! +interface eth0 + ip address 172.16.0.1/24 +! +line vty +! diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/bgpd.conf new file mode 100644 index 0000000000..15779aa0d5 --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/bgpd.conf @@ -0,0 +1,41 @@ +frr defaults traditional +! +hostname pe1 +password zebra +! +log stdout notifications +log monitor notifications +log commands +! +router bgp 65001 + bgp router-id 192.0.2.1 + ! + segment-routing srv6 + locator default + exit + ! +! +router bgp 65001 vrf vrf10 + bgp router-id 192.0.2.1 + ! + address-family ipv4 unicast + redistribute connected + sid vpn export auto + rd vpn export 65001:10 + rt vpn both 0:10 + import vpn + export vpn + exit-address-family + ! +! +router bgp 65001 vrf vrf20 + bgp router-id 192.0.2.1 + ! + address-family ipv4 unicast + rd vpn export 65001:20 + rt vpn both 0:10 + import vpn + export vpn + exit-address-family + ! +! 
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/default_ipv4_vpn.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/default_ipv4_vpn.json new file mode 100644 index 0000000000..dc86d7c978 --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/default_ipv4_vpn.json @@ -0,0 +1,31 @@ +{ + "vrfName": "default", + "routerId": "192.0.2.1", + "localAS": 65001, + "routes": { + "routeDistinguishers": { + "65001:10": { + "172.16.0.0/24": [ + { + "valid": true, + "bestpath": true, + "pathFrom": "external", + "prefix": "172.16.0.0", + "prefixLen": 24, + "network": "172.16.0.0\/24", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf10", + "nexthops": [ + { + "hostname": "pe1", + "afi": "ipv4", + "used": true + } + ] + } + ] + } + } + } +} diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf10_ipv4_unicast.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf10_ipv4_unicast.json new file mode 100644 index 0000000000..ce2d5c19c3 --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf10_ipv4_unicast.json @@ -0,0 +1,25 @@ +{ + "vrfName": "vrf10", + "routerId": "192.0.2.1", + "localAS": 65001, + "routes": { + "172.16.0.0/24": [ + { + "valid": true, + "bestpath": true, + "pathFrom": "external", + "prefix": "172.16.0.0", + "prefixLen": 24, + "network": "172.16.0.0\/24", + "origin": "incomplete", + "nexthops": [ + { + "hostname": "pe1", + "afi": "ipv4", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4.json new file mode 100644 index 0000000000..2ce936b291 --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4.json @@ -0,0 +1,22 @@ +{ + "172.16.0.0\/24": [ + { + "prefix": "172.16.0.0\/24", + "prefixLen": 24, + "protocol": "bgp", + "vrfName": "vrf20", + "selected": true, + "destSelected": true, + "installed": true, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "vrf10", + "vrf": "vrf10", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4_unicast.json b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4_unicast.json new file mode 100644 index 0000000000..6a88d39a8c --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/results/vrf20_ipv4_unicast.json @@ -0,0 +1,27 @@ +{ + "vrfName": "vrf20", + "routerId": "192.0.2.1", + "localAS": 65001, + "routes": { + "172.16.0.0/24": [ + { + "valid": true, + "bestpath": true, + "pathFrom": "external", + "prefix": "172.16.0.0", + "prefixLen": 24, + "network": "172.16.0.0\/24", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf10", + "nexthops": [ + { + "hostname": "pe1", + "afi": "ipv4", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/zebra.conf b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/zebra.conf new file mode 100644 index 0000000000..52341fc4fc --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/pe1/zebra.conf @@ -0,0 +1,27 @@ +log file zebra.log +! +hostname pe1 +! +interface lo + ip address 10.0.0.1/32 +! +interface eth0 vrf vrf10 + ip address 172.16.0.254/24 +! +line vty +! +segment-routing + srv6 + locators + locator default + prefix 2001:db8:2::/64 block-len 40 node-len 24 func-bits 16 + exit + ! + exit + ! + exit + ! +exit +! +end +! 
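The zebra.conf above binds eth0 into vrf10 and defines the SRv6 locator, but the vrf10/vrf20 netdevices themselves are not created by FRR: setup_module() in the test file that follows creates them with iproute2 before the routers are started, which is what allows zebra to attach eth0 to vrf10 at startup. A short recap of that kernel-side plumbing, written as a sketch against an already-started topogen topology (same commands and same "pe1" gear as in the test):

```python
# Recap of the kernel-side VRF plumbing that pe1/zebra.conf relies on
# ("interface eth0 vrf vrf10"). These are the same iproute2 commands
# that setup_module() in the test below issues through the pe1 gear,
# before tgen.start_router() is called.
from lib.topogen import get_topogen

pe1 = get_topogen().gears["pe1"]
pe1.run("ip link add vrf10 type vrf table 10")  # source VRF, kernel table 10
pe1.run("ip link set vrf10 up")
pe1.run("ip link add vrf20 type vrf table 20")  # destination VRF for the leak
pe1.run("ip link set vrf20 up")
pe1.run("ip link set eth0 master vrf10")        # CE-facing interface joins vrf10
```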
diff --git a/tests/topotests/bgp_srv6l3vpn_route_leak/test_bgp_srv6l3vpn_route_leak.py b/tests/topotests/bgp_srv6l3vpn_route_leak/test_bgp_srv6l3vpn_route_leak.py new file mode 100755 index 0000000000..16f8adb3c5 --- /dev/null +++ b/tests/topotests/bgp_srv6l3vpn_route_leak/test_bgp_srv6l3vpn_route_leak.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python + +# Copyright (c) 2022, LINE Corporation +# Authored by Ryoga Saito <ryoga.saito@linecorp.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +import os +import re +import sys +import json +import functools +import pytest + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.common_config import required_linux_kernel_version + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + tgen.add_router("pe1") + tgen.add_router("ce1") + + tgen.add_link(tgen.gears["pe1"], tgen.gears["ce1"], "eth0", "eth0") + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + for rname, router in tgen.routers().items(): + router.load_config(TopoRouter.RD_ZEBRA, + os.path.join(CWD, '{}/zebra.conf'.format(rname))) + router.load_config(TopoRouter.RD_BGP, + os.path.join(CWD, '{}/bgpd.conf'.format(rname))) + + tgen.gears["pe1"].run("ip link add vrf10 type vrf table 10") + tgen.gears["pe1"].run("ip link set vrf10 up") + tgen.gears["pe1"].run("ip link add vrf20 type vrf table 20") + tgen.gears["pe1"].run("ip link set vrf20 up") + tgen.gears["pe1"].run("ip link set eth0 master vrf10") + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def open_json_file(path): + try: + with open(path, "r") as f: + return json.load(f) + except IOError: + assert False, "Could not read file {}".format(path) + + +def check(name, command, checker): + tgen = get_topogen() + router = tgen.gears[name] + + def _check(): + try: + return checker(router.vtysh_cmd(command)) + except: + return False + + logger.info('[+] check {} "{}"'.format(name, command)) + _, result = topotest.run_and_expect(_check, None, count=10, wait=0.5) + assert result is None, "Failed" + + +def check_vrf10_bgp_rib(output): + expected = open_json_file("%s/pe1/results/vrf10_ipv4_unicast.json" % CWD) + actual = json.loads(output) + return topotest.json_cmp(actual, expected) + + +def check_default_bgp_vpn_rib(output): + expected = open_json_file("%s/pe1/results/default_ipv4_vpn.json" % CWD) + actual = json.loads(output) + return topotest.json_cmp(actual, expected) + + +def check_vrf20_bgp_rib(output): + expected = open_json_file("%s/pe1/results/vrf20_ipv4_unicast.json" % CWD) + actual = json.loads(output) + return 
topotest.json_cmp(actual, expected) + + +def check_vrf20_rib(output): + expected = open_json_file("%s/pe1/results/vrf20_ipv4.json" % CWD) + actual = json.loads(output) + return topotest.json_cmp(actual, expected) + + +def test_rib(): + check("pe1", "show bgp vrf vrf10 ipv4 unicast json", check_vrf10_bgp_rib) + check("pe1", "show bgp ipv4 vpn json", check_default_bgp_vpn_rib) + check("pe1", "show bgp vrf vrf20 ipv4 unicast json", check_vrf20_bgp_rib) + check("pe1", "show ip route vrf vrf20 json", check_vrf20_rib) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_unique_rid/bgp_unique_rid.json b/tests/topotests/bgp_unique_rid/bgp_unique_rid.json new file mode 100644 index 0000000000..c42ce29954 --- /dev/null +++ b/tests/topotests/bgp_unique_rid/bgp_unique_rid.json @@ -0,0 +1,505 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:DB8:F::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4": { + "ipv4": "auto", + "ipv6": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r5": { + "ipv4": "auto", + "ipv6": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r4-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link7": { + "ipv4": 
"auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {}, + "r3-link1": {}, + "r3-link2": {}, + "r3-link3": {}, + "r3-link4": {}, + "r3-link5": {}, + "r3-link6": {}, + "r3-link7": {} + } + }, + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {}, + "r3-link1": {}, + "r3-link2": {}, + "r3-link3": {}, + "r3-link4": {}, + "r3-link5": {}, + "r3-link6": {}, + "r3-link7": {} + } + }, + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + }, + "ospf": { + "router_id": "100.1.1.3", + "neighbors": { + "r4": {}, + "r5": {} + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "r4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {}, + "r4-link1": {}, + "r4-link2": {}, + "r4-link3": {}, + "r4-link4": {}, + "r4-link5": {}, + "r4-link6": {}, + "r4-link7": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {}, + "r4-link1": {}, + "r4-link2": {}, + "r4-link3": {}, + "r4-link4": {}, + "r4-link5": {}, + "r4-link6": {}, + "r4-link7": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + }, + "ospf": { + "router_id": "10.10.10.10", + "neighbors": { + "r3": {} + } + } + }, + "r5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + }, + "ospf": { + "router_id": "100.1.1.5", + "neighbors": { + "r3": {} + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_unique_rid/bgp_unique_rid_vrf.json b/tests/topotests/bgp_unique_rid/bgp_unique_rid_vrf.json new file mode 100644 index 0000000000..1e280f1847 --- /dev/null +++ b/tests/topotests/bgp_unique_rid/bgp_unique_rid_vrf.json @@ -0,0 +1,529 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:DB8:F::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "GREEN" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "GREEN" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "GREEN" + } + }, + "bgp": [ + { + "local_as": "100", + "vrf": "GREEN", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + } + ], + "vrfs": [ + { + "name": "GREEN", + "id": "1" + } + ] + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "GREEN" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "GREEN" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "GREEN" + } + }, + "bgp": [ + { + "local_as": "100", + "vrf": "GREEN", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + } + ], + "vrfs": [ + { + "name": "GREEN", + "id": "1" + } + ] + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "GREEN" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "GREEN" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "GREEN" + }, + "r4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r5": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + }, + "r4-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": [ + { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3": {}, + "r3-link1": {}, + "r3-link2": {}, + "r3-link3": {}, + "r3-link4": {}, + "r3-link5": {}, + "r3-link6": {}, + "r3-link7": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3": 
{}, + "r3-link1": {}, + "r3-link2": {}, + "r3-link3": {}, + "r3-link4": {}, + "r3-link5": {}, + "r3-link6": {}, + "r3-link7": {} + } + } + } + } + } + } + }, + { + "local_as": "100", + "vrf": "GREEN", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + }, + { + "local_as": "100", + "vrf": "RED", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + } + ], + "vrfs": [ + { + "name": "RED", + "id": "1" + }, + { + "name": "GREEN", + "id": "2" + } + ] + }, + "r4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": [{ + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {}, + "r4-link1": {}, + "r4-link2": {}, + "r4-link3": {}, + "r4-link4": {}, + "r4-link5": {}, + "r4-link6": {}, + "r4-link7": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {}, + "r4-link1": {}, + "r4-link2": {}, + "r4-link3": {}, + "r4-link4": {}, + "r4-link5": {}, + "r4-link6": {}, + "r4-link7": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + }] + }, + "r5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "RED" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + } + }, + "bgp": [ + { + "local_as": "300", + "vrf":"RED", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + } + ], + "vrfs": [ + { + "name": "RED", + "id": "1" + } + ] + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_unique_rid/test_bgp_unique_rid.py b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid.py new file mode 100644 index 0000000000..7156310536 --- /dev/null +++ b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid.py @@ -0,0 +1,906 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2022 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +import sys +import time +import pytest +import inspect +import os +from copy import deepcopy + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +"""Following tests are covered to test bgp unique rid functionality. +1. Verify eBGP session when same and different router ID is configured. +2. Verify iBGP session when same and different router ID is configured. +3. Verify two different eBGP sessions initiated with same router ID. +4. Chaos - Verify bgp unique rid functionality in chaos scenarios. +5. Chaos - Verify bgp unique rid functionality when router reboots with same loopback id. +6. Chaos - Verify bgp unique rid functionality when router reboots without any ip addresses. +""" + +################################# +# TOPOLOGY +################################# +""" + + +-------+ + +--------- | R2 | + | +-------+ + |iBGP | + +-------+ | + | R1 | |iBGP + +-------+ | + | | + | iBGP +-------+ eBGP +-------+ + +---------- | R3 |========= | R4 | + +-------+ +-------+ + | + |eBGP + | + +-------+ + | R5 | + +-------+ + + +""" + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from lib.topojson import build_config_from_json +from lib.topolog import logger + +pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd, pytest.mark.staticd] + +# Required to instantiate the topology builder class. 
+from lib.common_config import ( + start_topology, + write_test_header, + step, + write_test_footer, + verify_rib, + check_address_types, + reset_config_on_routers, + check_router_status, + stop_router, + kill_router_daemons, + start_router_daemons, + start_router, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, +) + +# Global variables +topo = None +bgp_convergence = False +NETWORK = { + "ipv4": [ + "192.168.20.1/32", + "192.168.20.2/32", + "192.168.21.1/32", + "192.168.21.2/32", + "192.168.22.1/32", + "192.168.22.2/32", + ], + "ipv6": [ + "fc07:50::1/128", + "fc07:50::2/128", + "fc07:150::1/128", + "fc07:150::2/128", + "fc07:1::1/128", + "fc07:1::2/128", + ], +} + +bgp_convergence = False +ADDR_TYPES = check_address_types() +routerid = {"ipv4": "10.10.10.14", "ipv6": "fd00:0:0:3::2"} + + +def setup_module(mod): + """setup_module. + + Set up the pytest environment + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + json_file = "{}/bgp_unique_rid.json".format(CWD) + tgen = Topogen(json_file, mod.__name__) + global topo + topo = tgen.json_topo + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start daemons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global bgp_convergence + global ADDR_TYPES + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether BGP is converged + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# Tests starting +##################################################### + + +def test_bgp_unique_rid_ebgp_p0(): + """ + TC: 1 + Verify eBGP session when same and different router ID is configured. 
+ """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Base config should be up, verify using BGP convergence on all \ + the routers for IPv4 and IPv6 nbrs" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R4 and R3 10.10.10.10") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10"}}, + "r4": {"bgp": {"router_id": "10.10.10.10"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R5 and R3 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10"}}, + "r5": {"bgp": {"router_id": "10.10.10.10"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("modify the router id on r3 to different router id (11.11.11.11)") + input_dict = {"r3": {"bgp": {"router_id": "11.11.11.11"}}} + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Reset bgp process") + step("Verify neighbours are in ESTAB state.") + dut = "r3" + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Clear ip bgp process with *") + step("Verify neighbours are in ESTAB state.") + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Configure neighbours between R3 and R4 in EVPN address family.") + input_dict = { + "r3": { + "bgp": { + "address_family": { + "l2vpn": { + "evpn": { + "advertise": { + "ipv4": {"unicast": {}}, + "ipv6": {"unicast": {}}, + } + } + } + } + } + }, + "r4": { + "bgp": { + "address_family": { + "l2vpn": { + "evpn": { + "advertise": { + "ipv4": {"unicast": {}}, + "ipv6": {"unicast": {}}, + } + } + } + } + } + }, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_bgp_unique_rid_ibgp_p0(): + """ + TC: 2 + Verify iBGP session when same and different router ID is configured. 
+ """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Base config should be up, verify using BGP convergence on all \ + the routers for IPv4 and IPv6 nbrs" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R1 and R3 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10"}}, + "r1": {"bgp": {"router_id": "10.10.10.10"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in idle state.") + result = verify_bgp_convergence(tgen, topo, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure the same router id between R2 and R3 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10"}}, + "r2": {"bgp": {"router_id": "10.10.10.10"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in idle state.") + result = verify_bgp_convergence(tgen, topo, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("modify the router id on r3 to different router id (11.11.11.11)") + input_dict = {"r3": {"bgp": {"router_id": "11.11.11.11"}}} + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo, dut="r3") + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Reset bgp process") + step("Verify neighbours are in ESTAB state.") + dut = "r3" + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Clear ip bgp process with *") + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_bgp_unique_rid_multi_bgp_nbrs_p0(): + """ + TC: 3 + 3. 
Verify two different eBGP sessions initiated with same router ID + + """ + tgen = get_topogen() + global bgp_convergence, topo + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Base config should be up, verify using BGP convergence on all \ + the routers for IPv4 and IPv6 nbrs" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R3, R4 and R5 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10"}}, + "r4": {"bgp": {"router_id": "10.10.10.10"}}, + "r5": {"bgp": {"router_id": "10.10.10.10"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Configure the same IP address on on R4 and R5 loopback address and \ + change the neighborship to loopback neighbours between R3 to R4 \ + and R3 to R5 respectively." + ) + + topo1 = deepcopy(topo) + + for rtr in ["r4", "r5"]: + topo1["routers"][rtr]["links"]["lo"]["ipv4"] = "192.168.1.1/32" + + topo1["routers"]["r3"]["links"]["lo"]["ipv4"] = "192.168.1.3/32" + build_config_from_json(tgen, topo1, save_bkup=False) + + step( + "change the neighborship to loopback neighbours between R3 to R4 and R3 to R5 respectively." + ) + for rtr in ["r4", "r5"]: + configure_bgp_on_rtr = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": {"neighbor": {rtr: {"dest_link": {"lo": {}}}}} + } + } + }, + } + } + result = create_router_bgp(tgen, topo1, configure_bgp_on_rtr) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Change the IP address on the R4 loopback.") + topo1["routers"]["r4"]["links"]["lo"]["ipv4"] = "192.168.1.4/32" + build_config_from_json(tgen, topo1, save_bkup=False) + + step("Verify neighbours should be again in ESTAB state. (show ip bgp neighbours)") + bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Clear ip bgp process with *") + result = clear_bgp_and_verify(tgen, topo, router="r3") + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_bgp_unique_rid_chaos1_p2(): + """ + TC: 4 + 4. Chaos - Verify bgp unique rid functionality in chaos scenarios. 
+ + """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Base config should be up, verify using BGP convergence on all \ + the routers for IPv4 and IPv6 nbrs" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R3, R4 and R5 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10"}}, + "r4": {"bgp": {"router_id": "10.10.10.10"}}, + "r5": {"bgp": {"router_id": "10.10.10.10"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Verify eBGP session when same router ID is configured and bgpd process is restarted" + ) + + # restart bgpd router and verify + kill_router_daemons(tgen, "r3", ["bgpd"]) + start_router_daemons(tgen, "r3", ["bgpd"]) + + step( + "The session should be established between R3 & R4. " + "Once after restart bgp, neighbor should come back up ." + ) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step( + "Verify eBGP session when same router ID is configured and neighbor shutdown is issued and again no shutdown." 
+ ) + + input_dict = { + "r3": { + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3-link1": {"shutdown": True}, + "r3-link2": {"shutdown": True}, + "r3-link3": {"shutdown": True}, + "r3-link4": {"shutdown": True}, + "r3-link5": {"shutdown": True}, + "r3-link6": {"shutdown": True}, + "r3-link7": {"shutdown": True}, + } + }, + "r5": {"dest_link": {"r3": {"shutdown": True}}}, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3-link1": {"shutdown": True}, + "r3-link2": {"shutdown": True}, + "r3-link3": {"shutdown": True}, + "r3-link4": {"shutdown": True}, + "r3-link5": {"shutdown": True}, + "r3-link6": {"shutdown": True}, + "r3-link7": {"shutdown": True}, + } + }, + "r5": {"dest_link": {"r3": {"shutdown": True}}}, + } + } + }, + }, + } + } + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "r3": { + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3-link1": {"shutdown": False}, + "r3-link2": {"shutdown": False}, + "r3-link3": {"shutdown": False}, + "r3-link4": {"shutdown": False}, + "r3-link5": {"shutdown": False}, + "r3-link6": {"shutdown": False}, + "r3-link7": {"shutdown": False}, + } + }, + "r5": {"dest_link": {"r3": {"shutdown": False}}}, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3-link1": {"shutdown": False}, + "r3-link2": {"shutdown": False}, + "r3-link3": {"shutdown": False}, + "r3-link4": {"shutdown": False}, + "r3-link5": {"shutdown": False}, + "r3-link6": {"shutdown": False}, + "r3-link7": {"shutdown": False}, + } + }, + "r5": {"dest_link": {"r3": {"shutdown": False}}}, + } + } + }, + }, + } + } + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "The session should be established between R3 & R4. " + "Once after restart bgp, neighbor should come back up ." + ) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step( + "Verify eBGP session when same router ID is configured and neighbor config is deleted & reconfigured." + ) + + input_dict = { + "r3": { + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3-link1": {}, + "r3-link2": {}, + "r3-link3": {}, + "r3-link4": {}, + "r3-link5": {}, + "r3-link6": {}, + "r3-link7": {}, + } + }, + "r5": {"dest_link": {"r3": {}}}, + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3-link1": {}, + "r3-link2": {}, + "r3-link3": {}, + "r3-link4": {}, + "r3-link5": {}, + "r3-link6": {}, + "r3-link7": {}, + } + }, + "r5": {"dest_link": {"r3": {}}}, + } + } + }, + }, + } + } + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "The session should be established between R3 & R4. " + "Once after restart bgp, neighbor should come back up ." 
+ ) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step( + "Verify eBGP session when same router ID is configured and FRR router is restarted." + ) + stop_router(tgen, "r3") + start_router(tgen, "r3") + + step( + "The session should be established between R3 & R4. " + "Once after restart bgp, neighbor should come back up ." + ) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step( + "Verify eBGP session when same router ID is configured and zebra process is restarted" + ) + + kill_router_daemons(tgen, "r3", ["zebra"]) + start_router_daemons(tgen, "r3", ["zebra"]) + + step( + "The session should be established between R3 & R4. " + "Once after restart bgp, neighbor should come back up ." + ) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + write_test_footer(tc_name) + + +def test_bgp_unique_rid_chaos3_p2(): + """ + TC: 4 + 4. Chaos - Verify bgp unique rid functionality when router reboots with same loopback id. + + """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + global topo + topo1 = deepcopy(topo) + + for rtr in topo["routers"].keys(): + topo1["routers"][rtr]["links"]["lo"]["ipv4"] = "192.168.1.1/32" + + topo1["routers"]["r3"]["links"]["lo"]["ipv4"] = "192.168.1.3/32" + build_config_from_json(tgen, topo1, save_bkup=False) + + step("verify bgp convergence before starting test case") + + bgp_convergence = verify_bgp_convergence(tgen, topo1) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step( + "Configure loopback on R1 to R5 with IP address 1.1.1.1 on all the routers. Change neighborship on all the routers using loopback neighborship ids." 
+ ) + for rtr in ["r1", "r2", "r4", "r5"]: + configure_bgp_on_rtr = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": {"neighbor": {rtr: {"dest_link": {"lo": {}}}}} + } + } + }, + } + } + result = create_router_bgp(tgen, topo1, configure_bgp_on_rtr) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Reboot the router (restart frr) or using watch frr.") + stop_router(tgen, "r3") + start_router(tgen, "r3") + + step("Neighbors between R3, R4 and R3 to R5 should be in ESTB state.") + bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Clear bgp process.") + clear_bgp_and_verify(tgen, topo, "r3") + + step("Neighbors between R3, R4 and R3 to R5 should be in ESTB state.") + bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + write_test_footer(tc_name) + + +def test_bgp_unique_rid_chaos4_p2(): + """ + TC: 6 + 6. Chaos - Verify bgp unique rid functionality when router reboots without any ip addresses. + + """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + reset_config_on_routers(tgen) + + global topo + topo1 = deepcopy(topo) + topo2 = deepcopy(topo) + + step( + "Configure base config as per the topology without loopback as well as Ip address on any of the interface." 
+ ) + for rtr in topo["routers"].keys(): + for intf in topo["routers"][rtr]["links"].keys(): + topo1["routers"][rtr]["links"][intf].pop("ipv4") + topo1["routers"][rtr]["links"][intf].pop("ipv6") + if intf is "lo": + topo1["routers"][rtr]["links"][intf].pop("ipv4") + + build_config_from_json(tgen, topo1, save_bkup=False) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Configure the ip addresses on the physical interfaces") + build_config_from_json(tgen, topo2, save_bkup=False) + + step("All the neighbors should be in ESTAB state.") + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Configure loopback addresses with higher IP address ") + build_config_from_json(tgen, topo, save_bkup=False) + + step("All the neighbors should be in ESTAB state.") + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Reboot the router (restart frr) or using watch frr.") + stop_router(tgen, "r3") + start_router(tgen, "r3") + + step("Neighbors between R3, R4 and R3 to R5 should be in ESTB state.") + bgp_convergence = verify_bgp_convergence(tgen, topo, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_unique_rid/test_bgp_unique_rid_vrf.py b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid_vrf.py new file mode 100644 index 0000000000..e009efee87 --- /dev/null +++ b/tests/topotests/bgp_unique_rid/test_bgp_unique_rid_vrf.py @@ -0,0 +1,479 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2022 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +import sys +import time +import pytest +import inspect +import os +from copy import deepcopy + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +"""Following tests are covered to test bgp unique rid functionality. +1. Verify iBGP session when same and different router ID is configured in user VRF(GREEN). +2. Verify eBGP session when same and different router ID is configured in user vrf (VRF RED) +3. 
Verify two different eBGP sessions initiated with same router ID in user VRf (RED and GREEN) +""" + +################################# +# TOPOLOGY +################################# +""" + + +-------+ + +--------- | R2 | + | +-------+ + |iBGP | + +-------+ | + | R1 | |iBGP + +-------+ | + | | + | iBGP +-------+ eBGP +-------+ + +---------- | R3 |========= | R4 | + +-------+ +-------+ + | + |eBGP + | + +-------+ + | R5 | + +-------+ + + +""" + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from lib.topojson import build_config_from_json +from lib.topolog import logger + +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + +# Required to instantiate the topology builder class. +from lib.common_config import ( + start_topology, + write_test_header, + step, + write_test_footer, + check_address_types, + reset_config_on_routers, + check_router_status, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, +) + +# Global variables +topo = None +bgp_convergence = False +NETWORK = { + "ipv4": [ + "192.168.20.1/32", + "192.168.20.2/32", + "192.168.21.1/32", + "192.168.21.2/32", + "192.168.22.1/32", + "192.168.22.2/32", + ], + "ipv6": [ + "fc07:50::1/128", + "fc07:50::2/128", + "fc07:150::1/128", + "fc07:150::2/128", + "fc07:1::1/128", + "fc07:1::2/128", + ], +} + +bgp_convergence = False +ADDR_TYPES = check_address_types() + + +def setup_module(mod): + """setup_module. + + Set up the pytest environment + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + json_file = "{}/bgp_unique_rid_vrf.json".format(CWD) + tgen = Topogen(json_file, mod.__name__) + global topo + topo = tgen.json_topo + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start daemons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global bgp_convergence + global ADDR_TYPES + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether BGP is converged + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# Tests starting +##################################################### + + +def test_bgp_unique_rid_ebgp_vrf_p0(): + """ + TC: 1 + Verify iBGP session when same and different router ID is configured in user VRF(GREEN). 
+ """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Base config should be up, verify using BGP convergence on all \ + the routers for IPv4 and IPv6 nbrs" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R4 and R3 10.10.10.10") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}}, + "r4": {"bgp": {"router_id": "10.10.10.10", "local_as": 200, "vrf": "RED"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R5 and R3 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}}, + "r5": {"bgp": {"router_id": "10.10.10.10", "local_as": 300, "vrf": "RED"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("modify the router id on r3 to different router id (11.11.11.11)") + input_dict = { + "r3": {"bgp": {"router_id": "11.11.11.11", "local_as": 100, "vrf": "RED"}} + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Reset bgp process") + step("Verify neighbours are in ESTAB state.") + dut = "r3" + result = clear_bgp_and_verify(tgen, topo, router="r3") + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Clear ip bgp process with *") + step("Verify neighbours are in ESTAB state.") + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Configure neighbours between R3 and R4 in EVPN address family.") + input_dict = { + "r3": { + "bgp": { + "local_as": 100, + "vrf": "RED", + "address_family": { + "l2vpn": { + "evpn": { + "advertise": { + "ipv4": {"unicast": {}}, + "ipv6": {"unicast": {}}, + } + } + } + }, + } + }, + "r4": { + "bgp": { + "local_as": 200, + "vrf": "RED", + "address_family": { + "l2vpn": { + "evpn": { + "advertise": { + "ipv4": {"unicast": {}}, + "ipv6": {"unicast": {}}, + } + } + } + }, + } + }, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + 
write_test_footer(tc_name) + + +def test_bgp_unique_rid_ibgp_vrf_p0(): + """ + TC: 2 + Verify eBGP session when same and different router ID is configured in user vrf (VRF RED) + """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Base config should be up, verify using BGP convergence on all \ + the routers for IPv4 and IPv6 nbrs" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R1 and R3 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}}, + "r1": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R2 and R3 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}}, + "r2": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("modify the router id on r3 to different router id (11.11.11.11)") + input_dict = { + "r3": {"bgp": {"router_id": "11.11.11.11", "local_as": 100, "vrf": "RED"}} + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo, dut="r3") + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Reset bgp process") + step("Verify neighbours are in ESTAB state.") + dut = "r3" + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Clear ip bgp process with *") + result = clear_bgp_and_verify(tgen, topo, dut) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_bgp_unique_rid_multi_bgp_nbrs_vrf_p0(): + """ + TC: 3 + Verify two different eBGP sessions initiated with same router ID in user VRf (RED and GREEN) + + """ + tgen = get_topogen() + global bgp_convergence, topo + + if bgp_convergence is not True: + pytest.skip("skipped because of BGP Convergence failure") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Base config should be up, verify using BGP convergence on all \ + the routers for IPv4 and IPv6 nbrs" + ) + + 
result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure the same router id between R3, R4 and R5 (10.10.10.10)") + input_dict = { + "r3": {"bgp": {"router_id": "10.10.10.10", "local_as": 100, "vrf": "RED"}}, + "r4": {"bgp": {"router_id": "10.10.10.10", "local_as": 200, "vrf": "RED"}}, + "r5": {"bgp": {"router_id": "10.10.10.10", "local_as": 300, "vrf": "RED"}}, + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify neighbours are in ESTAB state.") + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Configure the same IP address on on R4 and R5 loopback address and \ + change the neighborship to loopback neighbours between R3 to R4 \ + and R3 to R5 respectively." + ) + + topo1 = deepcopy(topo) + + for rtr in ["r4", "r5"]: + topo1["routers"][rtr]["links"]["lo"]["ipv4"] = "192.168.1.1/32" + + topo1["routers"]["r3"]["links"]["lo"]["ipv4"] = "192.168.1.3/32" + build_config_from_json(tgen, topo1, save_bkup=False) + + step( + "change the neighborship to loopback neighbours between R3 to R4 and R3 to R5 respectively." + ) + for rtr in ["r4", "r5"]: + configure_bgp_on_rtr = { + "r3": { + "bgp": { + "local_as": 100, + "vrf": "RED", + "address_family": { + "ipv4": { + "unicast": {"neighbor": {rtr: {"dest_link": {"lo": {}}}}} + } + }, + }, + } + } + result = create_router_bgp(tgen, topo1, configure_bgp_on_rtr) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Change the IP address on the R4 loopback.") + topo1["routers"]["r4"]["links"]["lo"]["ipv4"] = "192.168.1.4/32" + build_config_from_json(tgen, topo1, save_bkup=False) + + step("Verify neighbours should be again in ESTAB state. (show ip bgp neighbours)") + bgp_convergence = verify_bgp_convergence(tgen, topo1, dut="r3") + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Clear ip bgp process with *") + result = clear_bgp_and_verify(tgen, topo, router="r3") + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf b/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf index 03dfbf9322..0540a62096 100644 --- a/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf +++ b/tests/topotests/bgp_vrf_route_leak_basic/r1/bgpd.conf @@ -1,5 +1,11 @@ hostname r1 + +#debug bgp vpn leak-to-vrf +#debug bgp vpn leak-from-vrf +#debug bgp nht + + router bgp 99 vrf DONNA no bgp ebgp-requires-policy address-family ipv4 unicast diff --git a/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf b/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf index 35038557df..731a00829d 100644 --- a/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf +++ b/tests/topotests/bgp_vrf_route_leak_basic/r1/zebra.conf @@ -16,3 +16,9 @@ int dummy4 ip address 10.0.3.1/24 no shut ! +int EVA + no shut +! +int DONNA + no shut +! 
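Note: the rewritten test_bgp-vrf-route-leak-basic.py below no longer checks only zebra's RIB; it also polls the kernel FIB and runs cross-VRF pings through the router's cmd_raises(), as the new check_bgp_fib()/check_bgp_ping() helpers do. A minimal standalone sketch of that polling pattern, assuming a topotest router object named r1 and the per-VRF table ids 1001/1002 hard-coded by the test (the helper name here is hypothetical and not part of the commit):

import time

def wait_for_leaked_fib(router, table, nh_vrf, retries=5, wait=0.1):
    # Poll the kernel FIB until a route leaked via nh_vrf appears in the VRF's table.
    # grep exits non-zero when nothing matches, which makes cmd_raises() raise.
    cmd = "ip route show table %s | grep %s" % (table, nh_vrf)
    for _ in range(retries):
        try:
            return True, router.cmd_raises(cmd)
        except Exception:
            time.sleep(wait)
    return False, ""

# e.g. ok, out = wait_for_leaked_fib(r1, 1001, "EVA")  # DONNA's table, leaked via EVA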
diff --git a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py index 191a0b53ec..be07c85997 100644 --- a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py +++ b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py @@ -29,6 +29,7 @@ import os import sys from functools import partial import pytest +import time CWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(CWD, "../")) @@ -77,7 +78,117 @@ def teardown_module(mod): tgen.stop_topology() -def test_vrf_route_leak(): +def check_bgp_rib(router, vrf, in_fib): + if in_fib: + attr = [{"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]}] + else: + attr = [{"protocol": "bgp", "nexthops": []}] + + if vrf == "DONNA": + expect = { + "10.0.0.0/24": [ + { + "protocol": "connected", + } + ], + "10.0.1.0/24": attr, + "10.0.2.0/24": [{"protocol": "connected"}], + "10.0.3.0/24": attr, + } + else: + expect = { + "10.0.0.0/24": attr, + "10.0.1.0/24": [ + { + "protocol": "connected", + } + ], + "10.0.2.0/24": attr, + "10.0.3.0/24": [ + { + "protocol": "connected", + } + ], + } + + test_func = partial( + topotest.router_json_cmp, router, "show ip route vrf %s json" % vrf, expect + ) + return topotest.run_and_expect(test_func, None, count=10, wait=0.5) + + +def check_bgp_fib(router, vrf, in_rib): + # Check FIB + # DONNA + # 10.0.1.0/24 dev EVA proto bgp metric 20 + # 10.0.3.0/24 dev EVA proto bgp metric 20 + # EVA + # 10.0.0.0/24 dev DONNA proto bgp metric 20 + # 10.0.2.0/24 dev DONNA proto bgp metric 20 + + if vrf == "DONNA": + table = 1001 + nh_vrf = "EVA" + else: + table = 1002 + nh_vrf = "DONNA" + + negate = "" if in_rib else "! " + + cmd = "%sip route show table %s | grep %s" % (negate, table, nh_vrf) + result = False + retry = 5 + output = "" + while retry: + retry -= 1 + try: + output = router.cmd_raises(cmd) + result = True + break + except: + time.sleep(0.1) + + logger.info("VRF %s leaked FIB content %s: %s", vrf, cmd, output) + + return result, output + + +def check_bgp_ping(router, vrf): + if vrf == "DONNA": + cmd = "ip vrf exec DONNA ping -c1 10.0.1.1 -I 10.0.0.1" + else: + cmd = "ip vrf exec EVA ping -c1 10.0.0.1 -I 10.0.1.1" + + result = False + retry = 5 + output = "" + while retry: + retry -= 1 + try: + output = router.cmd_raises(cmd) + result = True + break + except: + time.sleep(0.1) + + return result, output + + +def check_bgp_ping_own_ip(router): + cmd = "ip vrf exec DONNA ping -c1 10.0.0.1 -I 10.0.0.1" + + output = "" + try: + output = router.cmd_raises(cmd) + result = True + except: + result = False + pass + + return result, output + + +def test_vrf_route_leak_test1(): logger.info("Ensure that routes are leaked back and forth") tgen = get_topogen() # Don't run this test if we have any failure. @@ -86,53 +197,86 @@ def test_vrf_route_leak(): r1 = tgen.gears["r1"] - # Test DONNA VRF. 
- expect = { - "10.0.0.0/24": [ - { - "protocol": "connected", - } - ], - "10.0.1.0/24": [ - {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} - ], - "10.0.2.0/24": [{"protocol": "connected"}], - "10.0.3.0/24": [ - {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} - ], - } - - test_func = partial( - topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect + result, output = check_bgp_ping_own_ip(r1) + assert ( + result + ), "Ping from VRF fails - check https://bugzilla.kernel.org/show_bug.cgi?id=203483\n:{}".format( + output ) - result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5) - assert result, "BGP VRF DONNA check failed:\n{}".format(diff) - - # Test EVA VRF. - expect = { - "10.0.0.0/24": [ - {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} - ], - "10.0.1.0/24": [ - { - "protocol": "connected", - } - ], - "10.0.2.0/24": [ - {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} - ], - "10.0.3.0/24": [ - { - "protocol": "connected", - } - ], - } - test_func = partial( - topotest.router_json_cmp, r1, "show ip route vrf EVA json", expect + for vrf in ["EVA", "DONNA"]: + result, diff = check_bgp_rib(r1, vrf, True) + assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff) + result, output = check_bgp_fib(r1, vrf, True) + assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output) + result, output = check_bgp_ping(r1, vrf) + assert result, "Ping from VRF {} failed:\n{}".format(vrf, output) + + +def test_vrf_route_leak_test2(): + logger.info( + "Ensure that leaked are still present after VRF iface IP address deletion" ) - result, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5) - assert result, "BGP VRF EVA check failed:\n{}".format(diff) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + logger.info("Adding and removing an IPv4 address to EVA and DONNA VRF ifaces") + r1.cmd("ip address add 1.1.1.1/32 dev EVA && ip address del 1.1.1.1/32 dev EVA") + r1.cmd("ip address add 2.2.2.2/32 dev DONNA && ip address del 2.2.2.2/32 dev DONNA") + + for vrf in ["EVA", "DONNA"]: + result, diff = check_bgp_rib(r1, vrf, True) + assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff) + result, output = check_bgp_fib(r1, vrf, True) + assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output) + result, output = check_bgp_ping(r1, vrf) + assert result, "Ping from VRF {} failed:\n{}".format(vrf, output) + + +def test_vrf_route_leak_test3(): + logger.info("Ensure that setting down the VRF ifaces invalidates leaked routes") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + logger.info("Setting down EVA and DONNA VRF ifaces") + r1.cmd("ip link set EVA down") + r1.cmd("ip link set DONNA down") + + for vrf in ["EVA", "DONNA"]: + result, diff = check_bgp_rib(r1, vrf, False) + assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff) + result, output = check_bgp_fib(r1, vrf, False) + assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output) + + +def test_vrf_route_leak_test4(): + logger.info("Ensure that setting up the VRF ifaces validates leaked routes") + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + logger.info("Setting up EVA and DONNA VRF ifaces") + r1.cmd("ip link set EVA up") + r1.cmd("ip link set DONNA up") + + for vrf in ["EVA", "DONNA"]: + result, diff = check_bgp_rib(r1, vrf, True) + assert result, "BGP RIB VRF {} check failed:\n{}".format(vrf, diff) + result, output = check_bgp_fib(r1, vrf, True) + assert result, "BGP FIB VRF {} check failed:\n{}".format(vrf, output) + result, output = check_bgp_ping(r1, vrf) + assert result, "Ping from VRF {} failed:\n{}".format(vrf, output) def test_memory_leak(): diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index a09b0b8b2e..2be0f5773b 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -1909,7 +1909,7 @@ def clear_bgp(tgen, addr_type, router, vrf=None, neighbor=None): logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) -def clear_bgp_and_verify(tgen, topo, router): +def clear_bgp_and_verify(tgen, topo, router, rid=None): """ This API is to clear bgp neighborship and verify bgp neighborship is coming up(BGP is converged) usinf "show bgp summary json" command @@ -1959,7 +1959,11 @@ def clear_bgp_and_verify(tgen, topo, router): return errormsg # To find neighbor ip type - bgp_addr_type = topo["routers"][router]["bgp"]["address_family"] + try: + bgp_addr_type = topo["routers"][router]["bgp"]["address_family"] + except TypeError: + bgp_addr_type = topo["routers"][router]["bgp"][0]["address_family"] + total_peer = 0 for addr_type in bgp_addr_type.keys(): @@ -2019,10 +2023,15 @@ def clear_bgp_and_verify(tgen, topo, router): logger.info("Clearing BGP neighborship for router %s..", router) for addr_type in bgp_addr_type.keys(): if addr_type == "ipv4": - run_frr_cmd(rnode, "clear ip bgp *") + if rid: + run_frr_cmd(rnode, "clear bgp ipv4 {}".format(rid)) + else: + run_frr_cmd(rnode, "clear bgp ipv4 *") elif addr_type == "ipv6": - run_frr_cmd(rnode, "clear bgp ipv6 *") - + if rid: + run_frr_cmd(rnode, "clear bgp ipv6 {}".format(rid)) + else: + run_frr_cmd(rnode, "clear bgp ipv6 *") peer_uptime_after_clear_bgp = {} # Verifying BGP convergence after bgp clear command for retry in range(50): @@ -2042,7 +2051,11 @@ def clear_bgp_and_verify(tgen, topo, router): return errormsg # To find neighbor ip type - bgp_addr_type = topo["routers"][router]["bgp"]["address_family"] + try: + bgp_addr_type = topo["routers"][router]["bgp"]["address_family"] + except TypeError: + bgp_addr_type = topo["routers"][router]["bgp"][0]["address_family"] + total_peer = 0 for addr_type in bgp_addr_type.keys(): if not check_address_types(addr_type): @@ -2797,7 +2810,11 @@ def verify_best_path_as_per_admin_distance( if route in rib_routes_json: st_found = True # Verify next_hop in rib_routes_json - if [nh for nh in rib_routes_json[route][0]["nexthops"] if nh['ip'] == _next_hop]: + if [ + nh + for nh in rib_routes_json[route][0]["nexthops"] + if nh["ip"] == _next_hop + ]: nh_found = True else: errormsg = ( diff --git a/tests/topotests/lib/bgprib.py b/tests/topotests/lib/bgprib.py index 35a57d0a99..01439373c5 100644 --- a/tests/topotests/lib/bgprib.py +++ b/tests/topotests/lib/bgprib.py @@ -37,6 +37,7 @@ from lib.lutil import luCommand, luResult, LUtil import json import re +import time # gpz: get rib in json form and compare against desired routes class BgpRib: @@ -48,7 +49,15 @@ class BgpRib: for pfx in pfxtbl.keys(): if debug: self.log("trying pfx %s" % pfx) - if pfx != want["p"]: + if "exist" in want and want["exist"] == 
False: + if pfx == want["p"]: + if debug: + self.log("unexpected route: pfx=" + want["p"]) + return 0 + if debug: + self.log("unwant pfx=" + want["p"] + ", not " + pfx) + continue + elif pfx != want["p"]: if debug: self.log("want pfx=" + want["p"] + ", not " + pfx) continue @@ -75,53 +84,67 @@ class BgpRib: if debug: self.log("missing route: pfx=" + want["p"] + ", nh=" + want["n"]) return 0 + if "exist" in want and want["exist"] == False: + return 1 + return 0 - def RequireVpnRoutes(self, target, title, wantroutes, debug=0): + def RequireVpnRoutes(self, target, title, wantroutes, retry=0, wait=1, debug=0): import json logstr = "RequireVpnRoutes " + str(wantroutes) - # non json form for humans - luCommand( - target, - 'vtysh -c "show bgp ipv4 vpn"', - ".", - "None", - "Get VPN RIB (non-json)", - ) - ret = luCommand( - target, - 'vtysh -c "show bgp ipv4 vpn json"', - ".*", - "None", - "Get VPN RIB (json)", - ) - if re.search(r"^\s*$", ret): - # degenerate case: empty json means no routes - if len(wantroutes) > 0: - luResult(target, False, title, logstr) - return - luResult(target, True, title, logstr) - rib = json.loads(ret) - rds = rib["routes"]["routeDistinguishers"] - for want in wantroutes: - found = 0 - if debug: - self.log("want rd %s" % want["rd"]) - for rd in rds.keys(): - if rd != want["rd"]: - continue + retry += 1 + while retry: + retry -= 1 + # non json form for humans + luCommand( + target, + 'vtysh -c "show bgp ipv4 vpn"', + ".", + "None", + "Get VPN RIB (non-json)", + ) + ret = luCommand( + target, + 'vtysh -c "show bgp ipv4 vpn json"', + ".*", + "None", + "Get VPN RIB (json)", + ) + if re.search(r"^\s*$", ret): + # degenerate case: empty json means no routes + if len(wantroutes) > 0: + luResult(target, False, title, logstr) + return + luResult(target, True, title, logstr) + rib = json.loads(ret) + rds = rib["routes"]["routeDistinguishers"] + for want in wantroutes: + found = 0 if debug: - self.log("found rd %s" % rd) - table = rds[rd] - if self.routes_include_wanted(table, want, debug): - found = 1 - break - if not found: - luResult(target, False, title, logstr) - return - luResult(target, True, title, logstr) + self.log("want rd %s" % want["rd"]) + for rd in rds.keys(): + if rd != want["rd"]: + continue + if debug: + self.log("found rd %s" % rd) + table = rds[rd] + if self.routes_include_wanted(table, want, debug): + found = 1 + break + if not found: + if retry: + break + luResult(target, False, title, logstr) + return + if not found and retry: + time.sleep(wait) + continue + luResult(target, True, title, logstr) + break - def RequireUnicastRoutes(self, target, afi, vrf, title, wantroutes, debug=0): + def RequireUnicastRoutes( + self, target, afi, vrf, title, wantroutes, retry=0, wait=1, debug=0 + ): logstr = "RequireUnicastRoutes %s" % str(wantroutes) vrfstr = "" if vrf != "": @@ -130,48 +153,62 @@ class BgpRib: if (afi != "ipv4") and (afi != "ipv6"): self.log("ERROR invalid afi") - cmdstr = "show bgp %s %s unicast" % (vrfstr, afi) - # non json form for humans - cmd = 'vtysh -c "%s"' % cmdstr - luCommand(target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi)) - cmd = 'vtysh -c "%s json"' % cmdstr - ret = luCommand( - target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi) - ) - if re.search(r"^\s*$", ret): - # degenerate case: empty json means no routes - if len(wantroutes) > 0: - luResult(target, False, title, logstr) + retry += 1 + while retry: + retry -= 1 + cmdstr = "show bgp %s %s unicast" % (vrfstr, afi) + # non json form for humans + cmd = 
'vtysh -c "%s"' % cmdstr + luCommand( + target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi) + ) + cmd = 'vtysh -c "%s json"' % cmdstr + ret = luCommand( + target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi) + ) + if re.search(r"^\s*$", ret): + # degenerate case: empty json means no routes + if len(wantroutes) > 0: + luResult(target, False, title, logstr) + return + luResult(target, True, title, logstr) + rib = json.loads(ret) + try: + table = rib["routes"] + # KeyError: 'routes' probably means missing/bad VRF + except KeyError as err: + if vrf != "": + errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf) + else: + errstr = "-script ERROR: check if vrf missing" + if retry: + time.sleep(wait) + continue + luResult(target, False, title + errstr, logstr) return + # if debug: + # self.log("table=%s" % table) + for want in wantroutes: + if debug: + self.log("want=%s" % want) + if not self.routes_include_wanted(table, want, debug): + if retry: + time.sleep(wait) + continue + luResult(target, False, title, logstr) + return luResult(target, True, title, logstr) - rib = json.loads(ret) - try: - table = rib["routes"] - # KeyError: 'routes' probably means missing/bad VRF - except KeyError as err: - if vrf != "": - errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf) - else: - errstr = "-script ERROR: check if vrf missing" - luResult(target, False, title + errstr, logstr) - return - # if debug: - # self.log("table=%s" % table) - for want in wantroutes: - if debug: - self.log("want=%s" % want) - if not self.routes_include_wanted(table, want, debug): - luResult(target, False, title, logstr) - return - luResult(target, True, title, logstr) + break BgpRib = BgpRib() -def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0): - BgpRib.RequireVpnRoutes(target, title, wantroutes, debug) +def bgpribRequireVpnRoutes(target, title, wantroutes, retry=0, wait=1, debug=0): + BgpRib.RequireVpnRoutes(target, title, wantroutes, retry, wait, debug) -def bgpribRequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug=0): - BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug) +def bgpribRequireUnicastRoutes( + target, afi, vrf, title, wantroutes, retry=0, wait=1, debug=0 +): + BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, retry, wait, debug) diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt index 86c089ab3b..6bafbbb556 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt @@ -5,5 +5,5 @@ B>* 10.0.3.0/24 [20/20] via 10.0.30.3, r1-eth2 (vrf neno), weight 1, XX:XX:XX O>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX O 10.0.20.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 10.0.20.0/24 is directly connected, r1-eth1, XX:XX:XX -B>* 10.0.30.0/24 [20/0] is directly connected, r1-eth2 (vrf neno), weight 1, XX:XX:XX +B>* 10.0.30.0/24 [20/0] is directly connected, neno (vrf neno), weight 1, XX:XX:XX O>* 10.0.40.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt index 9681d8a04e..3ed6b1b3a1 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt +++ 
b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt @@ -7,4 +7,4 @@ B>* 10.0.4.0/24 [20/20] via 10.0.40.4, r2-eth2 (vrf ray), weight 1, XX:XX:XX O 10.0.20.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.20.0/24 is directly connected, r2-eth1, XX:XX:XX O>* 10.0.30.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX -B>* 10.0.40.0/24 [20/0] is directly connected, r2-eth2 (vrf ray), weight 1, XX:XX:XX +B>* 10.0.40.0/24 [20/0] is directly connected, ray (vrf ray), weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt index ce9903ae71..4ad8441d85 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt @@ -1,9 +1,9 @@ VRF ray: B 10.0.1.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX -B 10.0.2.0/24 [20/0] is directly connected, r2-eth0 (vrf default) inactive, weight 1, XX:XX:XX +B 10.0.2.0/24 [20/0] is directly connected, lo (vrf default) inactive, weight 1, XX:XX:XX B>* 10.0.3.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX O>* 10.0.4.0/24 [110/20] via 10.0.40.4, r2-eth2, weight 1, XX:XX:XX -B 10.0.20.0/24 [20/0] is directly connected, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX +B 10.0.20.0/24 [20/0] is directly connected, lo (vrf default) inactive, weight 1, XX:XX:XX B>* 10.0.30.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX O 10.0.40.0/24 [110/10] is directly connected, r2-eth2, weight 1, XX:XX:XX C>* 10.0.40.0/24 is directly connected, r2-eth2, XX:XX:XX |
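For the lib/bgprib.py change above, the new retry/wait arguments are passed through the module-level wrappers so callers can poll the RIB instead of failing on the first empty or partial read. A hedged usage sketch; the target, VRF, prefix and nexthop values are placeholders, and the import path is assumed from the other topotest lib modules:

from lib.bgprib import bgpribRequireUnicastRoutes

# Require 10.0.1.0/24 via 10.0.20.1 in the default-VRF IPv4 unicast RIB of r2,
# re-reading "show bgp ipv4 unicast json" up to 10 times, 1 second apart.
# A want entry may also carry {"exist": False} to require a prefix to be absent,
# per the routes_include_wanted() change in this diff.
want = [{"p": "10.0.1.0/24", "n": "10.0.20.1"}]
bgpribRequireUnicastRoutes("r2", "ipv4", "", "IPv4 unicast RIB", want,
                           retry=10, wait=1)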
